dmaengine: omap-dma: move reading of dma position to omap-dma.c
drivers/dma/omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	struct omap_system_dma_plat_info *plat;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */
	uint16_t cicr;		/* CICR value */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};
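
/*
 * Example (added for illustration; not part of the original file): the
 * byte count of one scatterlist entry is always ES * EN * FN. With
 * 32-bit elements (ES = 4 bytes), EN = 512 elements per frame and
 * FN = 4 frames, one entry moves 4 * 512 * 4 = 8192 bytes.
 */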

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	if (__dma_omap15xx(od->plat->dma_attr))
		c->plat->dma_write(0, CPC, c->dma_ch);
	else
		c->plat->dma_write(0, CDAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val &= ~(1 << 14);

		val |= c->dma_ch | 1 << 15;

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	} else if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		c->plat->dma_write(c->dma_ch, CLNK_CTRL, c->dma_ch);

	/* Clear CSR */
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);

	/* Enable interrupts */
	c->plat->dma_write(d->cicr, CICR, c->dma_ch);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		val |= OMAP_DMA_CCR_BUFFERING_DISABLE;
	val |= OMAP_DMA_CCR_EN;
	mb();
	c->plat->dma_write(val, CCR, c->dma_ch);
}

static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	c->plat->dma_write(0, CICR, c->dma_ch);

	/* Clear CSR */
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_i541 &&
	    val & OMAP_DMA_CCR_SEL_SRC_DST_SYNC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = c->plat->dma_read(OCP_SYSCONFIG, c->dma_ch);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		c->plat->dma_write(val, OCP_SYSCONFIG, c->dma_ch);

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~OMAP_DMA_CCR_EN;
		c->plat->dma_write(val, CCR, c->dma_ch);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = c->plat->dma_read(CCR, c->dma_ch);
			if (!(val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		c->plat->dma_write(sysconfig, OCP_SYSCONFIG, c->dma_ch);
	} else {
		val &= ~OMAP_DMA_CCR_EN;
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~(1 << 15); /* Clear the ENABLE_LNK bit */

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	}
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM) {
		c->plat->dma_write(sg->addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(0, CDFI, c->dma_ch);
	} else {
		c->plat->dma_write(sg->addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(0, CSFI, c->dma_ch);
	}

	c->plat->dma_write(sg->en, CEN, c->dma_ch);
	c->plat->dma_write(sg->fn, CFN, c->dma_ch);

	omap_dma_start(c, d);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	uint32_t val;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM) {
		if (dma_omap1()) {
			val = c->plat->dma_read(CSDP, c->dma_ch);
			val &= ~(0x1f << 9 | 0x1f << 2);
			val |= OMAP_DMA_PORT_EMIFF << 9;
			val |= d->periph_port << 2;
			c->plat->dma_write(val, CSDP, c->dma_ch);
		}

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(0x03 << 14 | 0x03 << 12);
		val |= OMAP_DMA_AMODE_POST_INC << 14;
		val |= OMAP_DMA_AMODE_CONSTANT << 12;
		c->plat->dma_write(val, CCR, c->dma_ch);

		c->plat->dma_write(d->dev_addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(d->fi, CSFI, c->dma_ch);
	} else {
		if (dma_omap1()) {
			val = c->plat->dma_read(CSDP, c->dma_ch);
			val &= ~(0x1f << 9 | 0x1f << 2);
			val |= d->periph_port << 9;
			val |= OMAP_DMA_PORT_EMIFF << 2;
			c->plat->dma_write(val, CSDP, c->dma_ch);
		}

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(0x03 << 12 | 0x03 << 14);
		val |= OMAP_DMA_AMODE_CONSTANT << 14;
		val |= OMAP_DMA_AMODE_POST_INC << 12;
		c->plat->dma_write(val, CCR, c->dma_ch);

		c->plat->dma_write(d->dev_addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(d->fi, CDFI, c->dma_ch);
	}

	val = c->plat->dma_read(CSDP, c->dma_ch);
	val &= ~0x03;
	val |= d->es;
	c->plat->dma_write(val, CSDP, c->dma_ch);

	if (dma_omap1()) {
		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(1 << 5);
		if (d->sync_mode == OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		c->plat->dma_write(val, CCR, c->dma_ch);

		val = c->plat->dma_read(CCR2, c->dma_ch);
		val &= ~(1 << 2);
		if (d->sync_mode == OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 2;
		c->plat->dma_write(val, CCR2, c->dma_ch);
	} else if (c->dma_sig) {
		val = c->plat->dma_read(CCR, c->dma_ch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~(1 << 24 | 1 << 23 | 3 << 19 | 1 << 18 | 1 << 5 | 0x1f);
		val |= (c->dma_sig & ~0x1f) << 14;
		val |= c->dma_sig & 0x1f;

		if (d->sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;

		if (d->sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;

		switch (d->sync_type) {
		case OMAP_DMA_DST_SYNC_PREFETCH:	/* dest synch */
			val |= 1 << 23;			/* Prefetch */
			break;
		case 0:
			break;
		default:
			val |= 1 << 24;			/* source synch */
			break;
		}
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels. We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}
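
/*
 * Worked example (illustrative, not in the original source): with two
 * sg entries of 4096 bytes each and the current position 1024 bytes
 * into the first entry, the loop above counts 4096 - 1024 = 3072 bytes
 * left in the matched entry, then the full 4096 bytes of the second
 * entry: residue = 7168 bytes.
 */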

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (od->plat->errata & DMA_ERRATA_3_3 && addr == 0)
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr)) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (c->plat->dma_read(CDAC, c->dma_ch))
			addr = c->plat->dma_read(CSAC, c->dma_ch);
		else
			addr = c->plat->dma_read(CSSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CSSA, c->dma_ch) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CDAC, c->dma_ch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!__dma_omap15xx(od->plat->dma_attr) && addr == 0) {
		addr = c->plat->dma_read(CDAC, c->dma_ch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (addr == 0)
			addr = c->plat->dma_read(CDSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CDSA, c->dma_ch) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
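
/*
 * A minimal client-side sketch (illustrative only, not part of this
 * file) of how the residue computed above reaches a consumer through
 * the standard dmaengine API:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_info("%u bytes still to transfer\n", state.residue);
 */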

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio, and in that case the DMA
		 * needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;
	d->cicr = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (dma_omap1())
		d->cicr |= OMAP1_DMA_TOUT_IRQ;
	else
		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN). Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
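	/*
	 * Example (illustrative, added for clarity): with 32-bit elements
	 * and burst = 16, frame_bytes = 4 * 16 = 64, so a 4096-byte sg
	 * entry is programmed as EN = 16 elements per frame and
	 * FN = 4096 / 64 = 64 frames.
	 */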
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	if (burst)
		d->sync_mode = OMAP_DMA_SYNC_PACKET;
	else
		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;
	d->cicr = OMAP_DMA_DROP_IRQ;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= OMAP_DMA_FRAME_IRQ;

	if (dma_omap1())
		d->cicr |= OMAP1_DMA_TOUT_IRQ;
	else
		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;

	if (!c->cyclic) {
		c->cyclic = true;

		if (__dma_omap15xx(od->plat->dma_attr)) {
			uint32_t val;

			val = c->plat->dma_read(CCR, c->dma_ch);
			val |= 3 << 8;
			c->plat->dma_write(val, CCR, c->dma_ch);
		}
	}

	if (dma_omap2plus()) {
		uint32_t val;

		val = c->plat->dma_read(CSDP, c->dma_ch);
		val |= 0x03 << 7;	/* src burst mode 16 */
		val |= 0x03 << 14;	/* dst burst mode 16 */
		c->plat->dma_write(val, CSDP, c->dma_ch);
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
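
/*
 * Example (illustrative, not from the original source): an audio
 * client mapping a 64 KiB ring buffer with an 8 KiB period and 16-bit
 * samples gets a single sg entry with EN = 8192 / 2 = 4096 elements
 * per frame and FN = 64 / 8 = 8 frames; with DMA_PREP_INTERRUPT set,
 * OMAP_DMA_FRAME_IRQ then raises one interrupt per period, driving
 * vchan_cyclic_callback().
 */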

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}
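
/*
 * A minimal client-side sketch (illustrative; UART_RX_FIFO_ADDR is a
 * hypothetical device register address) of the configuration this
 * function consumes:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= UART_RX_FIFO_ADDR,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst	= 1,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */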

static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&od->lock);
	list_del_init(&c->node);
	spin_unlock(&od->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;

		if (__dma_omap15xx(od->plat->dma_attr)) {
			uint32_t val;

			val = c->plat->dma_read(CCR, c->dma_ch);
			val &= ~(3 << 8);
			c->plat->dma_write(val, CCR, c->dma_ch);
		}
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->plat = od->plat;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
			of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
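
/*
 * A minimal sketch (illustrative; the request line 42 is an arbitrary
 * placeholder) of how a non-DT client would use this filter to claim
 * a channel bound to a specific sDMA request line:
 *
 *	dma_cap_mask_t mask;
 *	unsigned req = 42;	// sDMA request line (placeholder)
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &req);
 */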

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");