dmaengine: omap-dma: control start/stop directly
[deliverable/linux.git] / drivers/dma/omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	struct omap_system_dma_plat_info *plat;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */
	uint16_t cicr;		/* CICR value */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

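/*
 * Start a transfer on a channel: clear the progress register (CPC on
 * OMAP15xx, CDAC otherwise), link the channel to itself for cyclic
 * transfers, clear stale status, program the interrupt mask from the
 * descriptor, and finally set CCR_EN.  The mb() ensures the preceding
 * register writes are posted before the channel is enabled.
 */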
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	if (__dma_omap15xx(od->plat->dma_attr))
		c->plat->dma_write(0, CPC, c->dma_ch);
	else
		c->plat->dma_write(0, CDAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val &= ~(1 << 14);

		val |= c->dma_ch | 1 << 15;

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	} else if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		c->plat->dma_write(c->dma_ch, CLNK_CTRL, c->dma_ch);

	/* Clear CSR */
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);

	/* Enable interrupts */
	c->plat->dma_write(d->cicr, CICR, c->dma_ch);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		val |= OMAP_DMA_CCR_BUFFERING_DISABLE;
	val |= OMAP_DMA_CCR_EN;
	mb();
	c->plat->dma_write(val, CCR, c->dma_ch);
}

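/*
 * Stop a transfer: mask the channel interrupt, clear any stale status,
 * and clear CCR_EN.  On SoCs with errata i541, a source/destination
 * synchronized channel is disabled with MIDLEMODE forced to no-idle and
 * then polled until the sDMA FIFO has drained; the original
 * OCP_SYSCONFIG value is restored afterwards.
 */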
static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	c->plat->dma_write(0, CICR, c->dma_ch);

	/* Clear CSR */
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_i541 &&
	    val & OMAP_DMA_CCR_SEL_SRC_DST_SYNC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = c->plat->dma_read(OCP_SYSCONFIG, c->dma_ch);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		c->plat->dma_write(val, OCP_SYSCONFIG, c->dma_ch);

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~OMAP_DMA_CCR_EN;
		c->plat->dma_write(val, CCR, c->dma_ch);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = c->plat->dma_read(CCR, c->dma_ch);
			if (!(val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		c->plat->dma_write(sysconfig, OCP_SYSCONFIG, c->dma_ch);
	} else {
		val &= ~OMAP_DMA_CCR_EN;
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~(1 << 15); /* Clear the ENABLE_LNK bit */

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	}
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM) {
		c->plat->dma_write(sg->addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(0, CDFI, c->dma_ch);
	} else {
		c->plat->dma_write(sg->addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(0, CSFI, c->dma_ch);
	}

	c->plat->dma_write(sg->en, CEN, c->dma_ch);
	c->plat->dma_write(sg->fn, CFN, c->dma_ch);

	omap_dma_start(c, d);
}

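/*
 * Take the next descriptor off the virtual channel and program the
 * per-descriptor state: address modes and synchronization in CCR,
 * element size and (on OMAP1) the peripheral port in CSDP, and the
 * device-side address.  Per-sg state is then loaded by
 * omap_dma_start_sg().
 */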
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	uint32_t val;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM) {
		if (dma_omap1()) {
			val = c->plat->dma_read(CSDP, c->dma_ch);
			val &= ~(0x1f << 9 | 0x1f << 2);
			val |= OMAP_DMA_PORT_EMIFF << 9;
			val |= d->periph_port << 2;
			c->plat->dma_write(val, CSDP, c->dma_ch);
		}

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(0x03 << 14 | 0x03 << 12);
		val |= OMAP_DMA_AMODE_POST_INC << 14;
		val |= OMAP_DMA_AMODE_CONSTANT << 12;
		c->plat->dma_write(val, CCR, c->dma_ch);

		c->plat->dma_write(d->dev_addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(d->fi, CSFI, c->dma_ch);
	} else {
		if (dma_omap1()) {
			val = c->plat->dma_read(CSDP, c->dma_ch);
			val &= ~(0x1f << 9 | 0x1f << 2);
			val |= d->periph_port << 9;
			val |= OMAP_DMA_PORT_EMIFF << 2;
			c->plat->dma_write(val, CSDP, c->dma_ch);
		}

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(0x03 << 12 | 0x03 << 14);
		val |= OMAP_DMA_AMODE_CONSTANT << 14;
		val |= OMAP_DMA_AMODE_POST_INC << 12;
		c->plat->dma_write(val, CCR, c->dma_ch);

		c->plat->dma_write(d->dev_addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(d->fi, CDFI, c->dma_ch);
	}

	val = c->plat->dma_read(CSDP, c->dma_ch);
	val &= ~0x03;
	val |= d->es;
	c->plat->dma_write(val, CSDP, c->dma_ch);

	if (dma_omap1()) {
		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(1 << 5);
		if (d->sync_mode == OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		c->plat->dma_write(val, CCR, c->dma_ch);

		val = c->plat->dma_read(CCR2, c->dma_ch);
		val &= ~(1 << 2);
		if (d->sync_mode == OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 2;
		c->plat->dma_write(val, CCR2, c->dma_ch);
	} else if (c->dma_sig) {
		val = c->plat->dma_read(CCR, c->dma_ch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~(1 << 24 | 1 << 23 | 3 << 19 | 1 << 18 | 1 << 5 | 0x1f);
		val |= (c->dma_sig & ~0x1f) << 14;
		val |= c->dma_sig & 0x1f;

		if (d->sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;

		if (d->sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;

		switch (d->sync_type) {
		case OMAP_DMA_DST_SYNC_PREFETCH: /* dest synch */
			val |= 1 << 23;		/* Prefetch */
			break;
		case 0:
			break;
		default:
			val |= 1 << 24;		/* source synch */
			break;
		}
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	omap_dma_start_sg(c, d, 0);
}

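/*
 * Transfer-complete callback: for scatter-gather transfers, advance to
 * the next sg entry, or complete the descriptor and start the next
 * one; cyclic transfers just signal the period callback and keep
 * running.
 */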
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

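/*
 * Bytes remaining from a given bus address to the end of the
 * descriptor: nothing is counted until the sg entry containing 'addr'
 * is reached, then the remainder of that entry and all following
 * entries are summed.
 */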
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio, and in that case the DMA
		 * needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;
	d->cicr = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (dma_omap1())
		d->cicr |= OMAP1_DMA_TOUT_IRQ;
	else
		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
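	/*
	 * Worked example with hypothetical values: a 32-bit register
	 * (ES = 4 bytes) and maxburst = 16 give EN = 16 and
	 * frame_bytes = 64, so a 4096-byte sg entry becomes FN = 64.
	 */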
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

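/*
 * A cyclic transfer is described with a single sg entry: EN covers one
 * period and FN is the number of periods in the buffer; the channel is
 * then linked to itself (see omap_dma_start()) so it wraps around
 * instead of stopping.
 */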
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	if (burst)
		d->sync_mode = OMAP_DMA_SYNC_PACKET;
	else
		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;
	d->cicr = OMAP_DMA_DROP_IRQ;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= OMAP_DMA_FRAME_IRQ;

	if (dma_omap1())
		d->cicr |= OMAP1_DMA_TOUT_IRQ;
	else
		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;

	if (!c->cyclic) {
		c->cyclic = true;

		if (__dma_omap15xx(od->plat->dma_attr)) {
			uint32_t val;

			val = c->plat->dma_read(CCR, c->dma_ch);
			val |= 3 << 8;
			c->plat->dma_write(val, CCR, c->dma_ch);
		}
	}

	if (dma_omap2plus()) {
		uint32_t val;

		val = c->plat->dma_read(CSDP, c->dma_ch);
		val |= 0x03 << 7;	/* src burst mode 16 */
		val |= 0x03 << 14;	/* dst burst mode 16 */
		c->plat->dma_write(val, CSDP, c->dma_ch);
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&od->lock);
	list_del_init(&c->node);
	spin_unlock(&od->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;

		if (__dma_omap15xx(od->plat->dma_attr)) {
			uint32_t val;

			val = c->plat->dma_read(CCR, c->dma_ch);
			val &= ~(3 << 8);
			c->plat->dma_write(val, CCR, c->dma_ch);
		}
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->plat = od->plat;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

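/*
 * Filter callback for dma_request_channel(): matches a channel by its
 * DMA request signal number.  A hypothetical client sketch (the
 * request line number here is made up):
 *
 *	unsigned sig = 17;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */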
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");