dmaengine: omap-dma: move register read/writes into omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};

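/*
 * CCR/CSDP/CICR/CLNK_CTRL bit definitions below mirror the sDMA register
 * layout; entries marked "OMAP1 only" or "OMAP2+ only" exist only on
 * that generation of the controller.
 */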
enum {
	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2, /* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9, /* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9, /* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9, /* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9, /* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9, /* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9, /* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),
};

static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

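/*
 * Low-level register accessors: these hide the register width
 * differences between OMAP generations. Some OMAP1 registers hold a
 * 32-bit value as a pair of adjacent 16-bit registers
 * (OMAP_DMA_REG_2X16BIT); the access type for each register comes from
 * the platform-supplied reg_map.
 */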
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}

static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}

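/*
 * The channel status register is clear-on-read on OMAP1, but
 * write-one-to-clear on OMAP2+, hence the two idioms below.
 */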
static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
	unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;
}

static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, d->cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
}

static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
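	/*
	 * Erratum i541 workaround: hold the DMA controller out of idle
	 * (MIDLEMODE = no-idle) while disabling a source-triggered
	 * channel, then poll until the sDMA FIFO has drained.
	 */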
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = omap_dma_chan_read(c, CCR);
			if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, 0);
	omap_dma_chan_write(c, cxfi, 0);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
}

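/*
 * Take the next queued descriptor off the virtual channel and program
 * the per-descriptor registers; the per-sg registers are programmed by
 * omap_dma_start_sg().
 */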
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, 0);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d, 0);
}

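/*
 * Completion interrupt: advance to the next sg entry of the current
 * descriptor, or complete it and start whichever descriptor is queued
 * next. Cyclic transfers stay on the same descriptor forever.
 */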
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels. We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	dev_dbg(od->ddev.dev, "allocating channel for %u\n", c->dma_sig);

	ret = omap_request_dma(c->dma_sig, "DMA engine", omap_dma_callback,
			       c, &c->dma_ch);

	if (ret >= 0)
		omap_dma_assign(od, c, c->dma_ch);

	return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	c->channel_base = NULL;
	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

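/*
 * Residue at a given hardware position: skip whole sg entries until the
 * one containing "addr", count the remainder of that entry from addr,
 * then add the full size of every entry after it.
 */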
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far). Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio, and in that case the
		 * DMA needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	d->ccr = CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp = es;

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			d->ccr |= CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			d->ccr |= c->dma_ch + 1;
		} else {
			d->ccr |= c->dma_sig & 0x1f;
		}

		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		d->ccr |= (c->dma_sig & ~0x1f) << 14;
		d->ccr |= c->dma_sig & 0x1f;

		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		d->ccr |= CCR_BUFFERING_DISABLE;
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN). Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
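	/*
	 * For example, a 4-byte bus width with maxburst = 16 gives
	 * 64-byte frames; each sg entry length is assumed to be a
	 * multiple of frame_bytes (the division below truncates).
	 */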
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;
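	/*
	 * One frame per period: with CICR_FRAME_IE enabled below (when
	 * DMA_PREP_INTERRUPT is set), the hardware raises an interrupt
	 * per period, which drives the client's per-period callback.
	 */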

	d->ccr = 0;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			d->ccr |= CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			d->ccr |= c->dma_ch + 1;
		} else {
			d->ccr |= c->dma_sig & 0x1f;
		}

		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		d->ccr |= (c->dma_sig & ~0x1f) << 14;
		d->ccr |= c->dma_sig & 0x1f;

		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		d->ccr |= CCR_BUFFERING_DISABLE;

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

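/*
 * Filter for dma_request_channel(): matches when the channel belongs to
 * this driver and its request line equals *(unsigned *)param. A typical
 * (hypothetical) client looks like:
 *
 *	unsigned sig = <request line number>;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */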
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");