[deliverable/linux.git] / drivers / dma / shdma.c
d8902adc
NI
1/*
2 * Renesas SuperH DMA Engine support
3 *
4 * base is drivers/dma/fsldma.c
5 *
6 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
7 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
8 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
9 *
10 * This is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * - The SuperH DMAC does not have a hardware DMA chain mode.
16 * - The maximum DMA transfer size is 16 MB.
17 *
18 */
19
20#include <linux/init.h>
21#include <linux/module.h>
5a0e3ad6 22#include <linux/slab.h>
d8902adc
NI
23#include <linux/interrupt.h>
24#include <linux/dmaengine.h>
25#include <linux/delay.h>
d8902adc 26#include <linux/platform_device.h>
20f2a3b5 27#include <linux/pm_runtime.h>
b2623a61 28#include <linux/sh_dma.h>
03aa18f5
PM
29#include <linux/notifier.h>
30#include <linux/kdebug.h>
31#include <linux/spinlock.h>
32#include <linux/rculist.h>
d8902adc
NI
33#include "shdma.h"
34
35/* DMA descriptor control */
3542a113
GL
36enum sh_dmae_desc_status {
37 DESC_IDLE,
38 DESC_PREPARED,
39 DESC_SUBMITTED,
40 DESC_COMPLETED, /* completed, have to call callback */
41 DESC_WAITING, /* callback called, waiting for ack / re-submit */
42};
d8902adc
NI
43
44#define NR_DESCS_PER_CHANNEL 32
8b1935e6
GL
45/* Default MEMCPY transfer size = 2^2 = 4 bytes */
46#define LOG2_DEFAULT_XFER_SIZE 2
d8902adc 47
03aa18f5
PM
48/*
49 * Used for write-side mutual exclusion for the global device list,
2dc66667 50 * read-side synchronization by way of RCU, and per-controller data.
03aa18f5
PM
51 */
52static DEFINE_SPINLOCK(sh_dmae_lock);
53static LIST_HEAD(sh_dmae_devices);
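/*
 * A sketch of the pattern used with these two: writers (probe/remove below)
 * take sh_dmae_lock around the _rcu list helpers, while the NMI path only
 * reads, as in sh_dmae_nmi_handler():
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node)
 *		...;
 *	rcu_read_unlock();
 */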
54
cfefe997 55/* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
02ca5083 56static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
cfefe997 57
3542a113
GL
58static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
59
d8902adc
NI
60static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
61{
027811b9 62 __raw_writel(data, sh_dc->base + reg / sizeof(u32));
d8902adc
NI
63}
64
65static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
66{
027811b9
GL
67 return __raw_readl(sh_dc->base + reg / sizeof(u32));
68}
69
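/*
 * DMAOR is a controller-global register; depending on the SoC it is mapped
 * as a 16-bit or a 32-bit register, which is what pdata->dmaor_is_32bit
 * selects in the two accessors below.
 */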
70static u16 dmaor_read(struct sh_dmae_device *shdev)
71{
e76c3af8
KM
72 u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
73
74 if (shdev->pdata->dmaor_is_32bit)
75 return __raw_readl(addr);
76 else
77 return __raw_readw(addr);
027811b9
GL
78}
79
80static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
81{
e76c3af8
KM
82 u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
83
84 if (shdev->pdata->dmaor_is_32bit)
85 __raw_writel(data, addr);
86 else
87 __raw_writew(data, addr);
d8902adc
NI
88}
89
5899a723
KM
90static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
91{
92 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
93
94 __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
95}
96
97static u32 chcr_read(struct sh_dmae_chan *sh_dc)
98{
99 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
100
101 return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
d8902adc
NI
102}
103
d8902adc
NI
104/*
105 * Reset DMA controller
106 *
107 * SH7780 has two DMAOR registers
108 */
027811b9 109static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
d8902adc 110{
2dc66667
GL
111 unsigned short dmaor;
112 unsigned long flags;
113
114 spin_lock_irqsave(&sh_dmae_lock, flags);
d8902adc 115
2dc66667 116 dmaor = dmaor_read(shdev);
027811b9 117 dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
2dc66667
GL
118
119 spin_unlock_irqrestore(&sh_dmae_lock, flags);
d8902adc
NI
120}
121
027811b9 122static int sh_dmae_rst(struct sh_dmae_device *shdev)
d8902adc
NI
123{
124 unsigned short dmaor;
2dc66667 125 unsigned long flags;
d8902adc 126
2dc66667 127 spin_lock_irqsave(&sh_dmae_lock, flags);
d8902adc 128
2dc66667
GL
129 dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
130
131 dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
132
133 dmaor = dmaor_read(shdev);
134
135 spin_unlock_irqrestore(&sh_dmae_lock, flags);
136
137 if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
138 dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
139 return -EIO;
d8902adc
NI
140 }
141 return 0;
142}
143
fc461857 144static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
d8902adc 145{
5899a723 146 u32 chcr = chcr_read(sh_chan);
fc461857
GL
147
148 if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
149 return true; /* working */
150
151 return false; /* waiting */
d8902adc
NI
152}
153
8b1935e6 154static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
d8902adc 155{
c4e0dd78 156 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
8b1935e6
GL
157 struct sh_dmae_pdata *pdata = shdev->pdata;
158 int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
159 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
160
161 if (cnt >= pdata->ts_shift_num)
162 cnt = 0;
623b4ac4 163
8b1935e6
GL
164 return pdata->ts_shift[cnt];
165}
166
167static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
168{
c4e0dd78 169 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
8b1935e6
GL
170 struct sh_dmae_pdata *pdata = shdev->pdata;
171 int i;
172
173 for (i = 0; i < pdata->ts_shift_num; i++)
174 if (pdata->ts_shift[i] == l2size)
175 break;
176
177 if (i == pdata->ts_shift_num)
178 i = 0;
179
180 return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
181 ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
d8902adc
NI
182}
183
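/*
 * Note on units: hw->tcr holds the length in bytes, while the TCR register
 * counts transfer units, hence the right shift by xmit_shift (derived from
 * the CHCR TS bits in calc_xmit_shift() above). Worked example with the
 * MEMCPY default LOG2_DEFAULT_XFER_SIZE = 2, i.e. 4-byte units: a 64-byte
 * request is programmed as TCR = 64 >> 2 = 16.
 */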
3542a113 184static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
d8902adc 185{
3542a113
GL
186 sh_dmae_writel(sh_chan, hw->sar, SAR);
187 sh_dmae_writel(sh_chan, hw->dar, DAR);
cfefe997 188 sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
d8902adc
NI
189}
190
191static void dmae_start(struct sh_dmae_chan *sh_chan)
192{
67c6269e 193 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
5899a723 194 u32 chcr = chcr_read(sh_chan);
d8902adc 195
260bf2c5
KM
196 if (shdev->pdata->needs_tend_set)
197 sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
198
67c6269e 199 chcr |= CHCR_DE | shdev->chcr_ie_bit;
5899a723 200 chcr_write(sh_chan, chcr & ~CHCR_TE);
d8902adc
NI
201}
202
203static void dmae_halt(struct sh_dmae_chan *sh_chan)
204{
67c6269e 205 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
5899a723 206 u32 chcr = chcr_read(sh_chan);
d8902adc 207
67c6269e 208 chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
5899a723 209 chcr_write(sh_chan, chcr);
d8902adc
NI
210}
211
cfefe997
GL
212static void dmae_init(struct sh_dmae_chan *sh_chan)
213{
8b1935e6
GL
214 /*
215 * Default configuration for dual address memory-memory transfer.
216 * 0x400 represents auto-request.
217 */
218 u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
219 LOG2_DEFAULT_XFER_SIZE);
220 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
5899a723 221 chcr_write(sh_chan, chcr);
cfefe997
GL
222}
223
d8902adc
NI
224static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
225{
2dc66667 226 /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
fc461857
GL
227 if (dmae_is_busy(sh_chan))
228 return -EBUSY;
d8902adc 229
8b1935e6 230 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
5899a723 231 chcr_write(sh_chan, val);
cfefe997 232
d8902adc
NI
233 return 0;
234}
235
d8902adc
NI
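/*
 * Each 16-bit DMARS register packs the 8-bit MID/RID settings of two
 * channels: chan_pdata->dmars is the byte offset of the register and
 * dmars_bit (0 or 8) selects the low or high byte. The read-modify-write
 * below masks with (0xff00 >> shift) to preserve the other channel's field;
 * e.g. with shift == 8, bits 7:0 are kept and val lands in bits 15:8.
 */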
236static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
237{
c4e0dd78 238 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
027811b9 239 struct sh_dmae_pdata *pdata = shdev->pdata;
5bac942d 240 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
26fc02ab 241 u16 __iomem *addr = shdev->dmars;
090b9180 242 unsigned int shift = chan_pdata->dmars_bit;
fc461857
GL
243
244 if (dmae_is_busy(sh_chan))
245 return -EBUSY;
d8902adc 246
260bf2c5
KM
247 if (pdata->no_dmars)
248 return 0;
249
26fc02ab
MD
250 /* in the case of a missing DMARS resource use first memory window */
251 if (!addr)
252 addr = (u16 __iomem *)shdev->chan_reg;
253 addr += chan_pdata->dmars / sizeof(u16);
254
027811b9
GL
255 __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
256 addr);
d8902adc
NI
257
258 return 0;
259}
260
7a1cd9ad
GL
261static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
262
d8902adc
NI
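/*
 * tx_submit: assign the next cookie (wrapping back to 1 when it would go
 * negative), mark every chunk of the descriptor chain DESC_SUBMITTED and
 * move it onto ld_queue. If the queue was empty, a runtime PM reference is
 * taken and, unless a controller reset re-established the channel while
 * waiting for the PM barrier, the channel is (re)programmed: DMARS/CHCR
 * from the slave config when chan->private is set, MEMCPY defaults
 * otherwise; a transfer requested via issue_pending meanwhile is started.
 */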
263static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
264{
3542a113 265 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
d8902adc 266 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
7a1cd9ad 267 struct sh_dmae_slave *param = tx->chan->private;
3542a113 268 dma_async_tx_callback callback = tx->callback;
d8902adc 269 dma_cookie_t cookie;
7a1cd9ad 270 bool power_up;
d8902adc 271
7a1cd9ad
GL
272 spin_lock_irq(&sh_chan->desc_lock);
273
274 if (list_empty(&sh_chan->ld_queue))
275 power_up = true;
276 else
277 power_up = false;
d8902adc
NI
278
279 cookie = sh_chan->common.cookie;
280 cookie++;
281 if (cookie < 0)
282 cookie = 1;
283
3542a113
GL
284 sh_chan->common.cookie = cookie;
285 tx->cookie = cookie;
286
287 /* Mark all chunks of this descriptor as submitted, move to the queue */
288 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
289 /*
290 * All chunks are on the global ld_free, so, we have to find
291 * the end of the chain ourselves
292 */
293 if (chunk != desc && (chunk->mark == DESC_IDLE ||
294 chunk->async_tx.cookie > 0 ||
295 chunk->async_tx.cookie == -EBUSY ||
296 &chunk->node == &sh_chan->ld_free))
297 break;
298 chunk->mark = DESC_SUBMITTED;
299 /* Callback goes to the last chunk */
300 chunk->async_tx.callback = NULL;
301 chunk->cookie = cookie;
302 list_move_tail(&chunk->node, &sh_chan->ld_queue);
303 last = chunk;
304 }
d8902adc 305
3542a113
GL
306 last->async_tx.callback = callback;
307 last->async_tx.callback_param = tx->callback_param;
308
309 dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
310 tx->cookie, &last->async_tx, sh_chan->id,
311 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
d8902adc 312
7a1cd9ad
GL
313 if (power_up) {
314 sh_chan->pm_state = DMAE_PM_BUSY;
315
316 pm_runtime_get(sh_chan->dev);
317
318 spin_unlock_irq(&sh_chan->desc_lock);
319
320 pm_runtime_barrier(sh_chan->dev);
321
322 spin_lock_irq(&sh_chan->desc_lock);
323
324 /* Have we been reset, while waiting? */
325 if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
326 dev_dbg(sh_chan->dev, "Bring up channel %d\n",
327 sh_chan->id);
328 if (param) {
329 const struct sh_dmae_slave_config *cfg =
330 param->config;
331
332 dmae_set_dmars(sh_chan, cfg->mid_rid);
333 dmae_set_chcr(sh_chan, cfg->chcr);
334 } else {
335 dmae_init(sh_chan);
336 }
337
338 if (sh_chan->pm_state == DMAE_PM_PENDING)
339 sh_chan_xfer_ld_queue(sh_chan);
340 sh_chan->pm_state = DMAE_PM_ESTABLISHED;
341 }
342 }
343
344 spin_unlock_irq(&sh_chan->desc_lock);
d8902adc
NI
345
346 return cookie;
347}
348
3542a113 349/* Called with desc_lock held */
d8902adc
NI
350static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
351{
3542a113 352 struct sh_desc *desc;
d8902adc 353
3542a113
GL
354 list_for_each_entry(desc, &sh_chan->ld_free, node)
355 if (desc->mark != DESC_PREPARED) {
356 BUG_ON(desc->mark != DESC_IDLE);
d8902adc 357 list_del(&desc->node);
3542a113 358 return desc;
d8902adc 359 }
d8902adc 360
3542a113 361 return NULL;
d8902adc
NI
362}
363
5bac942d 364static const struct sh_dmae_slave_config *sh_dmae_find_slave(
4bab9d42 365 struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
cfefe997 366{
c4e0dd78 367 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
027811b9 368 struct sh_dmae_pdata *pdata = shdev->pdata;
cfefe997
GL
369 int i;
370
02ca5083 371 if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
cfefe997
GL
372 return NULL;
373
027811b9 374 for (i = 0; i < pdata->slave_num; i++)
4bab9d42 375 if (pdata->slave[i].slave_id == param->slave_id)
027811b9 376 return pdata->slave + i;
cfefe997
GL
377
378 return NULL;
379}
380
d8902adc
NI
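/*
 * Slave channels arrive here with chan->private pointing at a struct
 * sh_dmae_slave carrying the slave_id matched by sh_dmae_find_slave()
 * above. A typical client does this from a dma_request_channel() filter;
 * an illustrative sketch (hypothetical slave ID, error handling omitted):
 *
 *	static bool client_filter(struct dma_chan *chan, void *arg)
 *	{
 *		struct sh_dmae_slave *param = arg;
 *
 *		param->slave_id = SHDMA_SLAVE_EXAMPLE_TX;
 *		chan->private = param;
 *		return true;
 *	}
 */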
381static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
382{
383 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
384 struct sh_desc *desc;
cfefe997 385 struct sh_dmae_slave *param = chan->private;
83515bc7 386 int ret;
cfefe997
GL
387
388 /*
389 * This relies on the guarantee from dmaengine that alloc_chan_resources
390 * never runs concurrently with itself or free_chan_resources.
391 */
392 if (param) {
5bac942d 393 const struct sh_dmae_slave_config *cfg;
cfefe997 394
4bab9d42 395 cfg = sh_dmae_find_slave(sh_chan, param);
83515bc7
GL
396 if (!cfg) {
397 ret = -EINVAL;
398 goto efindslave;
399 }
cfefe997 400
83515bc7
GL
401 if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
402 ret = -EBUSY;
403 goto etestused;
404 }
cfefe997
GL
405
406 param->config = cfg;
cfefe997 407 }
d8902adc 408
d8902adc 409 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
d8902adc 410 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
b4dae6e1 411 if (!desc)
d8902adc 412 break;
d8902adc
NI
413 dma_async_tx_descriptor_init(&desc->async_tx,
414 &sh_chan->common);
415 desc->async_tx.tx_submit = sh_dmae_tx_submit;
3542a113 416 desc->mark = DESC_IDLE;
d8902adc 417
3542a113 418 list_add(&desc->node, &sh_chan->ld_free);
d8902adc
NI
419 sh_chan->descs_allocated++;
420 }
d8902adc 421
83515bc7
GL
422 if (!sh_chan->descs_allocated) {
423 ret = -ENOMEM;
424 goto edescalloc;
425 }
20f2a3b5 426
d8902adc 427 return sh_chan->descs_allocated;
83515bc7
GL
428
429edescalloc:
430 if (param)
431 clear_bit(param->slave_id, sh_dmae_slave_used);
432etestused:
433efindslave:
b4dae6e1 434 chan->private = NULL;
83515bc7 435 return ret;
d8902adc
NI
436}
437
438/*
439 * sh_dma_free_chan_resources - Free all resources of the channel.
440 */
441static void sh_dmae_free_chan_resources(struct dma_chan *chan)
442{
443 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
444 struct sh_desc *desc, *_desc;
445 LIST_HEAD(list);
446
2dc66667
GL
447 /* Protect against ISR */
448 spin_lock_irq(&sh_chan->desc_lock);
cfefe997 449 dmae_halt(sh_chan);
2dc66667
GL
450 spin_unlock_irq(&sh_chan->desc_lock);
451
452 /* Now no new interrupts will occur */
cfefe997 453
3542a113
GL
454 /* Prepared and not submitted descriptors can still be on the queue */
455 if (!list_empty(&sh_chan->ld_queue))
456 sh_dmae_chan_ld_cleanup(sh_chan, true);
457
cfefe997
GL
458 if (chan->private) {
459 /* The caller is holding dma_list_mutex */
460 struct sh_dmae_slave *param = chan->private;
461 clear_bit(param->slave_id, sh_dmae_slave_used);
2dc66667 462 chan->private = NULL;
cfefe997
GL
463 }
464
b4dae6e1 465 spin_lock_irq(&sh_chan->desc_lock);
d8902adc
NI
466
467 list_splice_init(&sh_chan->ld_free, &list);
468 sh_chan->descs_allocated = 0;
469
b4dae6e1 470 spin_unlock_irq(&sh_chan->desc_lock);
d8902adc
NI
471
472 list_for_each_entry_safe(desc, _desc, &list, node)
473 kfree(desc);
474}
475
cfefe997 476/**
fc461857
GL
477 * sh_dmae_add_desc - get, set up and return one transfer descriptor
478 * @sh_chan: DMA channel
479 * @flags: DMA transfer flags
480 * @dest: destination DMA address, incremented when direction equals
db8196df 481 * DMA_DEV_TO_MEM
fc461857 482 * @src: source DMA address, incremented when direction equals
db8196df 483 * DMA_MEM_TO_DEV
fc461857
GL
484 * @len: DMA transfer length
485 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
486 * @direction: needed for slave DMA to decide which address to keep constant,
db8196df 487 * equals DMA_MEM_TO_MEM for MEMCPY
fc461857
GL
488 * Returns 0 or an error
489 * Locks: called with desc_lock held
490 */
491static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
492 unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
db8196df 493 struct sh_desc **first, enum dma_transfer_direction direction)
d8902adc 494{
fc461857 495 struct sh_desc *new;
d8902adc
NI
496 size_t copy_size;
497
fc461857 498 if (!*len)
d8902adc
NI
499 return NULL;
500
fc461857
GL
501 /* Allocate the link descriptor from the free list */
502 new = sh_dmae_get_desc(sh_chan);
503 if (!new) {
504 dev_err(sh_chan->dev, "No free link descriptor available\n");
d8902adc 505 return NULL;
fc461857 506 }
d8902adc 507
fc461857
GL
508 copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
509
510 new->hw.sar = *src;
511 new->hw.dar = *dest;
512 new->hw.tcr = copy_size;
513
514 if (!*first) {
515 /* First desc */
516 new->async_tx.cookie = -EBUSY;
517 *first = new;
518 } else {
519 /* Other desc - invisible to the user */
520 new->async_tx.cookie = -EINVAL;
521 }
522
cfefe997
GL
523 dev_dbg(sh_chan->dev,
524 "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
fc461857 525 copy_size, *len, *src, *dest, &new->async_tx,
cfefe997 526 new->async_tx.cookie, sh_chan->xmit_shift);
fc461857
GL
527
528 new->mark = DESC_PREPARED;
529 new->async_tx.flags = flags;
cfefe997 530 new->direction = direction;
fc461857
GL
531
532 *len -= copy_size;
db8196df 533 if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
fc461857 534 *src += copy_size;
db8196df 535 if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
fc461857
GL
536 *dest += copy_size;
537
538 return new;
539}
540
541/*
542 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
543 *
544 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
545 * converted to scatter-gather to guarantee consistent locking and a correct
546 * list manipulation. For slave DMA, direction carries the usual meaning and,
547 * logically, the SG list describes RAM while the addr variable holds the slave
db8196df 548 * address, e.g., a FIFO I/O register. For MEMCPY, direction equals DMA_MEM_TO_MEM
fc461857
GL
549 * and the SG list contains only one element and points at the source buffer.
550 */
551static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
552 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
db8196df 553 enum dma_transfer_direction direction, unsigned long flags)
fc461857
GL
554{
555 struct scatterlist *sg;
556 struct sh_desc *first = NULL, *new = NULL /* compiler... */;
557 LIST_HEAD(tx_list);
558 int chunks = 0;
b4dae6e1 559 unsigned long irq_flags;
fc461857
GL
560 int i;
561
562 if (!sg_len)
563 return NULL;
564
565 for_each_sg(sgl, sg, sg_len, i)
566 chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
567 (SH_DMA_TCR_MAX + 1);
d8902adc 568
3542a113 569 /* Have to lock the whole loop to protect against concurrent release */
b4dae6e1 570 spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
3542a113
GL
571
572 /*
573 * Chaining:
574 * first descriptor is what user is dealing with in all API calls, its
575 * cookie is at first set to -EBUSY, at tx-submit to a positive
576 * number
577 * if more than one chunk is needed further chunks have cookie = -EINVAL
578 * the last chunk, if not equal to the first, has cookie = -ENOSPC
579 * all chunks are linked onto the tx_list head with their .node heads
580 * only during this function, then they are immediately spliced
581 * back onto the free list in form of a chain
582 */
fc461857
GL
583 for_each_sg(sgl, sg, sg_len, i) {
584 dma_addr_t sg_addr = sg_dma_address(sg);
585 size_t len = sg_dma_len(sg);
586
587 if (!len)
588 goto err_get_desc;
589
590 do {
591 dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
592 i, sg, len, (unsigned long long)sg_addr);
593
db8196df 594 if (direction == DMA_DEV_TO_MEM)
fc461857
GL
595 new = sh_dmae_add_desc(sh_chan, flags,
596 &sg_addr, addr, &len, &first,
597 direction);
598 else
599 new = sh_dmae_add_desc(sh_chan, flags,
600 addr, &sg_addr, &len, &first,
601 direction);
602 if (!new)
603 goto err_get_desc;
604
605 new->chunks = chunks--;
606 list_add_tail(&new->node, &tx_list);
607 } while (len);
608 }
d8902adc 609
3542a113
GL
610 if (new != first)
611 new->async_tx.cookie = -ENOSPC;
d8902adc 612
3542a113
GL
613 /* Put them back on the free list, so, they don't get lost */
614 list_splice_tail(&tx_list, &sh_chan->ld_free);
d8902adc 615
b4dae6e1 616 spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
d8902adc 617
3542a113 618 return &first->async_tx;
fc461857
GL
619
620err_get_desc:
621 list_for_each_entry(new, &tx_list, node)
622 new->mark = DESC_IDLE;
623 list_splice(&tx_list, &sh_chan->ld_free);
624
b4dae6e1 625 spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
fc461857
GL
626
627 return NULL;
628}
629
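/*
 * Illustrative client usage of the MEMCPY capability (generic dmaengine
 * calls, not part of this driver; dst/src are dma_addr_t, error handling
 * omitted). tx_submit() ends up in sh_dmae_tx_submit() and issue_pending
 * in sh_dmae_memcpy_issue_pending() below:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */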
630static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
631 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
632 size_t len, unsigned long flags)
633{
634 struct sh_dmae_chan *sh_chan;
635 struct scatterlist sg;
636
637 if (!chan || !len)
638 return NULL;
639
640 sh_chan = to_sh_chan(chan);
641
642 sg_init_table(&sg, 1);
643 sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
644 offset_in_page(dma_src));
645 sg_dma_address(&sg) = dma_src;
646 sg_dma_len(&sg) = len;
647
db8196df 648 return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
fc461857 649 flags);
d8902adc
NI
650}
651
cfefe997
GL
652static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
653 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
db8196df 654 enum dma_transfer_direction direction, unsigned long flags)
cfefe997
GL
655{
656 struct sh_dmae_slave *param;
657 struct sh_dmae_chan *sh_chan;
5bac942d 658 dma_addr_t slave_addr;
cfefe997
GL
659
660 if (!chan)
661 return NULL;
662
663 sh_chan = to_sh_chan(chan);
664 param = chan->private;
665
666 /* Someone calling slave DMA on a public channel? */
667 if (!param || !sg_len) {
668 dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
669 __func__, param, sg_len, param ? param->slave_id : -1);
670 return NULL;
671 }
672
9f9ff20d
DC
673 slave_addr = param->config->addr;
674
cfefe997
GL
675 /*
676 * if (param != NULL), this is a successfully requested slave channel,
677 * therefore param->config != NULL too.
678 */
5bac942d 679 return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
cfefe997
GL
680 direction, flags);
681}
682
05827630
LW
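/*
 * Only DMA_TERMINATE_ALL is implemented: halt the channel, record the
 * untransferred remainder of the current descriptor in desc->partial and
 * release every queued descriptor. Clients reach this through
 * dmaengine_terminate_all(chan), i.e.
 * chan->device->device_control(chan, DMA_TERMINATE_ALL, 0).
 */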
683static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
684 unsigned long arg)
cfefe997
GL
685{
686 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
b4dae6e1 687 unsigned long flags;
cfefe997 688
c3635c78
LW
689 /* Only supports DMA_TERMINATE_ALL */
690 if (cmd != DMA_TERMINATE_ALL)
691 return -ENXIO;
692
cfefe997 693 if (!chan)
c3635c78 694 return -EINVAL;
cfefe997 695
b4dae6e1 696 spin_lock_irqsave(&sh_chan->desc_lock, flags);
c014906a
GL
697 dmae_halt(sh_chan);
698
c014906a
GL
699 if (!list_empty(&sh_chan->ld_queue)) {
700 /* Record partial transfer */
701 struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
702 struct sh_desc, node);
703 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
704 sh_chan->xmit_shift;
c014906a 705 }
b4dae6e1 706 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
c014906a 707
cfefe997 708 sh_dmae_chan_ld_cleanup(sh_chan, true);
c3635c78
LW
709
710 return 0;
cfefe997
GL
711}
712
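/*
 * One cleanup pass over ld_queue: update completed_cookie, recycle acked
 * descriptors to ld_free (dropping the runtime PM reference once the queue
 * empties) and invoke at most one completion callback, which is also the
 * return value so that sh_dmae_chan_ld_cleanup() keeps calling until no
 * work is left.
 */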
3542a113 713static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
d8902adc
NI
714{
715 struct sh_desc *desc, *_desc;
3542a113
GL
716 /* Is the "exposed" head of a chain acked? */
717 bool head_acked = false;
718 dma_cookie_t cookie = 0;
719 dma_async_tx_callback callback = NULL;
720 void *param = NULL;
b4dae6e1 721 unsigned long flags;
d8902adc 722
b4dae6e1 723 spin_lock_irqsave(&sh_chan->desc_lock, flags);
d8902adc 724 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
3542a113
GL
725 struct dma_async_tx_descriptor *tx = &desc->async_tx;
726
727 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
728 BUG_ON(desc->mark != DESC_SUBMITTED &&
729 desc->mark != DESC_COMPLETED &&
730 desc->mark != DESC_WAITING);
731
732 /*
733 * queue is ordered, and we use this loop to (1) clean up all
734 * completed descriptors, and to (2) update descriptor flags of
735 * any chunks in a (partially) completed chain
736 */
737 if (!all && desc->mark == DESC_SUBMITTED &&
738 desc->cookie != cookie)
d8902adc
NI
739 break;
740
3542a113
GL
741 if (tx->cookie > 0)
742 cookie = tx->cookie;
d8902adc 743
3542a113 744 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
cfefe997
GL
745 if (sh_chan->completed_cookie != desc->cookie - 1)
746 dev_dbg(sh_chan->dev,
747 "Completing cookie %d, expected %d\n",
748 desc->cookie,
749 sh_chan->completed_cookie + 1);
3542a113
GL
750 sh_chan->completed_cookie = desc->cookie;
751 }
d8902adc 752
3542a113
GL
753 /* Call callback on the last chunk */
754 if (desc->mark == DESC_COMPLETED && tx->callback) {
755 desc->mark = DESC_WAITING;
756 callback = tx->callback;
757 param = tx->callback_param;
758 dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
759 tx->cookie, tx, sh_chan->id);
760 BUG_ON(desc->chunks != 1);
761 break;
762 }
d8902adc 763
3542a113
GL
764 if (tx->cookie > 0 || tx->cookie == -EBUSY) {
765 if (desc->mark == DESC_COMPLETED) {
766 BUG_ON(tx->cookie < 0);
767 desc->mark = DESC_WAITING;
768 }
769 head_acked = async_tx_test_ack(tx);
770 } else {
771 switch (desc->mark) {
772 case DESC_COMPLETED:
773 desc->mark = DESC_WAITING;
774 /* Fall through */
775 case DESC_WAITING:
776 if (head_acked)
777 async_tx_ack(&desc->async_tx);
778 }
779 }
780
781 dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
782 tx, tx->cookie);
783
784 if (((desc->mark == DESC_COMPLETED ||
785 desc->mark == DESC_WAITING) &&
786 async_tx_test_ack(&desc->async_tx)) || all) {
787 /* Remove from ld_queue list */
788 desc->mark = DESC_IDLE;
7a1cd9ad 789
3542a113 790 list_move(&desc->node, &sh_chan->ld_free);
7a1cd9ad
GL
791
792 if (list_empty(&sh_chan->ld_queue)) {
793 dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
794 pm_runtime_put(sh_chan->dev);
795 }
d8902adc
NI
796 }
797 }
2dc66667
GL
798
799 if (all && !callback)
800 /*
801 * Terminating and the loop completed normally: forgive
802 * uncompleted cookies
803 */
804 sh_chan->completed_cookie = sh_chan->common.cookie;
805
b4dae6e1 806 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
3542a113
GL
807
808 if (callback)
809 callback(param);
810
811 return callback;
812}
813
814/*
815 * sh_chan_ld_cleanup - Clean up link descriptors
816 *
817 * This function cleans up the ld_queue of DMA channel.
818 */
819static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
820{
821 while (__ld_cleanup(sh_chan, all))
822 ;
d8902adc
NI
823}
824
7a1cd9ad 825/* Called under spin_lock_irq(&sh_chan->desc_lock) */
d8902adc
NI
826static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
827{
47a4dc26 828 struct sh_desc *desc;
d8902adc
NI
829
830 /* DMA work check */
7a1cd9ad 831 if (dmae_is_busy(sh_chan))
b4dae6e1 832 return;
d8902adc 833
5a3a7658 834 /* Find the first descriptor that has not been transferred yet */
47a4dc26
GL
835 list_for_each_entry(desc, &sh_chan->ld_queue, node)
836 if (desc->mark == DESC_SUBMITTED) {
c014906a
GL
837 dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
838 desc->async_tx.cookie, sh_chan->id,
839 desc->hw.tcr, desc->hw.sar, desc->hw.dar);
3542a113 840 /* Get the ld start address from ld_queue */
47a4dc26 841 dmae_set_reg(sh_chan, &desc->hw);
3542a113
GL
842 dmae_start(sh_chan);
843 break;
844 }
d8902adc
NI
845}
846
847static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
848{
849 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
7a1cd9ad
GL
850
851 spin_lock_irq(&sh_chan->desc_lock);
852 if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
853 sh_chan_xfer_ld_queue(sh_chan);
854 else
855 sh_chan->pm_state = DMAE_PM_PENDING;
856 spin_unlock_irq(&sh_chan->desc_lock);
d8902adc
NI
857}
858
07934481 859static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
d8902adc 860 dma_cookie_t cookie,
07934481 861 struct dma_tx_state *txstate)
d8902adc
NI
862{
863 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
864 dma_cookie_t last_used;
865 dma_cookie_t last_complete;
47a4dc26 866 enum dma_status status;
b4dae6e1 867 unsigned long flags;
d8902adc 868
3542a113 869 sh_dmae_chan_ld_cleanup(sh_chan, false);
d8902adc 870
2dc66667 871 /* First read completed cookie to avoid a skew */
d8902adc 872 last_complete = sh_chan->completed_cookie;
2dc66667
GL
873 rmb();
874 last_used = chan->cookie;
3542a113 875 BUG_ON(last_complete < 0);
bca34692 876 dma_set_tx_state(txstate, last_complete, last_used, 0);
d8902adc 877
b4dae6e1 878 spin_lock_irqsave(&sh_chan->desc_lock, flags);
47a4dc26
GL
879
880 status = dma_async_is_complete(cookie, last_complete, last_used);
881
882 /*
883 * If we don't find the cookie on the queue, it has been aborted and we
884 * have to report an error
885 */
886 if (status != DMA_SUCCESS) {
887 struct sh_desc *desc;
888 status = DMA_ERROR;
889 list_for_each_entry(desc, &sh_chan->ld_queue, node)
890 if (desc->cookie == cookie) {
891 status = DMA_IN_PROGRESS;
892 break;
893 }
894 }
895
b4dae6e1 896 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
47a4dc26
GL
897
898 return status;
d8902adc
NI
899}
900
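/*
 * Interrupt handling is split in two: the hard IRQ handler below only halts
 * the channel when CHCR_TE reports completion and schedules
 * dmae_do_tasklet(), which marks the finished chunk DESC_COMPLETED, starts
 * the next queued descriptor and then runs the cleanup/callback pass.
 */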
901static irqreturn_t sh_dmae_interrupt(int irq, void *data)
902{
903 irqreturn_t ret = IRQ_NONE;
2dc66667
GL
904 struct sh_dmae_chan *sh_chan = data;
905 u32 chcr;
906
907 spin_lock(&sh_chan->desc_lock);
908
5899a723 909 chcr = chcr_read(sh_chan);
d8902adc
NI
910
911 if (chcr & CHCR_TE) {
912 /* DMA stop */
913 dmae_halt(sh_chan);
914
915 ret = IRQ_HANDLED;
916 tasklet_schedule(&sh_chan->tasklet);
917 }
918
2dc66667
GL
919 spin_unlock(&sh_chan->desc_lock);
920
d8902adc
NI
921 return ret;
922}
923
2dc66667
GL
924/* Called from error IRQ or NMI */
925static bool sh_dmae_reset(struct sh_dmae_device *shdev)
d8902adc 926{
03aa18f5 927 unsigned int handled = 0;
47a4dc26 928 int i;
d8902adc 929
47a4dc26 930 /* halt the dma controller */
027811b9 931 sh_dmae_ctl_stop(shdev);
47a4dc26
GL
932
933 /* We cannot detect, which channel caused the error, have to reset all */
8b1935e6 934 for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
47a4dc26 935 struct sh_dmae_chan *sh_chan = shdev->chan[i];
03aa18f5 936 struct sh_desc *desc;
2dc66667 937 LIST_HEAD(dl);
03aa18f5
PM
938
939 if (!sh_chan)
940 continue;
941
2dc66667
GL
942 spin_lock(&sh_chan->desc_lock);
943
03aa18f5
PM
944 /* Stop the channel */
945 dmae_halt(sh_chan);
946
2dc66667
GL
947 list_splice_init(&sh_chan->ld_queue, &dl);
948
7a1cd9ad
GL
949 if (!list_empty(&dl)) {
950 dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
951 pm_runtime_put(sh_chan->dev);
952 }
953 sh_chan->pm_state = DMAE_PM_ESTABLISHED;
954
2dc66667
GL
955 spin_unlock(&sh_chan->desc_lock);
956
03aa18f5 957 /* Complete all */
2dc66667 958 list_for_each_entry(desc, &dl, node) {
03aa18f5
PM
959 struct dma_async_tx_descriptor *tx = &desc->async_tx;
960 desc->mark = DESC_IDLE;
961 if (tx->callback)
962 tx->callback(tx->callback_param);
d8902adc 963 }
03aa18f5 964
2dc66667
GL
965 spin_lock(&sh_chan->desc_lock);
966 list_splice(&dl, &sh_chan->ld_free);
967 spin_unlock(&sh_chan->desc_lock);
968
03aa18f5 969 handled++;
d8902adc 970 }
03aa18f5 971
027811b9 972 sh_dmae_rst(shdev);
47a4dc26 973
03aa18f5
PM
974 return !!handled;
975}
976
977static irqreturn_t sh_dmae_err(int irq, void *data)
978{
ff7690b4
YS
979 struct sh_dmae_device *shdev = data;
980
2dc66667 981 if (!(dmaor_read(shdev) & DMAOR_AE))
ff7690b4 982 return IRQ_NONE;
2dc66667
GL
983
984 sh_dmae_reset(data);
985 return IRQ_HANDLED;
d8902adc 986}
d8902adc
NI
987
988static void dmae_do_tasklet(unsigned long data)
989{
990 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
3542a113 991 struct sh_desc *desc;
d8902adc 992 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
cfefe997 993 u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
86d61b33 994
b4dae6e1 995 spin_lock_irq(&sh_chan->desc_lock);
3542a113 996 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
cfefe997 997 if (desc->mark == DESC_SUBMITTED &&
db8196df 998 ((desc->direction == DMA_DEV_TO_MEM &&
cfefe997
GL
999 (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
1000 (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
3542a113
GL
1001 dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
1002 desc->async_tx.cookie, &desc->async_tx,
1003 desc->hw.dar);
1004 desc->mark = DESC_COMPLETED;
d8902adc
NI
1005 break;
1006 }
1007 }
d8902adc
NI
1008 /* Next desc */
1009 sh_chan_xfer_ld_queue(sh_chan);
7a1cd9ad
GL
1010 spin_unlock_irq(&sh_chan->desc_lock);
1011
3542a113 1012 sh_dmae_chan_ld_cleanup(sh_chan, false);
d8902adc
NI
1013}
1014
03aa18f5
PM
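/*
 * NMI handling: sh_dmae_init() registers sh_dmae_nmi_notifier on the die
 * notifier chain. On an NMI the handler walks sh_dmae_devices under RCU
 * and performs a full sh_dmae_reset() on every controller that has
 * DMAOR_NMIF asserted.
 */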
1015static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
1016{
03aa18f5
PM
1017 /* Fast path out if NMIF is not asserted for this controller */
1018 if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
1019 return false;
1020
2dc66667 1021 return sh_dmae_reset(shdev);
03aa18f5
PM
1022}
1023
1024static int sh_dmae_nmi_handler(struct notifier_block *self,
1025 unsigned long cmd, void *data)
1026{
1027 struct sh_dmae_device *shdev;
1028 int ret = NOTIFY_DONE;
1029 bool triggered;
1030
1031 /*
1032 * Only concern ourselves with NMI events.
1033 *
1034 * Normally we would check the die chain value, but as this needs
1035 * to be architecture independent, check for NMI context instead.
1036 */
1037 if (!in_nmi())
1038 return NOTIFY_DONE;
1039
1040 rcu_read_lock();
1041 list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
1042 /*
1043 * Only stop if one of the controllers has NMIF asserted,
1044 * we do not want to interfere with regular address error
1045 * handling or NMI events that don't concern the DMACs.
1046 */
1047 triggered = sh_dmae_nmi_notify(shdev);
1048 if (triggered == true)
1049 ret = NOTIFY_OK;
1050 }
1051 rcu_read_unlock();
1052
1053 return ret;
1054}
1055
1056static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
1057 .notifier_call = sh_dmae_nmi_handler,
1058
1059 /* Run before NMI debug handler and KGDB */
1060 .priority = 1,
1061};
1062
027811b9
GL
1063static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
1064 int irq, unsigned long flags)
d8902adc
NI
1065{
1066 int err;
5bac942d 1067 const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
027811b9 1068 struct platform_device *pdev = to_platform_device(shdev->common.dev);
d8902adc
NI
1069 struct sh_dmae_chan *new_sh_chan;
1070
1071 /* alloc channel */
1072 new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
1073 if (!new_sh_chan) {
86d61b33
GL
1074 dev_err(shdev->common.dev,
1075 "No free memory for allocating dma channels!\n");
d8902adc
NI
1076 return -ENOMEM;
1077 }
1078
7a1cd9ad
GL
1079 new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
1080
1081 /* reference struct dma_device */
8b1935e6
GL
1082 new_sh_chan->common.device = &shdev->common;
1083
d8902adc
NI
1084 new_sh_chan->dev = shdev->common.dev;
1085 new_sh_chan->id = id;
027811b9
GL
1086 new_sh_chan->irq = irq;
1087 new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
d8902adc
NI
1088
1089 /* Init DMA tasklet */
1090 tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
1091 (unsigned long)new_sh_chan);
1092
d8902adc
NI
1093 spin_lock_init(&new_sh_chan->desc_lock);
1094
1095 /* Initialize the descriptor management lists */
1096 INIT_LIST_HEAD(&new_sh_chan->ld_queue);
1097 INIT_LIST_HEAD(&new_sh_chan->ld_free);
1098
d8902adc
NI
1099 /* Add the channel to DMA device channel list */
1100 list_add_tail(&new_sh_chan->common.device_node,
1101 &shdev->common.channels);
1102 shdev->common.chancnt++;
1103
027811b9
GL
1104 if (pdev->id >= 0)
1105 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
1106 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
1107 else
1108 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
1109 "sh-dma%d", new_sh_chan->id);
d8902adc
NI
1110
1111 /* set up channel irq */
027811b9 1112 err = request_irq(irq, &sh_dmae_interrupt, flags,
86d61b33 1113 new_sh_chan->dev_id, new_sh_chan);
d8902adc
NI
1114 if (err) {
1115 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
1116 "with return %d\n", id, err);
1117 goto err_no_irq;
1118 }
1119
d8902adc
NI
1120 shdev->chan[id] = new_sh_chan;
1121 return 0;
1122
1123err_no_irq:
1124 /* remove from dmaengine device node */
1125 list_del(&new_sh_chan->common.device_node);
1126 kfree(new_sh_chan);
1127 return err;
1128}
1129
1130static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
1131{
1132 int i;
1133
1134 for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
1135 if (shdev->chan[i]) {
027811b9
GL
1136 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1137
1138 free_irq(sh_chan->irq, sh_chan);
d8902adc 1139
027811b9
GL
1140 list_del(&sh_chan->common.device_node);
1141 kfree(sh_chan);
d8902adc
NI
1142 shdev->chan[i] = NULL;
1143 }
1144 }
1145 shdev->common.chancnt = 0;
1146}
1147
1148static int __init sh_dmae_probe(struct platform_device *pdev)
1149{
027811b9
GL
1150 struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
1151 unsigned long irqflags = IRQF_DISABLED,
8b1935e6
GL
1152 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
1153 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
300e5f97 1154 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
d8902adc 1155 struct sh_dmae_device *shdev;
027811b9 1156 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
d8902adc 1157
56adf7e8 1158 /* get platform data */
027811b9 1159 if (!pdata || !pdata->channel_num)
56adf7e8
DW
1160 return -ENODEV;
1161
027811b9 1162 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
26fc02ab 1163 /* DMARS area is optional */
027811b9
GL
1164 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1165 /*
1166 * IRQ resources:
1167 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
1168 * the error IRQ, in which case it is the only IRQ in this resource:
1169 * start == end. If it is the only IRQ resource, all channels also
1170 * use the same IRQ.
1171 * 2. DMA channel IRQ resources can be specified one per resource or in
1172 * ranges (start != end)
1173 * 3. iff all events (channels and, optionally, error) on this
1174 * controller use the same IRQ, only one IRQ resource can be
1175 * specified, otherwise there must be one IRQ per channel, even if
1176 * some of them are equal
1177 * 4. if all IRQs on this controller are equal or if some specific IRQs
1178 * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
1179 * requested with the IRQF_SHARED flag
1180 */
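	/*
	 * Illustrative (hypothetical IRQ numbers) resource layouts matching
	 * the rules above:
	 *
	 * a) everything multiplexed onto one IRQ:
	 *	{ .start = 34, .end = 34, .flags = IORESOURCE_IRQ },
	 *
	 * b) a dedicated error IRQ followed by a range of channel IRQs:
	 *	{ .start = 34, .end = 34, .flags = IORESOURCE_IRQ },
	 *	{ .start = 46, .end = 51, .flags = IORESOURCE_IRQ },
	 */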
1181 errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1182 if (!chan || !errirq_res)
1183 return -ENODEV;
1184
1185 if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
1186 dev_err(&pdev->dev, "DMAC register region already claimed\n");
1187 return -EBUSY;
1188 }
1189
1190 if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
1191 dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
1192 err = -EBUSY;
1193 goto ermrdmars;
1194 }
1195
1196 err = -ENOMEM;
d8902adc
NI
1197 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
1198 if (!shdev) {
027811b9
GL
1199 dev_err(&pdev->dev, "Not enough memory\n");
1200 goto ealloc;
1201 }
1202
1203 shdev->chan_reg = ioremap(chan->start, resource_size(chan));
1204 if (!shdev->chan_reg)
1205 goto emapchan;
1206 if (dmars) {
1207 shdev->dmars = ioremap(dmars->start, resource_size(dmars));
1208 if (!shdev->dmars)
1209 goto emapdmars;
d8902adc
NI
1210 }
1211
d8902adc 1212 /* platform data */
027811b9 1213 shdev->pdata = pdata;
d8902adc 1214
5899a723
KM
1215 if (pdata->chcr_offset)
1216 shdev->chcr_offset = pdata->chcr_offset;
1217 else
1218 shdev->chcr_offset = CHCR;
1219
67c6269e
KM
1220 if (pdata->chcr_ie_bit)
1221 shdev->chcr_ie_bit = pdata->chcr_ie_bit;
1222 else
1223 shdev->chcr_ie_bit = CHCR_IE;
1224
5c2de444
PM
1225 platform_set_drvdata(pdev, shdev);
1226
20f2a3b5
GL
1227 pm_runtime_enable(&pdev->dev);
1228 pm_runtime_get_sync(&pdev->dev);
1229
31705e21 1230 spin_lock_irq(&sh_dmae_lock);
03aa18f5 1231 list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
31705e21 1232 spin_unlock_irq(&sh_dmae_lock);
03aa18f5 1233
2dc66667 1234 /* reset dma controller - only needed as a test */
027811b9 1235 err = sh_dmae_rst(shdev);
d8902adc
NI
1236 if (err)
1237 goto rst_err;
1238
d8902adc
NI
1239 INIT_LIST_HEAD(&shdev->common.channels);
1240
1241 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
26fc02ab 1242 if (pdata->slave && pdata->slave_num)
027811b9 1243 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
cfefe997 1244
d8902adc
NI
1245 shdev->common.device_alloc_chan_resources
1246 = sh_dmae_alloc_chan_resources;
1247 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
1248 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
07934481 1249 shdev->common.device_tx_status = sh_dmae_tx_status;
d8902adc 1250 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
cfefe997
GL
1251
1252 /* Compulsory for DMA_SLAVE fields */
1253 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
c3635c78 1254 shdev->common.device_control = sh_dmae_control;
cfefe997 1255
d8902adc 1256 shdev->common.dev = &pdev->dev;
ddb4f0f0 1257 /* Default transfer size of 2^LOG2_DEFAULT_XFER_SIZE = 4 bytes requires 4-byte alignment */
8b1935e6 1258 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
d8902adc 1259
927a7c9c 1260#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
027811b9
GL
1261 chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1262
1263 if (!chanirq_res)
1264 chanirq_res = errirq_res;
1265 else
1266 irqres++;
1267
1268 if (chanirq_res == errirq_res ||
1269 (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
d8902adc 1270 irqflags = IRQF_SHARED;
027811b9
GL
1271
1272 errirq = errirq_res->start;
1273
1274 err = request_irq(errirq, sh_dmae_err, irqflags,
1275 "DMAC Address Error", shdev);
1276 if (err) {
1277 dev_err(&pdev->dev,
1278 "DMA failed requesting irq #%d, error %d\n",
1279 errirq, err);
1280 goto eirq_err;
d8902adc
NI
1281 }
1282
027811b9
GL
1283#else
1284 chanirq_res = errirq_res;
927a7c9c 1285#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
027811b9
GL
1286
1287 if (chanirq_res->start == chanirq_res->end &&
1288 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
1289 /* Special case - all multiplexed */
1290 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
300e5f97
MD
1291 if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
1292 chan_irq[irq_cnt] = chanirq_res->start;
1293 chan_flag[irq_cnt] = IRQF_SHARED;
1294 } else {
1295 irq_cap = 1;
1296 break;
1297 }
d8902adc 1298 }
027811b9
GL
1299 } else {
1300 do {
1301 for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
dcee0bb7
MD
1302 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
1303 irq_cap = 1;
1304 break;
1305 }
1306
027811b9
GL
1307 if ((errirq_res->flags & IORESOURCE_BITS) ==
1308 IORESOURCE_IRQ_SHAREABLE)
1309 chan_flag[irq_cnt] = IRQF_SHARED;
1310 else
1311 chan_flag[irq_cnt] = IRQF_DISABLED;
1312 dev_dbg(&pdev->dev,
1313 "Found IRQ %d for channel %d\n",
1314 i, irq_cnt);
1315 chan_irq[irq_cnt++] = i;
300e5f97
MD
1316 }
1317
dcee0bb7 1318 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
300e5f97 1319 break;
dcee0bb7 1320
027811b9
GL
1321 chanirq_res = platform_get_resource(pdev,
1322 IORESOURCE_IRQ, ++irqres);
1323 } while (irq_cnt < pdata->channel_num && chanirq_res);
d8902adc 1324 }
027811b9 1325
d8902adc 1326 /* Create DMA Channel */
300e5f97 1327 for (i = 0; i < irq_cnt; i++) {
027811b9 1328 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
d8902adc
NI
1329 if (err)
1330 goto chan_probe_err;
1331 }
1332
300e5f97
MD
1333 if (irq_cap)
1334 dev_notice(&pdev->dev, "Attempting to register %d DMA "
1335 "channels when a maximum of %d are supported.\n",
1336 pdata->channel_num, SH_DMAC_MAX_CHANNELS);
1337
20f2a3b5
GL
1338 pm_runtime_put(&pdev->dev);
1339
d8902adc
NI
1340 dma_async_device_register(&shdev->common);
1341
1342 return err;
1343
1344chan_probe_err:
1345 sh_dmae_chan_remove(shdev);
300e5f97 1346
927a7c9c 1347#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
027811b9 1348 free_irq(errirq, shdev);
d8902adc 1349eirq_err:
027811b9 1350#endif
d8902adc 1351rst_err:
31705e21 1352 spin_lock_irq(&sh_dmae_lock);
03aa18f5 1353 list_del_rcu(&shdev->node);
31705e21 1354 spin_unlock_irq(&sh_dmae_lock);
03aa18f5 1355
20f2a3b5 1356 pm_runtime_put(&pdev->dev);
467017b8
GL
1357 pm_runtime_disable(&pdev->dev);
1358
027811b9
GL
1359 if (dmars)
1360 iounmap(shdev->dmars);
5c2de444
PM
1361
1362 platform_set_drvdata(pdev, NULL);
027811b9
GL
1363emapdmars:
1364 iounmap(shdev->chan_reg);
31705e21 1365 synchronize_rcu();
027811b9 1366emapchan:
d8902adc 1367 kfree(shdev);
027811b9
GL
1368ealloc:
1369 if (dmars)
1370 release_mem_region(dmars->start, resource_size(dmars));
1371ermrdmars:
1372 release_mem_region(chan->start, resource_size(chan));
d8902adc 1373
d8902adc
NI
1374 return err;
1375}
1376
1377static int __exit sh_dmae_remove(struct platform_device *pdev)
1378{
1379 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
027811b9
GL
1380 struct resource *res;
1381 int errirq = platform_get_irq(pdev, 0);
d8902adc
NI
1382
1383 dma_async_device_unregister(&shdev->common);
1384
027811b9
GL
1385 if (errirq > 0)
1386 free_irq(errirq, shdev);
d8902adc 1387
31705e21 1388 spin_lock_irq(&sh_dmae_lock);
03aa18f5 1389 list_del_rcu(&shdev->node);
31705e21 1390 spin_unlock_irq(&sh_dmae_lock);
03aa18f5 1391
d8902adc
NI
1392 /* channel data remove */
1393 sh_dmae_chan_remove(shdev);
1394
20f2a3b5
GL
1395 pm_runtime_disable(&pdev->dev);
1396
027811b9
GL
1397 if (shdev->dmars)
1398 iounmap(shdev->dmars);
1399 iounmap(shdev->chan_reg);
1400
5c2de444
PM
1401 platform_set_drvdata(pdev, NULL);
1402
31705e21 1403 synchronize_rcu();
d8902adc
NI
1404 kfree(shdev);
1405
027811b9
GL
1406 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1407 if (res)
1408 release_mem_region(res->start, resource_size(res));
1409 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1410 if (res)
1411 release_mem_region(res->start, resource_size(res));
1412
d8902adc
NI
1413 return 0;
1414}
1415
1416static void sh_dmae_shutdown(struct platform_device *pdev)
1417{
1418 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
027811b9 1419 sh_dmae_ctl_stop(shdev);
d8902adc
NI
1420}
1421
467017b8
GL
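/*
 * Power management summary: runtime resume merely re-initializes DMAOR via
 * sh_dmae_rst(). System suspend drops the runtime PM reference of every
 * channel that has descriptors allocated; resume takes it back and
 * reprograms the channel (slave DMARS/CHCR or MEMCPY defaults), mirroring
 * what sh_dmae_tx_submit() does when a channel is first used.
 */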
1422static int sh_dmae_runtime_suspend(struct device *dev)
1423{
1424 return 0;
1425}
1426
1427static int sh_dmae_runtime_resume(struct device *dev)
1428{
1429 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1430
1431 return sh_dmae_rst(shdev);
1432}
1433
1434#ifdef CONFIG_PM
1435static int sh_dmae_suspend(struct device *dev)
1436{
1437 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1438 int i;
1439
1440 for (i = 0; i < shdev->pdata->channel_num; i++) {
1441 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1442 if (sh_chan->descs_allocated)
1443 sh_chan->pm_error = pm_runtime_put_sync(dev);
1444 }
1445
1446 return 0;
1447}
1448
1449static int sh_dmae_resume(struct device *dev)
1450{
1451 struct sh_dmae_device *shdev = dev_get_drvdata(dev);
1452 int i;
1453
1454 for (i = 0; i < shdev->pdata->channel_num; i++) {
1455 struct sh_dmae_chan *sh_chan = shdev->chan[i];
1456 struct sh_dmae_slave *param = sh_chan->common.private;
1457
1458 if (!sh_chan->descs_allocated)
1459 continue;
1460
1461 if (!sh_chan->pm_error)
1462 pm_runtime_get_sync(dev);
1463
1464 if (param) {
1465 const struct sh_dmae_slave_config *cfg = param->config;
1466 dmae_set_dmars(sh_chan, cfg->mid_rid);
1467 dmae_set_chcr(sh_chan, cfg->chcr);
1468 } else {
1469 dmae_init(sh_chan);
1470 }
1471 }
1472
1473 return 0;
1474}
1475#else
1476#define sh_dmae_suspend NULL
1477#define sh_dmae_resume NULL
1478#endif
1479
1480const struct dev_pm_ops sh_dmae_pm = {
1481 .suspend = sh_dmae_suspend,
1482 .resume = sh_dmae_resume,
1483 .runtime_suspend = sh_dmae_runtime_suspend,
1484 .runtime_resume = sh_dmae_runtime_resume,
1485};
1486
d8902adc
NI
1487static struct platform_driver sh_dmae_driver = {
1488 .remove = __exit_p(sh_dmae_remove),
1489 .shutdown = sh_dmae_shutdown,
1490 .driver = {
7a5c106a 1491 .owner = THIS_MODULE,
d8902adc 1492 .name = "sh-dma-engine",
467017b8 1493 .pm = &sh_dmae_pm,
d8902adc
NI
1494 },
1495};
1496
1497static int __init sh_dmae_init(void)
1498{
661382fe
GL
1499 /* Wire up NMI handling */
1500 int err = register_die_notifier(&sh_dmae_nmi_notifier);
1501 if (err)
1502 return err;
1503
d8902adc
NI
1504 return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
1505}
1506module_init(sh_dmae_init);
1507
1508static void __exit sh_dmae_exit(void)
1509{
1510 platform_driver_unregister(&sh_dmae_driver);
661382fe
GL
1511
1512 unregister_die_notifier(&sh_dmae_nmi_notifier);
d8902adc
NI
1513}
1514module_exit(sh_dmae_exit);
1515
1516MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1517MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1518MODULE_LICENSE("GPL");
e5843341 1519MODULE_ALIAS("platform:sh-dma-engine");