/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   fits the MPC8560, MPC8555, MPC8548, MPC8641, and similar parts.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * Set the source address hold (loop) transfer size. While the hold is
 * active, the DMA transfers data from a window starting at the source
 * address (SA): if the loop size is 4, the DMA will read data from SA,
 * SA + 1, SA + 2, SA + 3, then loop back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 for disable loop
 *
 * Set the destination address hold (loop) transfer size. While the hold is
 * active, the DMA transfers data to a window starting at the destination
 * address (TA): if the loop size is 4, the DMA will write data to TA,
 * TA + 1, TA + 2, TA + 3, then loop back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

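/*
 * Worked example (editorial sketch, not in the original source): for a
 * request count of 64 bytes, __ilog2(64) = 6, so the value OR-ed into the
 * mode register's BWC field is (6 << 24) & 0x0f000000 = 0x06000000.
 */
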
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the
 * transfer immediately. The DMA channel will wait until the
 * control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

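/*
 * Illustration (editorial sketch, not in the original source): appending a
 * new transaction B after a pending transaction A rewrites A's tail link:
 *
 *   before:  A1 -> A2 -> A3 [EOL]    B1 -> B2 [EOL]
 *   after:   A1 -> A2 -> A3 -> B1 -> B2 [EOL]
 *
 * set_desc_next() writes a fresh next_ln_addr into A3, which clears its
 * EOL bit, so the hardware walks straight from A into B.
 */
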
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor. NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * to meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

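/*
 * Usage sketch (editorial addition, not part of the original driver): the
 * dmaengine core invokes the hook above when a client claims the channel,
 * e.g.:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 * dma_request_channel() returns NULL if no capable channel is free.
 */
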
/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

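/*
 * Usage sketch (editorial addition, not part of the original driver): how
 * a dmaengine client might drive the memcpy path above. The channel is
 * assumed to come from dma_request_channel() and both addresses are
 * assumed to be DMA-mapped already; the function name is hypothetical.
 */
static int __maybe_unused fsldma_example_memcpy(struct dma_chan *dchan,
						dma_addr_t dst, dma_addr_t src,
						size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* build the (possibly multi-descriptor) transaction */
	tx = dchan->device->device_prep_dma_memcpy(dchan, dst, src, len,
						   DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	/* queue it on the pending list, then kick the controller */
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(dchan);

	return dma_submit_error(cookie);
}
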
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: DMAEngine flags
 * @context: transaction context (ignored)
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		chan->idle = true;

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_MEM_TO_DEV)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}

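/*
 * Usage sketch (editorial addition, not part of the original driver): a
 * client might program the DREQ# request count and arm external start
 * through the device_control() hook above. All values are hypothetical.
 */
static int __maybe_unused fsldma_example_ext_start(struct dma_chan *dchan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,	/* 4 bytes * 16 = 64 byte request count */
	};
	int ret;

	/* ends up in fsl_chan_set_request_count() via DMA_SLAVE_CONFIG */
	ret = dmaengine_slave_config(dchan, &cfg);
	if (ret)
		return ret;

	/* driver-private command handled in fsl_dma_device_control() */
	return dchan->device->device_control(dchan, FSLDMA_EXTERNAL_START, 1);
}
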
/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	struct device *dev = chan->common.device->dev;
	dma_addr_t src = get_desc_src(chan, desc);
	dma_addr_t dst = get_desc_dst(chan, desc);
	u32 len = get_desc_cnt(chan, desc);

	/* Run the link descriptor callback function */
	if (txd->callback) {
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p callback\n", desc);
#endif
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	/* Unmap the dst buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
	}

	/* Unmap the src buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
	}

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p free\n", desc);
#endif
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}

895/**
9c3a50b7 896 * fsl_chan_xfer_ld_queue - transfer any pending transactions
a1c03319 897 * @chan : Freescale DMA channel
9c3a50b7 898 *
f04cd407 899 * HARDWARE STATE: idle
dc8d4091 900 * LOCKING: must hold chan->desc_lock
173acc7c 901 */
a1c03319 902static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
173acc7c 903{
9c3a50b7 904 struct fsl_desc_sw *desc;
138ef018 905
9c3a50b7
IS
906 /*
907 * If the list of pending descriptors is empty, then we
908 * don't need to do any work at all
909 */
910 if (list_empty(&chan->ld_pending)) {
b158471e 911 chan_dbg(chan, "no pending LDs\n");
dc8d4091 912 return;
9c3a50b7 913 }
173acc7c 914
9c3a50b7 915 /*
f04cd407
IS
916 * The DMA controller is not idle, which means that the interrupt
917 * handler will start any queued transactions when it runs after
918 * this transaction finishes
9c3a50b7 919 */
f04cd407 920 if (!chan->idle) {
b158471e 921 chan_dbg(chan, "DMA controller still busy\n");
dc8d4091 922 return;
9c3a50b7
IS
923 }
924
9c3a50b7
IS
925 /*
926 * If there are some link descriptors which have not been
927 * transferred, we need to start the controller
173acc7c 928 */
173acc7c 929
9c3a50b7
IS
930 /*
931 * Move all elements from the queue of pending transactions
932 * onto the list of running transactions
933 */
f04cd407 934 chan_dbg(chan, "idle, starting controller\n");
9c3a50b7
IS
935 desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
936 list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
937
f04cd407
IS
938 /*
939 * The 85xx DMA controller doesn't clear the channel start bit
940 * automatically at the end of a transfer. Therefore we must clear
941 * it in software before starting the transfer.
942 */
943 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
944 u32 mode;
945
946 mode = DMA_IN(chan, &chan->regs->mr, 32);
947 mode &= ~FSL_DMA_MR_CS;
948 DMA_OUT(chan, &chan->regs->mr, mode, 32);
949 }
950
9c3a50b7
IS
951 /*
952 * Program the descriptor's address into the DMA controller,
953 * then start the DMA transaction
954 */
955 set_cdar(chan, desc->async_tx.phys);
f04cd407 956 get_cdar(chan);
138ef018 957
9c3a50b7 958 dma_start(chan);
f04cd407 959 chan->idle = false;
173acc7c
ZW
960}
961
/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

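/*
 * Usage sketch (editorial addition, not part of the original driver):
 * polling a submitted transaction for completion through the status hook
 * above. The cookie is assumed to come from a prior dmaengine_submit().
 */
static bool __maybe_unused fsldma_example_done(struct dma_chan *dchan,
					       dma_cookie_t cookie)
{
	/* NULL last/used cookies: we only care about the completion state */
	return dma_async_is_tx_complete(dchan, cookie, NULL, NULL)
			== DMA_SUCCESS;
}
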
/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                          */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->ld_running)) {
		dma_cookie_t cookie;

		desc = to_fsl_desc(chan->ld_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		chan_dbg(chan, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {

		/* Remove from the list of transactions */
		list_del(&desc->node);

		/* Run all cleanup for this descriptor */
		fsldma_cleanup_descriptor(chan, desc);
	}

	chan_dbg(chan, "tasklet exit\n");
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                      */
/*----------------------------------------------------------------------------*/

static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different from the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: the 85xx also supports the 83xx callbacks */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	chan->idle = true;

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                          */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo / Elo Plus DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");