/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <cpu/dma.h>
#include <asm/dma-sh.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 *
 * This driver uses 4-byte burst mode by default.  To change the mode,
 * change the value of RS_DEFAULT (e.g. 1-byte burst mode: RS_DUAL & ~TS_32).
 */
#define RS_DEFAULT	(RS_DUAL)

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(int id)
{
	unsigned short dmaor = dmaor_read_reg(id);

	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(id, dmaor);
}

static int sh_dmae_rst(int id)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(id);
	dmaor = dmaor_read_reg(id) | DMAOR_INIT;

	dmaor_write_reg(id, dmaor);
	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
	if (chcr & CHCR_DE) {
		if (!(chcr & CHCR_TE))
			return -EBUSY; /* working */
	}
	return 0; /* waiting */
}

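/*
 * TCR counts transfer units, not bytes: the unit size is selected by the
 * CHCR.TS bits.  calc_xmit_shift() returns log2 of the current unit size,
 * so byte lengths can be converted with a plain shift (see dmae_set_reg()).
 */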
static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	int ret = dmae_is_busy(sh_chan);
	/* While the DMA engine is busy, CHCR must not be written */
	if (ret)
		return ret;

	sh_dmae_writel(sh_chan, val, CHCR);
	return 0;
}

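/*
 * Each 16-bit DMARS register carries the peripheral request IDs for two
 * channels: the even-numbered channel in bits 7:0 and the odd-numbered one
 * in bits 15:8, hence the 8-bit shift for odd channel IDs below.
 */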
#define DMARS1_ADDR	0x04
#define DMARS2_ADDR	0x08
#define DMARS_SHIFT	8
#define DMARS_CHAN_MSK	0x01
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	u32 addr;
	int shift = 0;
	int ret = dmae_is_busy(sh_chan);
	if (ret)
		return ret;

	if (sh_chan->id & DMARS_CHAN_MSK)
		shift = DMARS_SHIFT;

	switch (sh_chan->id) {
	/* DMARS0 */
	case 0:
	case 1:
		addr = SH_DMARS_BASE;
		break;
	/* DMARS1 */
	case 2:
	case 3:
		addr = (SH_DMARS_BASE + DMARS1_ADDR);
		break;
	/* DMARS2 */
	case 4:
	case 5:
		addr = (SH_DMARS_BASE + DMARS2_ADDR);
		break;
	default:
		return -EINVAL;
	}

	/* Preserve the other channel's byte while writing ours */
	ctrl_outw((val << shift) |
		(ctrl_inw(addr) & (shift ? 0x00FF : 0xFF00)),
		addr);

	return 0;
}

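/*
 * tx_submit: assign one cookie to every chunk of a descriptor chain and
 * move the chunks from ld_free to ld_queue.  Only the first chunk is
 * visible to the user; the callback is attached to the last chunk.
 */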
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

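/*
 * desc_lock is dropped around the GFP_KERNEL allocation below, because
 * kzalloc() may sleep; the lock is re-taken before the lists are touched.
 */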
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return sh_chan->descs_allocated;
}

/*
 * sh_dma_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

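/*
 * A memcpy longer than SH_DMA_TCR_MAX + 1 bytes is split into chunks of at
 * most SH_DMA_TCR_MAX + 1 bytes each; 'chunks' below is the ceiling of
 * len / (SH_DMA_TCR_MAX + 1).  Assuming the 16MB limit noted in the header
 * comment, a 40MB copy would take three chunks: 16MB + 16MB + 8MB.
 */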
static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct sh_desc *first = NULL, *prev = NULL, *new;
	size_t copy_size;
	LIST_HEAD(tx_list);
	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 * cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	do {
		/* Allocate the link descriptor from the free list */
		new = sh_dmae_get_desc(sh_chan);
		if (!new) {
			dev_err(sh_chan->dev,
				"No free memory for link descriptor\n");
			list_for_each_entry(new, &tx_list, node)
				new->mark = DESC_IDLE;
			list_splice(&tx_list, &sh_chan->ld_free);
			spin_unlock_bh(&sh_chan->desc_lock);
			return NULL;
		}

		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);

		new->hw.sar = dma_src;
		new->hw.dar = dma_dest;
		new->hw.tcr = copy_size;
		if (!first) {
			/* First desc */
			new->async_tx.cookie = -EBUSY;
			first = new;
		} else {
			/* Other desc - invisible to the user */
			new->async_tx.cookie = -EINVAL;
		}

		dev_dbg(sh_chan->dev,
			"chaining %zu of %zu with %p, dst %x, cookie %d\n",
			copy_size, len, &new->async_tx, dma_dest,
			new->async_tx.cookie);

		new->mark = DESC_PREPARED;
		new->async_tx.flags = flags;
		new->chunks = chunks--;

		prev = new;
		len -= copy_size;
		dma_src += copy_size;
		dma_dest += copy_size;
		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &tx_list);
	} while (len);

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;
}

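/*
 * Walk ld_queue and retire completed descriptors.  At most one user
 * callback is picked per call and invoked after desc_lock is dropped;
 * it is also returned, so sh_dmae_chan_ld_cleanup() keeps calling until
 * no callback is left to run.
 */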
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

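/*
 * Kick the hardware if it is idle: program SAR/DAR/TCR from the first
 * SUBMITTED descriptor on ld_queue and set CHCR.DE.  Called both from
 * issue_pending and from the tasklet, to start the next queued chunk.
 */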
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *sd;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first untransferred descriptor */
	list_for_each_entry(sd, &sh_chan->ld_queue, node)
		if (sd->mark == DESC_SUBMITTED) {
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &sd->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

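/*
 * Per-channel interrupt: CHCR.TE is set by the controller on transfer
 * end.  Halt the channel here and defer descriptor bookkeeping and
 * restarting to the tasklet.
 */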
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

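/*
 * Address-error interrupt.  In SHDMA_MIX_IRQ mode the error sources share
 * lines with the channel interrupts, so the DMAOR flags decide whether
 * this event is ours; otherwise the controller(s) are simply reset.
 */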
#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	int err = 0;
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;

	/* IRQ Multi */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		int cnt = 0;
		switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		case DMTE6_IRQ:
			cnt++;
#endif
		case DMTE0_IRQ:
			if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
				disable_irq(irq);
				return IRQ_HANDLED;
			}
		default:
			return IRQ_NONE;
		}
	} else {
		/* reset dma controller */
		err = sh_dmae_rst(0);
		if (err)
			return IRQ_NONE;
#ifdef SH_DMAC_BASE1
		if (shdev->pdata.mode & SHDMA_DMAOR1) {
			err = sh_dmae_rst(1);
			if (err)
				return IRQ_NONE;
		}
#endif
		disable_irq(irq);
		return IRQ_HANDLED;
	}
}
#endif

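/*
 * The tasklet runs after a transfer-end interrupt.  In dual address mode
 * SAR increments as data is copied, so the finished chunk is identified
 * as the SUBMITTED descriptor whose source range ends exactly at the
 * current SAR value (hw.sar + hw.tcr == SAR).
 */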
static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
		    desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static unsigned int get_dmae_irq(unsigned int id)
{
	unsigned int irq = 0;
	if (id < ARRAY_SIZE(dmte_irq_map))
		irq = dmte_irq_map[id];
	return irq;
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
{
	int err;
	unsigned int irq = get_dmae_irq(id);
	unsigned long irqflags = IRQF_DISABLED;
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
			(unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
			&shdev->common.channels);
	shdev->common.chancnt++;

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
#if defined(DMTE6_IRQ)
		if (irq >= DMTE6_IRQ)
			irq = DMTE6_IRQ;
		else
#endif
			irq = DMTE0_IRQ;
	}

	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
		 "sh-dmae%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	/* CHCR register control function */
	new_sh_chan->set_chcr = dmae_set_chcr;
	/* DMARS register control function */
	new_sh_chan->set_dmars = dmae_set_dmars;

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *shchan = shdev->chan[i];
			if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
				free_irq(dmte_irq_map[i], shchan);

			list_del(&shchan->common.device_node);
			kfree(shchan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	int err = 0, cnt, ecnt;
	unsigned long irqflags = IRQF_DISABLED;
#if defined(CONFIG_CPU_SH4)
	int eirq[] = { DMAE0_IRQ,
#if defined(DMAE1_IRQ)
			DMAE1_IRQ
#endif
		};
#endif
	struct sh_dmae_device *shdev;

	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		err = -ENOMEM;
		goto shdev_err;
	}

	/* get platform data */
	if (!pdev->dev.platform_data) {
		err = -ENODEV;
		goto rst_err;
	}

	/* platform data */
	memcpy(&shdev->pdata, pdev->dev.platform_data,
		sizeof(struct sh_dmae_pdata));

	/* reset dma controller */
	err = sh_dmae_rst(0);
	if (err)
		goto rst_err;

	/* SH7780/85/23 has DMAOR1 */
	if (shdev->pdata.mode & SHDMA_DMAOR1) {
		err = sh_dmae_rst(1);
		if (err)
			goto rst_err;
	}

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 32 bytes requires 32-byte alignment */
	shdev->common.copy_align = 5;

#if defined(CONFIG_CPU_SH4)
	/* Non Mix IRQ mode SH7722/SH7730 etc... */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
		eirq[0] = DMTE0_IRQ;
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		eirq[1] = DMTE6_IRQ;
#endif
	}

	for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
		err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
				  "DMAC Address Error", shdev);
		if (err) {
			dev_err(&pdev->dev, "DMA device request_irq "
				"error (irq %d) with return %d\n",
				eirq[ecnt], err);
			goto eirq_err;
		}
	}
#endif /* CONFIG_CPU_SH4 */

	/* Create DMA Channel */
	for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
		err = sh_dmae_chan_probe(shdev, cnt);
		if (err)
			goto chan_probe_err;
	}

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);

eirq_err:
	for (ecnt-- ; ecnt >= 0; ecnt--)
		free_irq(eirq[ecnt], shdev);

rst_err:
	kfree(shdev);

shdev_err:
	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&shdev->common);

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		free_irq(DMTE0_IRQ, shdev);
#if defined(DMTE6_IRQ)
		free_irq(DMTE6_IRQ, shdev);
#endif
	}

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
		free_irq(DMAE0_IRQ, shdev);
#if defined(DMAE1_IRQ)
		free_irq(DMAE1_IRQ, shdev);
#endif
	}
	kfree(shdev);

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(0);
	if (shdev->pdata.mode & SHDMA_DMAOR1)
		sh_dmae_ctl_stop(1);
}

static struct platform_driver sh_dmae_driver = {
	.remove = __exit_p(sh_dmae_remove),
	.shutdown = sh_dmae_shutdown,
	.driver = {
		.name = "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");