/*
 * Driver for the TXx9 SoC DMA Controller
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
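
/*
 * This driver exposes the TXx9 DMA controller through the generic
 * dmaengine API.  A slave client hands the device FIFO address to the
 * driver through a struct txx9dmac_slave hung off chan->private before
 * the channel is set up.  An illustrative sketch (not taken from this
 * file; TXREG_PHYS_ADDR stands in for a real device register address):
 *
 *	static struct txx9dmac_slave sl = {
 *		.tx_reg    = TXREG_PHYS_ADDR,	(a made-up example address)
 *		.reg_width = 4,
 *	};
 *	chan->private = &sl;
 *
 * Exactly one of tx_reg/rx_reg may be set and reg_width must be
 * non-zero; txx9dmac_alloc_chan_resources() and
 * txx9dmac_prep_slave_sg() enforce this.
 */
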
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include "txx9dmac.h"

static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
{
        return container_of(chan, struct txx9dmac_chan, chan);
}

static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
{
        return dc->ch_regs;
}

static struct txx9dmac_cregs32 __iomem *__dma_regs32(
        const struct txx9dmac_chan *dc)
{
        return dc->ch_regs;
}

#define channel64_readq(dc, name) \
        __raw_readq(&(__dma_regs(dc)->name))
#define channel64_writeq(dc, name, val) \
        __raw_writeq((val), &(__dma_regs(dc)->name))
#define channel64_readl(dc, name) \
        __raw_readl(&(__dma_regs(dc)->name))
#define channel64_writel(dc, name, val) \
        __raw_writel((val), &(__dma_regs(dc)->name))

#define channel32_readl(dc, name) \
        __raw_readl(&(__dma_regs32(dc)->name))
#define channel32_writel(dc, name, val) \
        __raw_writel((val), &(__dma_regs32(dc)->name))

#define channel_readq(dc, name) channel64_readq(dc, name)
#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
#define channel_readl(dc, name) \
        (is_dmac64(dc) ? \
        channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
        (is_dmac64(dc) ? \
        channel64_writel(dc, name, val) : channel32_writel(dc, name, val))

static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
        if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
                return channel64_readq(dc, CHAR);
        else
                return channel64_readl(dc, CHAR);
}

static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
        if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
                channel64_writeq(dc, CHAR, val);
        else
                channel64_writel(dc, CHAR, val);
}

static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
        channel64_writel(dc, CHAR, 0);
        channel64_writel(dc, __pad_CHAR, 0);
#else
        channel64_writeq(dc, CHAR, 0);
#endif
}

static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
        if (is_dmac64(dc))
                return channel64_read_CHAR(dc);
        else
                return channel32_readl(dc, CHAR);
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
        if (is_dmac64(dc))
                channel64_write_CHAR(dc, val);
        else
                channel32_writel(dc, CHAR, val);
}

static struct txx9dmac_regs __iomem *__txx9dmac_regs(
        const struct txx9dmac_dev *ddev)
{
        return ddev->regs;
}

static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
        const struct txx9dmac_dev *ddev)
{
        return ddev->regs;
}

#define dma64_readl(ddev, name) \
        __raw_readl(&(__txx9dmac_regs(ddev)->name))
#define dma64_writel(ddev, name, val) \
        __raw_writel((val), &(__txx9dmac_regs(ddev)->name))

#define dma32_readl(ddev, name) \
        __raw_readl(&(__txx9dmac_regs32(ddev)->name))
#define dma32_writel(ddev, name, val) \
        __raw_writel((val), &(__txx9dmac_regs32(ddev)->name))

#define dma_readl(ddev, name) \
        (__is_dmac64(ddev) ? \
        dma64_readl(ddev, name) : dma32_readl(ddev, name))
#define dma_writel(ddev, name, val) \
        (__is_dmac64(ddev) ? \
        dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))

static struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
        return chan->dev->device.parent;
}

static struct txx9dmac_desc *
txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct txx9dmac_desc, txd);
}

static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
                                 const struct txx9dmac_desc *desc)
{
        return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
}

static void desc_write_CHAR(const struct txx9dmac_chan *dc,
                            struct txx9dmac_desc *desc, dma_addr_t val)
{
        if (is_dmac64(dc))
                desc->hwdesc.CHAR = val;
        else
                desc->hwdesc32.CHAR = val;
}

#define TXX9_DMA_MAX_COUNT              0x04000000

#define TXX9_DMA_INITIAL_DESC_COUNT     64

static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
{
        return list_entry(dc->active_list.next,
                          struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
{
        return list_entry(dc->active_list.prev,
                          struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
{
        return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
        if (!list_empty(&desc->txd.tx_list))
                desc = list_entry(desc->txd.tx_list.prev,
                                  struct txx9dmac_desc, desc_node);
        return desc;
}

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);

static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
                                                 gfp_t flags)
{
        struct txx9dmac_dev *ddev = dc->ddev;
        struct txx9dmac_desc *desc;

        desc = kzalloc(sizeof(*desc), flags);
        if (!desc)
                return NULL;
        dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
        desc->txd.tx_submit = txx9dmac_tx_submit;
        /* txd.flags will be overwritten in prep funcs */
        desc->txd.flags = DMA_CTRL_ACK;
        desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
                                        ddev->descsize, DMA_TO_DEVICE);
        return desc;
}

static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
{
        struct txx9dmac_desc *desc, *_desc;
        struct txx9dmac_desc *ret = NULL;
        unsigned int i = 0;

        spin_lock_bh(&dc->lock);
        list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
                i++;
        }
        spin_unlock_bh(&dc->lock);

        dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
                 i);
        if (!ret) {
                ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
                if (ret) {
                        spin_lock_bh(&dc->lock);
                        dc->descs_allocated++;
                        spin_unlock_bh(&dc->lock);
                } else
                        dev_err(chan2dev(&dc->chan),
                                "not enough descriptors available\n");
        }
        return ret;
}

static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
                                       struct txx9dmac_desc *desc)
{
        struct txx9dmac_dev *ddev = dc->ddev;
        struct txx9dmac_desc *child;

        list_for_each_entry(child, &desc->txd.tx_list, desc_node)
                dma_sync_single_for_cpu(chan2parent(&dc->chan),
                                        child->txd.phys, ddev->descsize,
                                        DMA_TO_DEVICE);
        dma_sync_single_for_cpu(chan2parent(&dc->chan),
                                desc->txd.phys, ddev->descsize,
                                DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
                              struct txx9dmac_desc *desc)
{
        if (desc) {
                struct txx9dmac_desc *child;

                txx9dmac_sync_desc_for_cpu(dc, desc);

                spin_lock_bh(&dc->lock);
                list_for_each_entry(child, &desc->txd.tx_list, desc_node)
                        dev_vdbg(chan2dev(&dc->chan),
                                 "moving child desc %p to freelist\n",
                                 child);
                list_splice_init(&desc->txd.tx_list, &dc->free_list);
                dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
                         desc);
                list_add(&desc->desc_node, &dc->free_list);
                spin_unlock_bh(&dc->lock);
        }
}

/* Called with dc->lock held and bh disabled */
static dma_cookie_t
txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
{
        dma_cookie_t cookie = dc->chan.cookie;

        if (++cookie < 0)
                cookie = 1;

        dc->chan.cookie = cookie;
        desc->txd.cookie = cookie;

        return cookie;
}

/*----------------------------------------------------------------------*/

static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
{
        if (is_dmac64(dc))
                dev_err(chan2dev(&dc->chan),
                        " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
                        " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
                        (u64)channel64_read_CHAR(dc),
                        channel64_readq(dc, SAR),
                        channel64_readq(dc, DAR),
                        channel64_readl(dc, CNTR),
                        channel64_readl(dc, SAIR),
                        channel64_readl(dc, DAIR),
                        channel64_readl(dc, CCR),
                        channel64_readl(dc, CSR));
        else
                dev_err(chan2dev(&dc->chan),
                        " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
                        " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
                        channel32_readl(dc, CHAR),
                        channel32_readl(dc, SAR),
                        channel32_readl(dc, DAR),
                        channel32_readl(dc, CNTR),
                        channel32_readl(dc, SAIR),
                        channel32_readl(dc, DAIR),
                        channel32_readl(dc, CCR),
                        channel32_readl(dc, CSR));
}

static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
{
        channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
        if (is_dmac64(dc)) {
                channel64_clear_CHAR(dc);
                channel_writeq(dc, SAR, 0);
                channel_writeq(dc, DAR, 0);
        } else {
                channel_writel(dc, CHAR, 0);
                channel_writel(dc, SAR, 0);
                channel_writel(dc, DAR, 0);
        }
        channel_writel(dc, CNTR, 0);
        channel_writel(dc, SAIR, 0);
        channel_writel(dc, DAIR, 0);
        channel_writel(dc, CCR, 0);
        mmiowb();
}

/* Called with dc->lock held and bh disabled */
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
                             struct txx9dmac_desc *first)
{
        struct txx9dmac_slave *ds = dc->chan.private;
        u32 sai, dai;

        dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
                 first->txd.cookie, first);
        /* ASSERT: channel is idle */
        if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
                dev_err(chan2dev(&dc->chan),
                        "BUG: Attempted to start non-idle channel\n");
                txx9dmac_dump_regs(dc);
                /* The tasklet will hopefully advance the queue... */
                return;
        }

        if (is_dmac64(dc)) {
                channel64_writel(dc, CNTR, 0);
                channel64_writel(dc, CSR, 0xffffffff);
                if (ds) {
                        if (ds->tx_reg) {
                                sai = ds->reg_width;
                                dai = 0;
                        } else {
                                sai = 0;
                                dai = ds->reg_width;
                        }
                } else {
                        sai = 8;
                        dai = 8;
                }
                channel64_writel(dc, SAIR, sai);
                channel64_writel(dc, DAIR, dai);
                /* All 64-bit DMACs support SMPCHN */
                channel64_writel(dc, CCR, dc->ccr);
                /* Writing a non-zero value to CHAR will assert XFACT */
                channel64_write_CHAR(dc, first->txd.phys);
        } else {
                channel32_writel(dc, CNTR, 0);
                channel32_writel(dc, CSR, 0xffffffff);
                if (ds) {
                        if (ds->tx_reg) {
                                sai = ds->reg_width;
                                dai = 0;
                        } else {
                                sai = 0;
                                dai = ds->reg_width;
                        }
                } else {
                        sai = 4;
                        dai = 4;
                }
                channel32_writel(dc, SAIR, sai);
                channel32_writel(dc, DAIR, dai);
                if (txx9_dma_have_SMPCHN()) {
                        channel32_writel(dc, CCR, dc->ccr);
                        /* Writing a non-zero value to CHAR will assert XFACT */
                        channel32_writel(dc, CHAR, first->txd.phys);
                } else {
                        channel32_writel(dc, CHAR, first->txd.phys);
                        channel32_writel(dc, CCR, dc->ccr);
                }
        }
}

/*----------------------------------------------------------------------*/

static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
                             struct txx9dmac_desc *desc)
{
        dma_async_tx_callback callback;
        void *param;
        struct dma_async_tx_descriptor *txd = &desc->txd;
        struct txx9dmac_slave *ds = dc->chan.private;

        dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
                 txd->cookie, desc);

        dc->completed = txd->cookie;
        callback = txd->callback;
        param = txd->callback_param;

        txx9dmac_sync_desc_for_cpu(dc, desc);
        list_splice_init(&txd->tx_list, &dc->free_list);
        list_move(&desc->desc_node, &dc->free_list);

        /*
         * We use dma_unmap_page() regardless of how the buffers were
         * mapped before they were submitted...
         */
        if (!ds) {
                dma_addr_t dmaaddr;
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                        dmaaddr = is_dmac64(dc) ?
                                desc->hwdesc.DAR : desc->hwdesc32.DAR;
                        dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
                                       desc->len, DMA_FROM_DEVICE);
                }
                if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                        dmaaddr = is_dmac64(dc) ?
                                desc->hwdesc.SAR : desc->hwdesc32.SAR;
                        dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
                                       desc->len, DMA_TO_DEVICE);
                }
        }

        /*
         * The API requires that no submissions are done from a
         * callback, so we don't need to drop the lock here
         */
        if (callback)
                callback(param);
        dma_run_dependencies(txd);
}

static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
{
        struct txx9dmac_dev *ddev = dc->ddev;
        struct txx9dmac_desc *desc;
        struct txx9dmac_desc *prev = NULL;

        BUG_ON(!list_empty(list));
        do {
                desc = txx9dmac_first_queued(dc);
                if (prev) {
                        desc_write_CHAR(dc, prev, desc->txd.phys);
                        dma_sync_single_for_device(chan2parent(&dc->chan),
                                prev->txd.phys, ddev->descsize,
                                DMA_TO_DEVICE);
                }
                prev = txx9dmac_last_child(desc);
                list_move_tail(&desc->desc_node, list);
                /* Make chain-completion interrupt happen */
                if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
                    !txx9dmac_chan_INTENT(dc))
                        break;
        } while (!list_empty(&dc->queue));
}

static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
{
        struct txx9dmac_desc *desc, *_desc;
        LIST_HEAD(list);

        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
         */
        list_splice_init(&dc->active_list, &list);
        if (!list_empty(&dc->queue)) {
                txx9dmac_dequeue(dc, &dc->active_list);
                txx9dmac_dostart(dc, txx9dmac_first_active(dc));
        }

        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                txx9dmac_descriptor_complete(dc, desc);
}

static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
                               struct txx9dmac_hwdesc *desc)
{
        if (is_dmac64(dc)) {
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
                dev_crit(chan2dev(&dc->chan),
                         " desc: ch%#llx s%#llx d%#llx c%#x\n",
                         (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
#else
                dev_crit(chan2dev(&dc->chan),
                         " desc: ch%#llx s%#llx d%#llx c%#x"
                         " si%#x di%#x cc%#x cs%#x\n",
                         (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
                         desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
#endif
        } else {
                struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
                dev_crit(chan2dev(&dc->chan),
                         " desc: ch%#x s%#x d%#x c%#x\n",
                         d->CHAR, d->SAR, d->DAR, d->CNTR);
#else
                dev_crit(chan2dev(&dc->chan),
                         " desc: ch%#x s%#x d%#x c%#x"
                         " si%#x di%#x cc%#x cs%#x\n",
                         d->CHAR, d->SAR, d->DAR, d->CNTR,
                         d->SAIR, d->DAIR, d->CCR, d->CSR);
#endif
        }
}

static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
        struct txx9dmac_desc *bad_desc;
        struct txx9dmac_desc *child;
        u32 errors;

        /*
         * The descriptor currently at the head of the active list is
         * borked. Since we don't have any way to report errors, we'll
         * just have to scream loudly and try to carry on.
         */
        dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
        txx9dmac_dump_regs(dc);

        bad_desc = txx9dmac_first_active(dc);
        list_del_init(&bad_desc->desc_node);

        /* Clear all error flags and try to restart the controller */
        errors = csr & (TXX9_DMA_CSR_ABCHC |
                        TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
                        TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
        channel_writel(dc, CSR, errors);

        if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
                txx9dmac_dequeue(dc, &dc->active_list);
        if (!list_empty(&dc->active_list))
                txx9dmac_dostart(dc, txx9dmac_first_active(dc));

        dev_crit(chan2dev(&dc->chan),
                 "Bad descriptor submitted for DMA! (cookie: %d)\n",
                 bad_desc->txd.cookie);
        txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
        list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
                txx9dmac_dump_desc(dc, &child->hwdesc);
        /* Pretend the descriptor completed successfully */
        txx9dmac_descriptor_complete(dc, bad_desc);
}

static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
        dma_addr_t chain;
        struct txx9dmac_desc *desc, *_desc;
        struct txx9dmac_desc *child;
        u32 csr;

        if (is_dmac64(dc)) {
                chain = channel64_read_CHAR(dc);
                csr = channel64_readl(dc, CSR);
                channel64_writel(dc, CSR, csr);
        } else {
                chain = channel32_readl(dc, CHAR);
                csr = channel32_readl(dc, CSR);
                channel32_writel(dc, CSR, csr);
        }
        /* For dynamic chain, we should look at XFACT instead of NCHNC */
        if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
                /* Everything we've submitted is done */
                txx9dmac_complete_all(dc);
                return;
        }
        if (!(csr & TXX9_DMA_CSR_CHNEN))
                chain = 0;      /* last descriptor of this chain */

        dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
                 (u64)chain);

        list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
                if (desc_read_CHAR(dc, desc) == chain) {
                        /* This one is currently in progress */
                        if (csr & TXX9_DMA_CSR_ABCHC)
                                goto scan_done;
                        return;
                }

                list_for_each_entry(child, &desc->txd.tx_list, desc_node)
                        if (desc_read_CHAR(dc, child) == chain) {
                                /* Currently in progress */
                                if (csr & TXX9_DMA_CSR_ABCHC)
                                        goto scan_done;
                                return;
                        }

                /*
                 * No descriptors so far seem to be in progress, i.e.
                 * this one must be done.
                 */
                txx9dmac_descriptor_complete(dc, desc);
        }
scan_done:
        if (csr & TXX9_DMA_CSR_ABCHC) {
                txx9dmac_handle_error(dc, csr);
                return;
        }

        dev_err(chan2dev(&dc->chan),
                "BUG: All descriptors done, but channel not idle!\n");

        /* Try to continue after resetting the channel... */
        txx9dmac_reset_chan(dc);

        if (!list_empty(&dc->queue)) {
                txx9dmac_dequeue(dc, &dc->active_list);
                txx9dmac_dostart(dc, txx9dmac_first_active(dc));
        }
}

static void txx9dmac_chan_tasklet(unsigned long data)
{
        int irq;
        u32 csr;
        struct txx9dmac_chan *dc;

        dc = (struct txx9dmac_chan *)data;
        csr = channel_readl(dc, CSR);
        dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

        spin_lock(&dc->lock);
        if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
                   TXX9_DMA_CSR_NTRNFC))
                txx9dmac_scan_descriptors(dc);
        spin_unlock(&dc->lock);
        irq = dc->irq;

        enable_irq(irq);
}

static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
        struct txx9dmac_chan *dc = dev_id;

        dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
                 channel_readl(dc, CSR));

        tasklet_schedule(&dc->tasklet);
        /*
         * Just disable the interrupts. We'll turn them back on in the
         * softirq handler.
         */
        disable_irq_nosync(irq);

        return IRQ_HANDLED;
}

static void txx9dmac_tasklet(unsigned long data)
{
        int irq;
        u32 csr;
        struct txx9dmac_chan *dc;

        struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
        u32 mcr;
        int i;

        mcr = dma_readl(ddev, MCR);
        dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
        for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
                if ((mcr >> (24 + i)) & 0x11) {
                        dc = ddev->chan[i];
                        csr = channel_readl(dc, CSR);
                        dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
                                 csr);
                        spin_lock(&dc->lock);
                        if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
                                   TXX9_DMA_CSR_NTRNFC))
                                txx9dmac_scan_descriptors(dc);
                        spin_unlock(&dc->lock);
                }
        }
        irq = ddev->irq;

        enable_irq(irq);
}

static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
        struct txx9dmac_dev *ddev = dev_id;

        dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
                 dma_readl(ddev, MCR));

        tasklet_schedule(&ddev->tasklet);
        /*
         * Just disable the interrupts. We'll turn them back on in the
         * softirq handler.
         */
        disable_irq_nosync(irq);

        return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
        struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
        dma_cookie_t cookie;

        spin_lock_bh(&dc->lock);
        cookie = txx9dmac_assign_cookie(dc, desc);

        dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
                 desc->txd.cookie, desc);

        list_add_tail(&desc->desc_node, &dc->queue);
        spin_unlock_bh(&dc->lock);

        return cookie;
}

static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                         size_t len, unsigned long flags)
{
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
        struct txx9dmac_dev *ddev = dc->ddev;
        struct txx9dmac_desc *desc;
        struct txx9dmac_desc *first;
        struct txx9dmac_desc *prev;
        size_t xfer_count;
        size_t offset;

        dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
                 (u64)dest, (u64)src, len, flags);

        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
                return NULL;
        }

        prev = first = NULL;

        for (offset = 0; offset < len; offset += xfer_count) {
                xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
                /*
                 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
                 * ERT-TX49H4-016 (slightly conservative)
                 */
                if (__is_dmac64(ddev)) {
                        if (xfer_count > 0x100 &&
                            (xfer_count & 0xff) >= 0xfa &&
                            (xfer_count & 0xff) <= 0xff)
                                xfer_count -= 0x20;
                } else {
                        if (xfer_count > 0x80 &&
                            (xfer_count & 0x7f) >= 0x7e &&
                            (xfer_count & 0x7f) <= 0x7f)
                                xfer_count -= 0x20;
                }

                desc = txx9dmac_desc_get(dc);
                if (!desc) {
                        txx9dmac_desc_put(dc, first);
                        return NULL;
                }

                if (__is_dmac64(ddev)) {
                        desc->hwdesc.SAR = src + offset;
                        desc->hwdesc.DAR = dest + offset;
                        desc->hwdesc.CNTR = xfer_count;
                        txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
                                        dc->ccr | TXX9_DMA_CCR_XFACT);
                } else {
                        desc->hwdesc32.SAR = src + offset;
                        desc->hwdesc32.DAR = dest + offset;
                        desc->hwdesc32.CNTR = xfer_count;
                        txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
                                        dc->ccr | TXX9_DMA_CCR_XFACT);
                }

                /*
                 * The descriptors on tx_list are not reachable from
                 * the dc->queue list or dc->active_list after a
                 * submit. If we put all descriptors on active_list,
                 * calling of callback on the completion will be more
                 * complex.
                 */
                if (!first) {
                        first = desc;
                } else {
                        desc_write_CHAR(dc, prev, desc->txd.phys);
                        dma_sync_single_for_device(chan2parent(&dc->chan),
                                        prev->txd.phys, ddev->descsize,
                                        DMA_TO_DEVICE);
                        list_add_tail(&desc->desc_node,
                                      &first->txd.tx_list);
                }
                prev = desc;
        }

        /* Trigger interrupt after last block */
        if (flags & DMA_PREP_INTERRUPT)
                txx9dmac_desc_set_INTENT(ddev, prev);

        desc_write_CHAR(dc, prev, 0);
        dma_sync_single_for_device(chan2parent(&dc->chan),
                                   prev->txd.phys, ddev->descsize,
                                   DMA_TO_DEVICE);

        first->txd.flags = flags;
        first->len = len;

        return &first->txd;
}

static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int sg_len, enum dma_data_direction direction,
                       unsigned long flags)
{
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
        struct txx9dmac_dev *ddev = dc->ddev;
        struct txx9dmac_slave *ds = chan->private;
        struct txx9dmac_desc *prev;
        struct txx9dmac_desc *first;
        unsigned int i;
        struct scatterlist *sg;

        dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

        BUG_ON(!ds || !ds->reg_width);
        if (ds->tx_reg)
                BUG_ON(direction != DMA_TO_DEVICE);
        else
                BUG_ON(direction != DMA_FROM_DEVICE);
        if (unlikely(!sg_len))
                return NULL;

        prev = first = NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                struct txx9dmac_desc *desc;
                dma_addr_t mem;
                u32 sai, dai;

                desc = txx9dmac_desc_get(dc);
                if (!desc) {
                        txx9dmac_desc_put(dc, first);
                        return NULL;
                }

                mem = sg_dma_address(sg);

                if (__is_dmac64(ddev)) {
                        if (direction == DMA_TO_DEVICE) {
                                desc->hwdesc.SAR = mem;
                                desc->hwdesc.DAR = ds->tx_reg;
                        } else {
                                desc->hwdesc.SAR = ds->rx_reg;
                                desc->hwdesc.DAR = mem;
                        }
                        desc->hwdesc.CNTR = sg_dma_len(sg);
                } else {
                        if (direction == DMA_TO_DEVICE) {
                                desc->hwdesc32.SAR = mem;
                                desc->hwdesc32.DAR = ds->tx_reg;
                        } else {
                                desc->hwdesc32.SAR = ds->rx_reg;
                                desc->hwdesc32.DAR = mem;
                        }
                        desc->hwdesc32.CNTR = sg_dma_len(sg);
                }
                if (direction == DMA_TO_DEVICE) {
                        sai = ds->reg_width;
                        dai = 0;
                } else {
                        sai = 0;
                        dai = ds->reg_width;
                }
                txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
                                        dc->ccr | TXX9_DMA_CCR_XFACT);

                if (!first) {
                        first = desc;
                } else {
                        desc_write_CHAR(dc, prev, desc->txd.phys);
                        dma_sync_single_for_device(chan2parent(&dc->chan),
                                                   prev->txd.phys,
                                                   ddev->descsize,
                                                   DMA_TO_DEVICE);
                        list_add_tail(&desc->desc_node,
                                      &first->txd.tx_list);
                }
                prev = desc;
        }

        /* Trigger interrupt after last block */
        if (flags & DMA_PREP_INTERRUPT)
                txx9dmac_desc_set_INTENT(ddev, prev);

        desc_write_CHAR(dc, prev, 0);
        dma_sync_single_for_device(chan2parent(&dc->chan),
                                   prev->txd.phys, ddev->descsize,
                                   DMA_TO_DEVICE);

        first->txd.flags = flags;
        first->len = 0;

        return &first->txd;
}

static void txx9dmac_terminate_all(struct dma_chan *chan)
{
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
        struct txx9dmac_desc *desc, *_desc;
        LIST_HEAD(list);

        dev_vdbg(chan2dev(chan), "terminate_all\n");
        spin_lock_bh(&dc->lock);

        txx9dmac_reset_chan(dc);

        /* active_list entries will end up before queued entries */
        list_splice_init(&dc->queue, &list);
        list_splice_init(&dc->active_list, &list);

        spin_unlock_bh(&dc->lock);

        /* Flush all pending and queued descriptors */
        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                txx9dmac_descriptor_complete(dc, desc);
}

static enum dma_status
txx9dmac_is_tx_complete(struct dma_chan *chan,
                        dma_cookie_t cookie,
                        dma_cookie_t *done, dma_cookie_t *used)
{
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        int ret;

        last_complete = dc->completed;
        last_used = chan->cookie;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret != DMA_SUCCESS) {
                spin_lock_bh(&dc->lock);
                txx9dmac_scan_descriptors(dc);
                spin_unlock_bh(&dc->lock);

                last_complete = dc->completed;
                last_used = chan->cookie;

                ret = dma_async_is_complete(cookie, last_complete, last_used);
        }

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        return ret;
}

static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
                                   struct txx9dmac_desc *prev)
{
        struct txx9dmac_dev *ddev = dc->ddev;
        struct txx9dmac_desc *desc;
        LIST_HEAD(list);

        prev = txx9dmac_last_child(prev);
        txx9dmac_dequeue(dc, &list);
        desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
        desc_write_CHAR(dc, prev, desc->txd.phys);
        dma_sync_single_for_device(chan2parent(&dc->chan),
                                   prev->txd.phys, ddev->descsize,
                                   DMA_TO_DEVICE);
        mmiowb();
        if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
            channel_read_CHAR(dc) == prev->txd.phys)
                /* Restart chain DMA */
                channel_write_CHAR(dc, desc->txd.phys);
        list_splice_tail(&list, &dc->active_list);
}

static void txx9dmac_issue_pending(struct dma_chan *chan)
{
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);

        spin_lock_bh(&dc->lock);

        if (!list_empty(&dc->active_list))
                txx9dmac_scan_descriptors(dc);
        if (!list_empty(&dc->queue)) {
                if (list_empty(&dc->active_list)) {
                        txx9dmac_dequeue(dc, &dc->active_list);
                        txx9dmac_dostart(dc, txx9dmac_first_active(dc));
                } else if (txx9_dma_have_SMPCHN()) {
                        struct txx9dmac_desc *prev = txx9dmac_last_active(dc);

                        if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
                            txx9dmac_chan_INTENT(dc))
                                txx9dmac_chain_dynamic(dc, prev);
                }
        }

        spin_unlock_bh(&dc->lock);
}

static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
{
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
        struct txx9dmac_slave *ds = chan->private;
        struct txx9dmac_desc *desc;
        int i;

        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

        /* ASSERT: channel is idle */
        if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
                dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
                return -EIO;
        }

        dc->completed = chan->cookie = 1;

        dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
        txx9dmac_chan_set_SMPCHN(dc);
        if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
                dc->ccr |= TXX9_DMA_CCR_INTENC;
        if (chan->device->device_prep_dma_memcpy) {
                if (ds)
                        return -EINVAL;
                dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
        } else {
                if (!ds ||
                    (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
                        return -EINVAL;
                dc->ccr |= TXX9_DMA_CCR_EXTRQ |
                        TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
                txx9dmac_chan_set_INTENT(dc);
        }

        spin_lock_bh(&dc->lock);
        i = dc->descs_allocated;
        while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
                spin_unlock_bh(&dc->lock);

                desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
                if (!desc) {
                        dev_info(chan2dev(chan),
                                 "only allocated %d descriptors\n", i);
                        spin_lock_bh(&dc->lock);
                        break;
                }
                txx9dmac_desc_put(dc, desc);

                spin_lock_bh(&dc->lock);
                i = ++dc->descs_allocated;
        }
        spin_unlock_bh(&dc->lock);

        dev_dbg(chan2dev(chan),
                "alloc_chan_resources allocated %d descriptors\n", i);

        return i;
}

static void txx9dmac_free_chan_resources(struct dma_chan *chan)
{
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
        struct txx9dmac_dev *ddev = dc->ddev;
        struct txx9dmac_desc *desc, *_desc;
        LIST_HEAD(list);

        dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
                dc->descs_allocated);

        /* ASSERT: channel is idle */
        BUG_ON(!list_empty(&dc->active_list));
        BUG_ON(!list_empty(&dc->queue));
        BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);

        spin_lock_bh(&dc->lock);
        list_splice_init(&dc->free_list, &list);
        dc->descs_allocated = 0;
        spin_unlock_bh(&dc->lock);

        list_for_each_entry_safe(desc, _desc, &list, desc_node) {
                dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
                dma_unmap_single(chan2parent(chan), desc->txd.phys,
                                 ddev->descsize, DMA_TO_DEVICE);
                kfree(desc);
        }

        dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/*----------------------------------------------------------------------*/

static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
        dma_writel(ddev, MCR, 0);
        mmiowb();
}

static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
        struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
        struct platform_device *dmac_dev = cpdata->dmac_dev;
        struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
        struct txx9dmac_chan *dc;
        int err;
        int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
        int irq;

        dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
        if (!dc)
                return -ENOMEM;

        dc->dma.dev = &pdev->dev;
        dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
        dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
        dc->dma.device_terminate_all = txx9dmac_terminate_all;
        dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
        dc->dma.device_issue_pending = txx9dmac_issue_pending;
        if (pdata && pdata->memcpy_chan == ch) {
                dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
                dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
        } else {
                dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
                dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
                dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
        }

        INIT_LIST_HEAD(&dc->dma.channels);
        dc->ddev = platform_get_drvdata(dmac_dev);
        if (dc->ddev->irq < 0) {
                irq = platform_get_irq(pdev, 0);
                if (irq < 0)
                        return irq;
                tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
                             (unsigned long)dc);
                dc->irq = irq;
                err = devm_request_irq(&pdev->dev, dc->irq,
                        txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
                if (err)
                        return err;
        } else
                dc->irq = -1;
        dc->ddev->chan[ch] = dc;
        dc->chan.device = &dc->dma;
        list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
        dc->chan.cookie = dc->completed = 1;

        if (is_dmac64(dc))
                dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
        else
                dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
        spin_lock_init(&dc->lock);

        INIT_LIST_HEAD(&dc->active_list);
        INIT_LIST_HEAD(&dc->queue);
        INIT_LIST_HEAD(&dc->free_list);

        txx9dmac_reset_chan(dc);

        platform_set_drvdata(pdev, dc);

        err = dma_async_device_register(&dc->dma);
        if (err)
                return err;
        dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
                dc->dma.dev_id,
                dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
                dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");

        return 0;
}

static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
{
        struct txx9dmac_chan *dc = platform_get_drvdata(pdev);

        dma_async_device_unregister(&dc->dma);
        if (dc->irq >= 0)
                tasklet_kill(&dc->tasklet);
        dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
        return 0;
}

static int __init txx9dmac_probe(struct platform_device *pdev)
{
        struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
        struct resource *io;
        struct txx9dmac_dev *ddev;
        u32 mcr;
        int err;

        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!io)
                return -EINVAL;

        ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
        if (!ddev)
                return -ENOMEM;

        if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
                                     dev_name(&pdev->dev)))
                return -EBUSY;

        ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
        if (!ddev->regs)
                return -ENOMEM;
        ddev->have_64bit_regs = pdata->have_64bit_regs;
        if (__is_dmac64(ddev))
                ddev->descsize = sizeof(struct txx9dmac_hwdesc);
        else
                ddev->descsize = sizeof(struct txx9dmac_hwdesc32);

        /* force dma off, just in case */
        txx9dmac_off(ddev);

        ddev->irq = platform_get_irq(pdev, 0);
        if (ddev->irq >= 0) {
                tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
                             (unsigned long)ddev);
                err = devm_request_irq(&pdev->dev, ddev->irq,
                        txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
                if (err)
                        return err;
        }

        mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
        if (pdata && pdata->memcpy_chan >= 0)
                mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
        dma_writel(ddev, MCR, mcr);

        platform_set_drvdata(pdev, ddev);
        return 0;
}

static int __exit txx9dmac_remove(struct platform_device *pdev)
{
        struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

        txx9dmac_off(ddev);
        if (ddev->irq >= 0)
                tasklet_kill(&ddev->tasklet);
        return 0;
}

static void txx9dmac_shutdown(struct platform_device *pdev)
{
        struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

        txx9dmac_off(ddev);
}

static int txx9dmac_suspend_late(struct platform_device *pdev,
                                 pm_message_t mesg)
{
        struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

        txx9dmac_off(ddev);
        return 0;
}

static int txx9dmac_resume_early(struct platform_device *pdev)
{
        struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
        struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
        u32 mcr;

        mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
        if (pdata && pdata->memcpy_chan >= 0)
                mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
        dma_writel(ddev, MCR, mcr);
        return 0;
}

static struct platform_driver txx9dmac_chan_driver = {
        .remove         = __exit_p(txx9dmac_chan_remove),
        .driver = {
                .name   = "txx9dmac-chan",
        },
};

static struct platform_driver txx9dmac_driver = {
        .remove         = __exit_p(txx9dmac_remove),
        .shutdown       = txx9dmac_shutdown,
        .suspend_late   = txx9dmac_suspend_late,
        .resume_early   = txx9dmac_resume_early,
        .driver = {
                .name   = "txx9dmac",
        },
};

static int __init txx9dmac_init(void)
{
        int rc;

        rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
        if (!rc) {
                rc = platform_driver_probe(&txx9dmac_chan_driver,
                                           txx9dmac_chan_probe);
                if (rc)
                        platform_driver_unregister(&txx9dmac_driver);
        }
        return rc;
}
module_init(txx9dmac_init);

static void __exit txx9dmac_exit(void)
{
        platform_driver_unregister(&txx9dmac_chan_driver);
        platform_driver_unregister(&txx9dmac_driver);
}
module_exit(txx9dmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 DMA Controller driver");
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");