Commit | Line | Data |
---|---|---|
dc78baa2 NF |
1 | /* |
2 | * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) | |
3 | * | |
4 | * Copyright (C) 2008 Atmel Corporation | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License, or | |
9 | * (at your option) any later version. | |
10 | * | |
11 | * | |
9102d871 NF |
12 | * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. |
13 | * The only Atmel DMA Controller that is not covered by this driver is the one | |
14 | * found on AT91SAM9263. | |
dc78baa2 NF |
15 | */ |
16 | ||
62971b29 | 17 | #include <dt-bindings/dma/at91.h> |
dc78baa2 NF |
18 | #include <linux/clk.h> |
19 | #include <linux/dmaengine.h> | |
20 | #include <linux/dma-mapping.h> | |
21 | #include <linux/dmapool.h> | |
22 | #include <linux/interrupt.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/platform_device.h> | |
5a0e3ad6 | 25 | #include <linux/slab.h> |
c5115953 NF |
26 | #include <linux/of.h> |
27 | #include <linux/of_device.h> | |
bbe89c8e | 28 | #include <linux/of_dma.h> |
dc78baa2 NF |
29 | |
30 | #include "at_hdmac_regs.h" | |
d2ebfb33 | 31 | #include "dmaengine.h" |
dc78baa2 NF |
32 | |
33 | /* | |
34 | * Glossary | |
35 | * -------- | |
36 | * | |
37 | * at_hdmac : Name of the Atmel AHB DMA Controller | |
38 | * at_dma_ / atdma : Atmel DMA controller entity related | |
39 | * atc_ / atchan : Atmel DMA Channel entity related | |
40 | */ | |
41 | ||
42 | #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) | |
ae14d4b5 NF |
43 | #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ |
44 | |ATC_DIF(AT_DMA_MEM_IF)) | |
816070ed LD |
45 | #define ATC_DMA_BUSWIDTHS \
46 | (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ | |
47 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\ | |
48 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ | |
49 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | |
dc78baa2 NF |
50 | |
51 | /* | |
52 | * Initial number of descriptors to allocate for each channel. This can | |
53 | * be increased during DMA usage. | |
54 | */ | |
55 | static unsigned int init_nr_desc_per_channel = 64; | |
56 | module_param(init_nr_desc_per_channel, uint, 0644); | |
57 | MODULE_PARM_DESC(init_nr_desc_per_channel, | |
58 | "initial descriptors per channel (default: 64)"); | |
59 | ||
60 | ||
61 | /* prototypes */ | |
62 | static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); | |
d48de6f1 | 63 | static void atc_issue_pending(struct dma_chan *chan); |
dc78baa2 NF |
64 | |
65 | ||
66 | /*----------------------------------------------------------------------*/ | |
67 | ||
265567fb TF |
68 | static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst, |
69 | size_t len) | |
70 | { | |
71 | unsigned int width; | |
72 | ||
73 | if (!((src | dst | len) & 3)) | |
74 | width = 2; | |
75 | else if (!((src | dst | len) & 1)) | |
76 | width = 1; | |
77 | else | |
78 | width = 0; | |
79 | ||
80 | return width; | |
81 | } | |
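/*
 * Illustration: the value computed above is log2 of the access width in
 * bytes, which is both the shift count used elsewhere in this driver
 * (xfer_count << width, len >> width) and the encoding expected by the
 * ATC_SRC_WIDTH()/ATC_DST_WIDTH() fields.  For example src = 0x20001000,
 * dst = 0x20002000 and len = 512 are all multiples of 4, so the width is 2
 * (32-bit accesses); if any of the three were only 2-byte aligned the width
 * would be 1, and 0 (byte accesses) otherwise.  The addresses here are made
 * up for the example.
 */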
82 | ||
dc78baa2 NF |
83 | static struct at_desc *atc_first_active(struct at_dma_chan *atchan) |
84 | { | |
85 | return list_first_entry(&atchan->active_list, | |
86 | struct at_desc, desc_node); | |
87 | } | |
88 | ||
89 | static struct at_desc *atc_first_queued(struct at_dma_chan *atchan) | |
90 | { | |
91 | return list_first_entry(&atchan->queue, | |
92 | struct at_desc, desc_node); | |
93 | } | |
94 | ||
95 | /** | |
421f91d2 | 96 | * atc_alloc_descriptor - allocate and return an initialized descriptor |
dc78baa2 NF |
97 | * @chan: the channel to allocate descriptors for |
98 | * @gfp_flags: GFP allocation flags | |
99 | * | |
100 | * Note: The ack bit is set in the descriptor flag at creation time | |
101 | * to make the initial allocation more convenient. This bit will be cleared | |
102 | * and control will be given to the client at usage time (during the | |
103 | * preparation functions). | |
104 | */ | |
105 | static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, | |
106 | gfp_t gfp_flags) | |
107 | { | |
108 | struct at_desc *desc = NULL; | |
109 | struct at_dma *atdma = to_at_dma(chan->device); | |
110 | dma_addr_t phys; | |
111 | ||
112 | desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); | |
113 | if (desc) { | |
114 | memset(desc, 0, sizeof(struct at_desc)); | |
285a3c71 | 115 | INIT_LIST_HEAD(&desc->tx_list); |
dc78baa2 NF |
116 | dma_async_tx_descriptor_init(&desc->txd, chan); |
117 | /* txd.flags will be overwritten in prep functions */ | |
118 | desc->txd.flags = DMA_CTRL_ACK; | |
119 | desc->txd.tx_submit = atc_tx_submit; | |
120 | desc->txd.phys = phys; | |
121 | } | |
122 | ||
123 | return desc; | |
124 | } | |
125 | ||
126 | /** | |
af901ca1 | 127 | * atc_desc_get - get an unused descriptor from free_list |
dc78baa2 NF |
128 | * @atchan: channel we want a new descriptor for |
129 | */ | |
130 | static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) | |
131 | { | |
132 | struct at_desc *desc, *_desc; | |
133 | struct at_desc *ret = NULL; | |
d8cb04b0 | 134 | unsigned long flags; |
dc78baa2 NF |
135 | unsigned int i = 0; |
136 | LIST_HEAD(tmp_list); | |
137 | ||
d8cb04b0 | 138 | spin_lock_irqsave(&atchan->lock, flags); |
dc78baa2 NF |
139 | list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { |
140 | i++; | |
141 | if (async_tx_test_ack(&desc->txd)) { | |
142 | list_del(&desc->desc_node); | |
143 | ret = desc; | |
144 | break; | |
145 | } | |
146 | dev_dbg(chan2dev(&atchan->chan_common), | |
147 | "desc %p not ACKed\n", desc); | |
148 | } | |
d8cb04b0 | 149 | spin_unlock_irqrestore(&atchan->lock, flags); |
dc78baa2 NF |
150 | dev_vdbg(chan2dev(&atchan->chan_common), |
151 | "scanned %u descriptors on freelist\n", i); | |
152 | ||
153 | /* no more descriptors available in the initial pool: create one more */ | |
154 | if (!ret) { | |
155 | ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); | |
156 | if (ret) { | |
d8cb04b0 | 157 | spin_lock_irqsave(&atchan->lock, flags); |
dc78baa2 | 158 | atchan->descs_allocated++; |
d8cb04b0 | 159 | spin_unlock_irqrestore(&atchan->lock, flags); |
dc78baa2 NF |
160 | } else { |
161 | dev_err(chan2dev(&atchan->chan_common), | |
162 | "not enough descriptors available\n"); | |
163 | } | |
164 | } | |
165 | ||
166 | return ret; | |
167 | } | |
168 | ||
169 | /** | |
170 | * atc_desc_put - move a descriptor, including any children, to the free list | |
171 | * @atchan: channel we work on | |
172 | * @desc: descriptor, at the head of a chain, to move to free list | |
173 | */ | |
174 | static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) | |
175 | { | |
176 | if (desc) { | |
177 | struct at_desc *child; | |
d8cb04b0 | 178 | unsigned long flags; |
dc78baa2 | 179 | |
d8cb04b0 | 180 | spin_lock_irqsave(&atchan->lock, flags); |
285a3c71 | 181 | list_for_each_entry(child, &desc->tx_list, desc_node) |
dc78baa2 NF |
182 | dev_vdbg(chan2dev(&atchan->chan_common), |
183 | "moving child desc %p to freelist\n", | |
184 | child); | |
285a3c71 | 185 | list_splice_init(&desc->tx_list, &atchan->free_list); |
dc78baa2 NF |
186 | dev_vdbg(chan2dev(&atchan->chan_common), |
187 | "moving desc %p to freelist\n", desc); | |
188 | list_add(&desc->desc_node, &atchan->free_list); | |
d8cb04b0 | 189 | spin_unlock_irqrestore(&atchan->lock, flags); |
dc78baa2 NF |
190 | } |
191 | } | |
192 | ||
53830cc7 | 193 | /** |
d73111c6 MI |
194 | * atc_desc_chain - build a chain by adding a descriptor
195 | * @first: address of first descriptor of the chain | |
196 | * @prev: address of previous descriptor of the chain | |
53830cc7 NF |
197 | * @desc: descriptor to queue |
198 | * | |
199 | * Called from prep_* functions | |
200 | */ | |
201 | static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, | |
202 | struct at_desc *desc) | |
203 | { | |
204 | if (!(*first)) { | |
205 | *first = desc; | |
206 | } else { | |
207 | /* inform the HW lli about chaining */ | |
208 | (*prev)->lli.dscr = desc->txd.phys; | |
209 | /* insert the link descriptor into the LD ring */ | |
210 | list_add_tail(&desc->desc_node, | |
211 | &(*first)->tx_list); | |
212 | } | |
213 | *prev = desc; | |
214 | } | |
215 | ||
dc78baa2 NF |
216 | /** |
217 | * atc_dostart - starts the DMA engine for real | |
218 | * @atchan: the channel we want to start | |
219 | * @first: first descriptor in the list we want to begin with | |
220 | * | |
221 | * Called with atchan->lock held and bh disabled | |
222 | */ | |
223 | static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) | |
224 | { | |
225 | struct at_dma *atdma = to_at_dma(atchan->chan_common.device); | |
226 | ||
227 | /* ASSERT: channel is idle */ | |
228 | if (atc_chan_is_enabled(atchan)) { | |
229 | dev_err(chan2dev(&atchan->chan_common), | |
230 | "BUG: Attempted to start non-idle channel\n"); | |
231 | dev_err(chan2dev(&atchan->chan_common), | |
232 | " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", | |
233 | channel_readl(atchan, SADDR), | |
234 | channel_readl(atchan, DADDR), | |
235 | channel_readl(atchan, CTRLA), | |
236 | channel_readl(atchan, CTRLB), | |
237 | channel_readl(atchan, DSCR)); | |
238 | ||
239 | /* The tasklet will hopefully advance the queue... */ | |
240 | return; | |
241 | } | |
242 | ||
243 | vdbg_dump_regs(atchan); | |
244 | ||
dc78baa2 NF |
245 | channel_writel(atchan, SADDR, 0); |
246 | channel_writel(atchan, DADDR, 0); | |
247 | channel_writel(atchan, CTRLA, 0); | |
248 | channel_writel(atchan, CTRLB, 0); | |
249 | channel_writel(atchan, DSCR, first->txd.phys); | |
250 | dma_writel(atdma, CHER, atchan->mask); | |
251 | ||
252 | vdbg_dump_regs(atchan); | |
253 | } | |
254 | ||
d48de6f1 | 255 | /* |
bdf6c792 TF |
256 | * atc_get_desc_by_cookie - get the descriptor of a cookie |
257 | * @atchan: the DMA channel | |
258 | * @cookie: the cookie to get the descriptor for | |
d48de6f1 | 259 | */ |
bdf6c792 TF |
260 | static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, |
261 | dma_cookie_t cookie) | |
d48de6f1 | 262 | { |
bdf6c792 | 263 | struct at_desc *desc, *_desc; |
d48de6f1 | 264 | |
bdf6c792 TF |
265 | list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) { |
266 | if (desc->txd.cookie == cookie) | |
267 | return desc; | |
268 | } | |
d48de6f1 | 269 | |
bdf6c792 TF |
270 | list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { |
271 | if (desc->txd.cookie == cookie) | |
272 | return desc; | |
d48de6f1 ES |
273 | } |
274 | ||
bdf6c792 | 275 | return NULL; |
d48de6f1 ES |
276 | } |
277 | ||
bdf6c792 TF |
278 | /** |
279 | * atc_calc_bytes_left - calculates the number of bytes left according to the | |
280 | * value read from CTRLA. | |
281 | * | |
282 | * @current_len: the number of bytes left before reading CTRLA | |
283 | * @ctrla: the value of CTRLA | |
284 | * @desc: the descriptor containing the transfer width | |
285 | */ | |
286 | static inline int atc_calc_bytes_left(int current_len, u32 ctrla, | |
287 | struct at_desc *desc) | |
288 | { | |
289 | return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); | |
290 | } | |
291 | ||
292 | /** | |
293 | * atc_calc_bytes_left_from_reg - calculates the number of bytes left according | |
294 | * to the current value of CTRLA. | |
295 | * | |
296 | * @current_len: the number of bytes left before reading CTRLA | |
297 | * @atchan: the channel to read CTRLA for | |
298 | * @desc: the descriptor containing the transfer width | |
299 | */ | |
300 | static inline int atc_calc_bytes_left_from_reg(int current_len, | |
301 | struct at_dma_chan *atchan, struct at_desc *desc) | |
302 | { | |
303 | u32 ctrla = channel_readl(atchan, CTRLA); | |
304 | ||
305 | return atc_calc_bytes_left(current_len, ctrla, desc); | |
306 | } | |
307 | ||
308 | /** | |
309 | * atc_get_bytes_left - get the residue, in bytes, for a cookie | |
310 | * @chan: DMA channel | |
311 | * @cookie: transaction identifier to check status of | |
d48de6f1 | 312 | */ |
bdf6c792 | 313 | static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) |
d48de6f1 ES |
314 | { |
315 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
d48de6f1 | 316 | struct at_desc *desc_first = atc_first_active(atchan); |
bdf6c792 TF |
317 | struct at_desc *desc; |
318 | int ret; | |
319 | u32 ctrla, dscr; | |
d48de6f1 ES |
320 | |
321 | /* | |
bdf6c792 TF |
322 | * If the cookie doesn't match the currently running transfer then |
323 | * we can return the total length of the associated DMA transfer, | |
324 | * because it is still queued. | |
d48de6f1 | 325 | */ |
bdf6c792 TF |
326 | desc = atc_get_desc_by_cookie(atchan, cookie); |
327 | if (desc == NULL) | |
328 | return -EINVAL; | |
329 | else if (desc != desc_first) | |
330 | return desc->total_len; | |
d48de6f1 | 331 | |
bdf6c792 TF |
332 | /* cookie matches the currently running transfer */ |
333 | ret = desc_first->total_len; | |
6758ddaf | 334 | |
bdf6c792 TF |
335 | if (desc_first->lli.dscr) { |
336 | /* hardware linked list transfer */ | |
337 | ||
338 | /* | |
339 | * Calculate the residue by removing the length of the child | |
340 | * descriptors already transferred from the total length. | |
341 | * To get the current child descriptor we can use the value of | |
342 | * the channel's DSCR register and compare it against the value | |
343 | * of the hardware linked list structure of each child | |
344 | * descriptor. | |
345 | */ | |
346 | ||
347 | ctrla = channel_readl(atchan, CTRLA); | |
348 | rmb(); /* ensure CTRLA is read before DSCR */ | |
349 | dscr = channel_readl(atchan, DSCR); | |
350 | ||
351 | /* for the first descriptor we can be more accurate */ | |
352 | if (desc_first->lli.dscr == dscr) | |
353 | return atc_calc_bytes_left(ret, ctrla, desc_first); | |
354 | ||
355 | ret -= desc_first->len; | |
356 | list_for_each_entry(desc, &desc_first->tx_list, desc_node) { | |
357 | if (desc->lli.dscr == dscr) | |
358 | break; | |
359 | ||
360 | ret -= desc->len; | |
c3dbc60c | 361 | } |
6758ddaf | 362 | |
d48de6f1 | 363 | /* |
bdf6c792 TF |
364 | * For the last descriptor in the chain we can calculate |
365 | * the remaining bytes using the channel's register. | |
366 | * Note that the transfer width of the first and last | |
367 | * descriptor may differ. | |
d48de6f1 | 368 | */ |
bdf6c792 TF |
369 | if (!desc->lli.dscr) |
370 | ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); | |
371 | } else { | |
372 | /* single transfer */ | |
373 | ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); | |
d48de6f1 | 374 | } |
d48de6f1 | 375 | |
d48de6f1 ES |
376 | return ret; |
377 | } | |
378 | ||
dc78baa2 NF |
379 | /** |
380 | * atc_chain_complete - finish work for one transaction chain | |
381 | * @atchan: channel we work on | |
382 | * @desc: descriptor at the head of the chain we want to complete | |
383 | * | |
384 | * Called with atchan->lock held and bh disabled */ | |
385 | static void | |
386 | atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |
387 | { | |
dc78baa2 NF |
388 | struct dma_async_tx_descriptor *txd = &desc->txd; |
389 | ||
390 | dev_vdbg(chan2dev(&atchan->chan_common), | |
391 | "descriptor %u complete\n", txd->cookie); | |
392 | ||
d4116052 VK |
393 | /* mark the descriptor as complete for non-cyclic cases only */ |
394 | if (!atc_chan_is_cyclic(atchan)) | |
395 | dma_cookie_complete(txd); | |
dc78baa2 NF |
396 | |
397 | /* move children to free_list */ | |
285a3c71 | 398 | list_splice_init(&desc->tx_list, &atchan->free_list); |
dc78baa2 NF |
399 | /* move myself to free_list */ |
400 | list_move(&desc->desc_node, &atchan->free_list); | |
401 | ||
d38a8c62 | 402 | dma_descriptor_unmap(txd); |
53830cc7 NF |
403 | /* for cyclic transfers, |
404 | * there is no need to invoke the callback function again while stopping */ | |
3c477482 | 405 | if (!atc_chan_is_cyclic(atchan)) { |
53830cc7 NF |
406 | dma_async_tx_callback callback = txd->callback; |
407 | void *param = txd->callback_param; | |
408 | ||
409 | /* | |
410 | * The API requires that no submissions are done from a | |
411 | * callback, so we don't need to drop the lock here | |
412 | */ | |
413 | if (callback) | |
414 | callback(param); | |
415 | } | |
dc78baa2 NF |
416 | |
417 | dma_run_dependencies(txd); | |
418 | } | |
419 | ||
420 | /** | |
421 | * atc_complete_all - finish work for all transactions | |
422 | * @atchan: channel to complete transactions for | |
423 | * | |
424 | * Submit queued descriptors, if any | |
425 | * | |
426 | * Assume channel is idle while calling this function | |
427 | * Called with atchan->lock held and bh disabled | |
428 | */ | |
429 | static void atc_complete_all(struct at_dma_chan *atchan) | |
430 | { | |
431 | struct at_desc *desc, *_desc; | |
432 | LIST_HEAD(list); | |
433 | ||
434 | dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); | |
435 | ||
dc78baa2 NF |
436 | /* |
437 | * Submit queued descriptors ASAP, i.e. before we go through | |
438 | * the completed ones. | |
439 | */ | |
440 | if (!list_empty(&atchan->queue)) | |
441 | atc_dostart(atchan, atc_first_queued(atchan)); | |
442 | /* empty active_list now that it is completed */ | |
443 | list_splice_init(&atchan->active_list, &list); | |
444 | /* empty queue list by moving descriptors (if any) to active_list */ | |
445 | list_splice_init(&atchan->queue, &atchan->active_list); | |
446 | ||
447 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | |
448 | atc_chain_complete(atchan, desc); | |
449 | } | |
450 | ||
dc78baa2 NF |
451 | /** |
452 | * atc_advance_work - at the end of a transaction, move forward | |
453 | * @atchan: channel where the transaction ended | |
454 | * | |
455 | * Called with atchan->lock held and bh disabled | |
456 | */ | |
457 | static void atc_advance_work(struct at_dma_chan *atchan) | |
458 | { | |
459 | dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); | |
460 | ||
d202f051 LD |
461 | if (atc_chan_is_enabled(atchan)) |
462 | return; | |
463 | ||
dc78baa2 NF |
464 | if (list_empty(&atchan->active_list) || |
465 | list_is_singular(&atchan->active_list)) { | |
466 | atc_complete_all(atchan); | |
467 | } else { | |
468 | atc_chain_complete(atchan, atc_first_active(atchan)); | |
469 | /* advance work */ | |
470 | atc_dostart(atchan, atc_first_active(atchan)); | |
471 | } | |
472 | } | |
473 | ||
474 | ||
475 | /** | |
476 | * atc_handle_error - handle errors reported by DMA controller | |
477 | * @atchan: channel where error occurs | |
478 | * | |
479 | * Called with atchan->lock held and bh disabled | |
480 | */ | |
481 | static void atc_handle_error(struct at_dma_chan *atchan) | |
482 | { | |
483 | struct at_desc *bad_desc; | |
484 | struct at_desc *child; | |
485 | ||
486 | /* | |
487 | * The descriptor currently at the head of the active list is | |
488 | * broken. Since we don't have any way to report errors, we'll | |
489 | * just have to scream loudly and try to carry on. | |
490 | */ | |
491 | bad_desc = atc_first_active(atchan); | |
492 | list_del_init(&bad_desc->desc_node); | |
493 | ||
494 | /* As we are stopped, take the opportunity to push queued descriptors | |
495 | * onto the active_list */ | |
496 | list_splice_init(&atchan->queue, atchan->active_list.prev); | |
497 | ||
498 | /* Try to restart the controller */ | |
499 | if (!list_empty(&atchan->active_list)) | |
500 | atc_dostart(atchan, atc_first_active(atchan)); | |
501 | ||
502 | /* | |
503 | * KERN_CRIT may seem harsh, but since this only happens | |
504 | * when someone submits a bad physical address in a | |
505 | * descriptor, we should consider ourselves lucky that the | |
506 | * controller flagged an error instead of scribbling over | |
507 | * random memory locations. | |
508 | */ | |
509 | dev_crit(chan2dev(&atchan->chan_common), | |
510 | "Bad descriptor submitted for DMA!\n"); | |
511 | dev_crit(chan2dev(&atchan->chan_common), | |
512 | " cookie: %d\n", bad_desc->txd.cookie); | |
513 | atc_dump_lli(atchan, &bad_desc->lli); | |
285a3c71 | 514 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
dc78baa2 NF |
515 | atc_dump_lli(atchan, &child->lli); |
516 | ||
517 | /* Pretend the descriptor completed successfully */ | |
518 | atc_chain_complete(atchan, bad_desc); | |
519 | } | |
520 | ||
53830cc7 NF |
521 | /** |
522 | * atc_handle_cyclic - at the end of a period, run callback function | |
523 | * @atchan: channel used for cyclic operations | |
524 | * | |
525 | * Called with atchan->lock held and bh disabled | |
526 | */ | |
527 | static void atc_handle_cyclic(struct at_dma_chan *atchan) | |
528 | { | |
529 | struct at_desc *first = atc_first_active(atchan); | |
530 | struct dma_async_tx_descriptor *txd = &first->txd; | |
531 | dma_async_tx_callback callback = txd->callback; | |
532 | void *param = txd->callback_param; | |
533 | ||
534 | dev_vdbg(chan2dev(&atchan->chan_common), | |
535 | "new cyclic period llp 0x%08x\n", | |
536 | channel_readl(atchan, DSCR)); | |
537 | ||
538 | if (callback) | |
539 | callback(param); | |
540 | } | |
dc78baa2 NF |
541 | |
542 | /*-- IRQ & Tasklet ---------------------------------------------------*/ | |
543 | ||
544 | static void atc_tasklet(unsigned long data) | |
545 | { | |
546 | struct at_dma_chan *atchan = (struct at_dma_chan *)data; | |
d8cb04b0 | 547 | unsigned long flags; |
dc78baa2 | 548 | |
d8cb04b0 | 549 | spin_lock_irqsave(&atchan->lock, flags); |
53830cc7 | 550 | if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) |
dc78baa2 | 551 | atc_handle_error(atchan); |
3c477482 | 552 | else if (atc_chan_is_cyclic(atchan)) |
53830cc7 | 553 | atc_handle_cyclic(atchan); |
dc78baa2 NF |
554 | else |
555 | atc_advance_work(atchan); | |
556 | ||
d8cb04b0 | 557 | spin_unlock_irqrestore(&atchan->lock, flags); |
dc78baa2 NF |
558 | } |
559 | ||
560 | static irqreturn_t at_dma_interrupt(int irq, void *dev_id) | |
561 | { | |
562 | struct at_dma *atdma = (struct at_dma *)dev_id; | |
563 | struct at_dma_chan *atchan; | |
564 | int i; | |
565 | u32 status, pending, imr; | |
566 | int ret = IRQ_NONE; | |
567 | ||
568 | do { | |
569 | imr = dma_readl(atdma, EBCIMR); | |
570 | status = dma_readl(atdma, EBCISR); | |
571 | pending = status & imr; | |
572 | ||
573 | if (!pending) | |
574 | break; | |
575 | ||
576 | dev_vdbg(atdma->dma_common.dev, | |
577 | "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n", | |
578 | status, imr, pending); | |
579 | ||
580 | for (i = 0; i < atdma->dma_common.chancnt; i++) { | |
581 | atchan = &atdma->chan[i]; | |
9b3aa589 | 582 | if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { |
dc78baa2 NF |
583 | if (pending & AT_DMA_ERR(i)) { |
584 | /* Disable channel on AHB error */ | |
23b5e3ad NF |
585 | dma_writel(atdma, CHDR, |
586 | AT_DMA_RES(i) | atchan->mask); | |
dc78baa2 | 587 | /* Give information to tasklet */ |
53830cc7 | 588 | set_bit(ATC_IS_ERROR, &atchan->status); |
dc78baa2 NF |
589 | } |
590 | tasklet_schedule(&atchan->tasklet); | |
591 | ret = IRQ_HANDLED; | |
592 | } | |
593 | } | |
594 | ||
595 | } while (pending); | |
596 | ||
597 | return ret; | |
598 | } | |
599 | ||
600 | ||
601 | /*-- DMA Engine API --------------------------------------------------*/ | |
602 | ||
603 | /** | |
604 | * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine | |
605 | * @desc: descriptor at the head of the transaction chain | |
606 | * | |
607 | * Queue the chain if the DMA engine is already working | |
608 | * | |
609 | * Cookie increment and adding to active_list or queue must be atomic | |
610 | */ | |
611 | static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) | |
612 | { | |
613 | struct at_desc *desc = txd_to_at_desc(tx); | |
614 | struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); | |
615 | dma_cookie_t cookie; | |
d8cb04b0 | 616 | unsigned long flags; |
dc78baa2 | 617 | |
d8cb04b0 | 618 | spin_lock_irqsave(&atchan->lock, flags); |
884485e1 | 619 | cookie = dma_cookie_assign(tx); |
dc78baa2 NF |
620 | |
621 | if (list_empty(&atchan->active_list)) { | |
622 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", | |
623 | desc->txd.cookie); | |
624 | atc_dostart(atchan, desc); | |
625 | list_add_tail(&desc->desc_node, &atchan->active_list); | |
626 | } else { | |
627 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", | |
628 | desc->txd.cookie); | |
629 | list_add_tail(&desc->desc_node, &atchan->queue); | |
630 | } | |
631 | ||
d8cb04b0 | 632 | spin_unlock_irqrestore(&atchan->lock, flags); |
dc78baa2 NF |
633 | |
634 | return cookie; | |
635 | } | |
636 | ||
637 | /** | |
638 | * atc_prep_dma_memcpy - prepare a memcpy operation | |
639 | * @chan: the channel to prepare operation on | |
640 | * @dest: destination bus (DMA) address of the operation | |
641 | * @src: source bus (DMA) address of the operation | |
642 | * @len: operation length | |
643 | * @flags: tx descriptor status flags | |
644 | */ | |
645 | static struct dma_async_tx_descriptor * | |
646 | atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |
647 | size_t len, unsigned long flags) | |
648 | { | |
649 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
650 | struct at_desc *desc = NULL; | |
651 | struct at_desc *first = NULL; | |
652 | struct at_desc *prev = NULL; | |
653 | size_t xfer_count; | |
654 | size_t offset; | |
655 | unsigned int src_width; | |
656 | unsigned int dst_width; | |
657 | u32 ctrla; | |
658 | u32 ctrlb; | |
659 | ||
660 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", | |
661 | dest, src, len, flags); | |
662 | ||
663 | if (unlikely(!len)) { | |
664 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); | |
665 | return NULL; | |
666 | } | |
667 | ||
9b3aa589 | 668 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
dc78baa2 NF |
669 | | ATC_SRC_ADDR_MODE_INCR |
670 | | ATC_DST_ADDR_MODE_INCR | |
671 | | ATC_FC_MEM2MEM; | |
672 | ||
673 | /* | |
674 | * We can be a lot more clever here, but this should take care | |
675 | * of the most common optimization. | |
676 | */ | |
265567fb TF |
677 | src_width = dst_width = atc_get_xfer_width(src, dest, len); |
678 | ||
679 | ctrla = ATC_SRC_WIDTH(src_width) | | |
680 | ATC_DST_WIDTH(dst_width); | |
dc78baa2 NF |
681 | |
682 | for (offset = 0; offset < len; offset += xfer_count << src_width) { | |
683 | xfer_count = min_t(size_t, (len - offset) >> src_width, | |
684 | ATC_BTSIZE_MAX); | |
685 | ||
686 | desc = atc_desc_get(atchan); | |
687 | if (!desc) | |
688 | goto err_desc_get; | |
689 | ||
690 | desc->lli.saddr = src + offset; | |
691 | desc->lli.daddr = dest + offset; | |
692 | desc->lli.ctrla = ctrla | xfer_count; | |
693 | desc->lli.ctrlb = ctrlb; | |
694 | ||
695 | desc->txd.cookie = 0; | |
bdf6c792 | 696 | desc->len = xfer_count << src_width; |
dc78baa2 | 697 | |
e257e156 | 698 | atc_desc_chain(&first, &prev, desc); |
dc78baa2 NF |
699 | } |
700 | ||
701 | /* First descriptor of the chain embeds additional information */ | |
702 | first->txd.cookie = -EBUSY; | |
bdf6c792 TF |
703 | first->total_len = len; |
704 | ||
705 | /* set transfer width for the calculation of the residue */ | |
d088c33b | 706 | first->tx_width = src_width; |
bdf6c792 | 707 | prev->tx_width = src_width; |
dc78baa2 NF |
708 | |
709 | /* set end-of-link on the last link descriptor of the list */ | |
710 | set_desc_eol(desc); | |
711 | ||
568f7f0c | 712 | first->txd.flags = flags; /* client is in control of this ack */ |
dc78baa2 NF |
713 | |
714 | return &first->txd; | |
715 | ||
716 | err_desc_get: | |
717 | atc_desc_put(atchan, first); | |
718 | return NULL; | |
719 | } | |
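/*
 * Illustrative sketch only: how a memory-to-memory client would typically
 * reach the prep routine above through the generic dmaengine API.  The
 * function name and the error handling are assumptions of this example,
 * not part of at_hdmac; the buffers are expected to be DMA-mapped already.
 */
static int example_memcpy_xfer(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* reaches atc_prep_dma_memcpy() on an at_hdmac channel */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);		/* ends up in atc_tx_submit() */
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);		/* ends up in atc_issue_pending() */
	return 0;
}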
720 | ||
808347f6 NF |
721 | |
722 | /** | |
723 | * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | |
724 | * @chan: DMA channel | |
725 | * @sgl: scatterlist to transfer to/from | |
726 | * @sg_len: number of entries in @sgl | |
727 | * @direction: DMA direction | |
728 | * @flags: tx descriptor status flags | |
185ecb5f | 729 | * @context: transaction context (ignored) |
808347f6 NF |
730 | */ |
731 | static struct dma_async_tx_descriptor * | |
732 | atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |
db8196df | 733 | unsigned int sg_len, enum dma_transfer_direction direction, |
185ecb5f | 734 | unsigned long flags, void *context) |
808347f6 NF |
735 | { |
736 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
737 | struct at_dma_slave *atslave = chan->private; | |
beeaa103 | 738 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; |
808347f6 NF |
739 | struct at_desc *first = NULL; |
740 | struct at_desc *prev = NULL; | |
741 | u32 ctrla; | |
742 | u32 ctrlb; | |
743 | dma_addr_t reg; | |
744 | unsigned int reg_width; | |
745 | unsigned int mem_width; | |
746 | unsigned int i; | |
747 | struct scatterlist *sg; | |
748 | size_t total_len = 0; | |
749 | ||
cc52a10a NF |
750 | dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", |
751 | sg_len, | |
db8196df | 752 | direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", |
808347f6 NF |
753 | flags); |
754 | ||
755 | if (unlikely(!atslave || !sg_len)) { | |
c618a9be | 756 | dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n"); |
808347f6 NF |
757 | return NULL; |
758 | } | |
759 | ||
1dd1ea8e NF |
760 | ctrla = ATC_SCSIZE(sconfig->src_maxburst) |
761 | | ATC_DCSIZE(sconfig->dst_maxburst); | |
ae14d4b5 | 762 | ctrlb = ATC_IEN; |
808347f6 NF |
763 | |
764 | switch (direction) { | |
db8196df | 765 | case DMA_MEM_TO_DEV: |
beeaa103 | 766 | reg_width = convert_buswidth(sconfig->dst_addr_width); |
808347f6 NF |
767 | ctrla |= ATC_DST_WIDTH(reg_width); |
768 | ctrlb |= ATC_DST_ADDR_MODE_FIXED | |
769 | | ATC_SRC_ADDR_MODE_INCR | |
ae14d4b5 | 770 | | ATC_FC_MEM2PER |
bbe89c8e | 771 | | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); |
beeaa103 | 772 | reg = sconfig->dst_addr; |
808347f6 NF |
773 | for_each_sg(sgl, sg, sg_len, i) { |
774 | struct at_desc *desc; | |
775 | u32 len; | |
776 | u32 mem; | |
777 | ||
778 | desc = atc_desc_get(atchan); | |
779 | if (!desc) | |
780 | goto err_desc_get; | |
781 | ||
0f70e8ce | 782 | mem = sg_dma_address(sg); |
808347f6 | 783 | len = sg_dma_len(sg); |
c4567976 NF |
784 | if (unlikely(!len)) { |
785 | dev_dbg(chan2dev(chan), | |
786 | "prep_slave_sg: sg(%d) data length is zero\n", i); | |
787 | goto err; | |
788 | } | |
808347f6 NF |
789 | mem_width = 2; |
790 | if (unlikely(mem & 3 || len & 3)) | |
791 | mem_width = 0; | |
792 | ||
793 | desc->lli.saddr = mem; | |
794 | desc->lli.daddr = reg; | |
795 | desc->lli.ctrla = ctrla | |
796 | | ATC_SRC_WIDTH(mem_width) | |
797 | | len >> mem_width; | |
798 | desc->lli.ctrlb = ctrlb; | |
bdf6c792 | 799 | desc->len = len; |
808347f6 | 800 | |
e257e156 | 801 | atc_desc_chain(&first, &prev, desc); |
808347f6 NF |
802 | total_len += len; |
803 | } | |
804 | break; | |
db8196df | 805 | case DMA_DEV_TO_MEM: |
beeaa103 | 806 | reg_width = convert_buswidth(sconfig->src_addr_width); |
808347f6 NF |
807 | ctrla |= ATC_SRC_WIDTH(reg_width); |
808 | ctrlb |= ATC_DST_ADDR_MODE_INCR | |
809 | | ATC_SRC_ADDR_MODE_FIXED | |
ae14d4b5 | 810 | | ATC_FC_PER2MEM |
bbe89c8e | 811 | | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); |
808347f6 | 812 | |
beeaa103 | 813 | reg = sconfig->src_addr; |
808347f6 NF |
814 | for_each_sg(sgl, sg, sg_len, i) { |
815 | struct at_desc *desc; | |
816 | u32 len; | |
817 | u32 mem; | |
818 | ||
819 | desc = atc_desc_get(atchan); | |
820 | if (!desc) | |
821 | goto err_desc_get; | |
822 | ||
0f70e8ce | 823 | mem = sg_dma_address(sg); |
808347f6 | 824 | len = sg_dma_len(sg); |
c4567976 NF |
825 | if (unlikely(!len)) { |
826 | dev_dbg(chan2dev(chan), | |
827 | "prep_slave_sg: sg(%d) data length is zero\n", i); | |
828 | goto err; | |
829 | } | |
808347f6 NF |
830 | mem_width = 2; |
831 | if (unlikely(mem & 3 || len & 3)) | |
832 | mem_width = 0; | |
833 | ||
834 | desc->lli.saddr = reg; | |
835 | desc->lli.daddr = mem; | |
836 | desc->lli.ctrla = ctrla | |
837 | | ATC_DST_WIDTH(mem_width) | |
59a609d9 | 838 | | len >> reg_width; |
808347f6 | 839 | desc->lli.ctrlb = ctrlb; |
bdf6c792 | 840 | desc->len = len; |
808347f6 | 841 | |
e257e156 | 842 | atc_desc_chain(&first, &prev, desc); |
808347f6 NF |
843 | total_len += len; |
844 | } | |
845 | break; | |
846 | default: | |
847 | return NULL; | |
848 | } | |
849 | ||
850 | /* set end-of-link on the last link descriptor of the list */ | |
851 | set_desc_eol(prev); | |
852 | ||
853 | /* First descriptor of the chain embeds additional information */ | |
854 | first->txd.cookie = -EBUSY; | |
bdf6c792 TF |
855 | first->total_len = total_len; |
856 | ||
857 | /* set transfer width for the calculation of the residue */ | |
d088c33b | 858 | first->tx_width = reg_width; |
bdf6c792 | 859 | prev->tx_width = reg_width; |
808347f6 | 860 | |
568f7f0c NF |
861 | /* first link descriptor of the list is responsible for the flags */
862 | first->txd.flags = flags; /* client is in control of this ack */ | |
808347f6 NF |
863 | |
864 | return &first->txd; | |
865 | ||
866 | err_desc_get: | |
867 | dev_err(chan2dev(chan), "not enough descriptors available\n"); | |
c4567976 | 868 | err: |
808347f6 NF |
869 | atc_desc_put(atchan, first); |
870 | return NULL; | |
871 | } | |
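/*
 * Illustrative sketch only: the usual slave-DMA sequence a peripheral
 * driver goes through before the prep routine above runs.  The FIFO
 * address, bus width, burst size and the "tx" request name are assumptions
 * of this example; @buf must already be DMA-mapped.
 */
static int example_start_peripheral_tx(struct device *dev, dma_addr_t buf,
				       size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= 0xf8020000,	/* peripheral FIFO, example value */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx"); /* may resolve via at_dma_xlate() */
	if (!chan)
		return -ENODEV;

	/* handled by atc_config(); return value ignored for brevity */
	dmaengine_slave_config(chan, &cfg);

	/* single-entry wrapper around the prep_slave_sg routine above */
	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}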
872 | ||
265567fb TF |
873 | /** |
874 | * atc_prep_dma_sg - prepare a memory-to-memory scatter-gather operation | |
875 | * @chan: the channel to prepare operation on | |
876 | * @dst_sg: destination scatterlist | |
877 | * @dst_nents: number of destination scatterlist entries | |
878 | * @src_sg: source scatterlist | |
879 | * @src_nents: number of source scatterlist entries | |
880 | * @flags: tx descriptor status flags | |
881 | */ | |
882 | static struct dma_async_tx_descriptor * | |
883 | atc_prep_dma_sg(struct dma_chan *chan, | |
884 | struct scatterlist *dst_sg, unsigned int dst_nents, | |
885 | struct scatterlist *src_sg, unsigned int src_nents, | |
886 | unsigned long flags) | |
887 | { | |
888 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
889 | struct at_desc *desc = NULL; | |
890 | struct at_desc *first = NULL; | |
891 | struct at_desc *prev = NULL; | |
892 | unsigned int src_width; | |
893 | unsigned int dst_width; | |
894 | size_t xfer_count; | |
895 | u32 ctrla; | |
896 | u32 ctrlb; | |
897 | size_t dst_len = 0, src_len = 0; | |
898 | dma_addr_t dst = 0, src = 0; | |
899 | size_t len = 0, total_len = 0; | |
900 | ||
901 | if (unlikely(dst_nents == 0 || src_nents == 0)) | |
902 | return NULL; | |
903 | ||
904 | if (unlikely(dst_sg == NULL || src_sg == NULL)) | |
905 | return NULL; | |
906 | ||
907 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | |
908 | | ATC_SRC_ADDR_MODE_INCR | |
909 | | ATC_DST_ADDR_MODE_INCR | |
910 | | ATC_FC_MEM2MEM; | |
911 | ||
912 | /* | |
913 | * loop until there is either no more source or no more destination | |
914 | * scatterlist entry | |
915 | */ | |
916 | while (true) { | |
917 | ||
918 | /* prepare the next transfer */ | |
919 | if (dst_len == 0) { | |
920 | ||
921 | /* no more destination scatterlist entries */ | |
922 | if (!dst_sg || !dst_nents) | |
923 | break; | |
924 | ||
925 | dst = sg_dma_address(dst_sg); | |
926 | dst_len = sg_dma_len(dst_sg); | |
927 | ||
928 | dst_sg = sg_next(dst_sg); | |
929 | dst_nents--; | |
930 | } | |
931 | ||
932 | if (src_len == 0) { | |
933 | ||
934 | /* no more source scatterlist entries */ | |
935 | if (!src_sg || !src_nents) | |
936 | break; | |
937 | ||
938 | src = sg_dma_address(src_sg); | |
939 | src_len = sg_dma_len(src_sg); | |
940 | ||
941 | src_sg = sg_next(src_sg); | |
942 | src_nents--; | |
943 | } | |
944 | ||
945 | len = min_t(size_t, src_len, dst_len); | |
946 | if (len == 0) | |
947 | continue; | |
948 | ||
949 | /* take care of the alignment */ | |
950 | src_width = dst_width = atc_get_xfer_width(src, dst, len); | |
951 | ||
952 | ctrla = ATC_SRC_WIDTH(src_width) | | |
953 | ATC_DST_WIDTH(dst_width); | |
954 | ||
955 | /* | |
956 | * The number of transfers to set up refers to the source width, | |
957 | * which depends on the alignment. | |
958 | */ | |
959 | xfer_count = len >> src_width; | |
960 | if (xfer_count > ATC_BTSIZE_MAX) { | |
961 | xfer_count = ATC_BTSIZE_MAX; | |
962 | len = ATC_BTSIZE_MAX << src_width; | |
963 | } | |
964 | ||
965 | /* create the transfer */ | |
966 | desc = atc_desc_get(atchan); | |
967 | if (!desc) | |
968 | goto err_desc_get; | |
969 | ||
970 | desc->lli.saddr = src; | |
971 | desc->lli.daddr = dst; | |
972 | desc->lli.ctrla = ctrla | xfer_count; | |
973 | desc->lli.ctrlb = ctrlb; | |
974 | ||
975 | desc->txd.cookie = 0; | |
976 | desc->len = len; | |
977 | ||
978 | /* | |
979 | * Although we only need the transfer width for the first and | |
980 | * the last descriptor, it's easier to set it for all descriptors. | |
981 | */ | |
982 | desc->tx_width = src_width; | |
983 | ||
984 | atc_desc_chain(&first, &prev, desc); | |
985 | ||
986 | /* update the lengths and addresses for the next loop cycle */ | |
987 | dst_len -= len; | |
988 | src_len -= len; | |
989 | dst += len; | |
990 | src += len; | |
991 | ||
992 | total_len += len; | |
993 | } | |
994 | ||
995 | /* First descriptor of the chain embeds additional information */ | |
996 | first->txd.cookie = -EBUSY; | |
997 | first->total_len = total_len; | |
998 | ||
999 | /* set end-of-link on the last link descriptor of the list */ | |
1000 | set_desc_eol(desc); | |
1001 | ||
1002 | first->txd.flags = flags; /* client is in control of this ack */ | |
1003 | ||
1004 | return &first->txd; | |
1005 | ||
1006 | err_desc_get: | |
1007 | atc_desc_put(atchan, first); | |
1008 | return NULL; | |
1009 | } | |
1010 | ||
53830cc7 NF |
1011 | /** |
1012 | * atc_dma_cyclic_check_values | |
1013 | * Check for too big/unaligned periods and unaligned DMA buffer | |
1014 | */ | |
1015 | static int | |
1016 | atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, | |
0e7264cc | 1017 | size_t period_len) |
53830cc7 NF |
1018 | { |
1019 | if (period_len > (ATC_BTSIZE_MAX << reg_width)) | |
1020 | goto err_out; | |
1021 | if (unlikely(period_len & ((1 << reg_width) - 1))) | |
1022 | goto err_out; | |
1023 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | |
1024 | goto err_out; | |
53830cc7 NF |
1025 | |
1026 | return 0; | |
1027 | ||
1028 | err_out: | |
1029 | return -EINVAL; | |
1030 | } | |
1031 | ||
1032 | /** | |
d73111c6 | 1033 | * atc_dma_cyclic_fill_desc - Fill one period descriptor |
53830cc7 NF |
1034 | */ |
1035 | static int | |
beeaa103 | 1036 | atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, |
53830cc7 | 1037 | unsigned int period_index, dma_addr_t buf_addr, |
beeaa103 NF |
1038 | unsigned int reg_width, size_t period_len, |
1039 | enum dma_transfer_direction direction) | |
53830cc7 | 1040 | { |
beeaa103 | 1041 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
beeaa103 NF |
1042 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; |
1043 | u32 ctrla; | |
53830cc7 NF |
1044 | |
1045 | /* prepare common CTRLA value */ | |
1dd1ea8e NF |
1046 | ctrla = ATC_SCSIZE(sconfig->src_maxburst) |
1047 | | ATC_DCSIZE(sconfig->dst_maxburst) | |
53830cc7 NF |
1048 | | ATC_DST_WIDTH(reg_width) |
1049 | | ATC_SRC_WIDTH(reg_width) | |
1050 | | period_len >> reg_width; | |
1051 | ||
1052 | switch (direction) { | |
db8196df | 1053 | case DMA_MEM_TO_DEV: |
53830cc7 | 1054 | desc->lli.saddr = buf_addr + (period_len * period_index); |
beeaa103 | 1055 | desc->lli.daddr = sconfig->dst_addr; |
53830cc7 | 1056 | desc->lli.ctrla = ctrla; |
ae14d4b5 | 1057 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED |
53830cc7 | 1058 | | ATC_SRC_ADDR_MODE_INCR |
ae14d4b5 | 1059 | | ATC_FC_MEM2PER |
bbe89c8e LD |
1060 | | ATC_SIF(atchan->mem_if) |
1061 | | ATC_DIF(atchan->per_if); | |
bdf6c792 | 1062 | desc->len = period_len; |
53830cc7 NF |
1063 | break; |
1064 | ||
db8196df | 1065 | case DMA_DEV_TO_MEM: |
beeaa103 | 1066 | desc->lli.saddr = sconfig->src_addr; |
53830cc7 NF |
1067 | desc->lli.daddr = buf_addr + (period_len * period_index); |
1068 | desc->lli.ctrla = ctrla; | |
ae14d4b5 | 1069 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR |
53830cc7 | 1070 | | ATC_SRC_ADDR_MODE_FIXED |
ae14d4b5 | 1071 | | ATC_FC_PER2MEM |
bbe89c8e LD |
1072 | | ATC_SIF(atchan->per_if) |
1073 | | ATC_DIF(atchan->mem_if); | |
bdf6c792 | 1074 | desc->len = period_len; |
53830cc7 NF |
1075 | break; |
1076 | ||
1077 | default: | |
1078 | return -EINVAL; | |
1079 | } | |
1080 | ||
1081 | return 0; | |
1082 | } | |
1083 | ||
1084 | /** | |
1085 | * atc_prep_dma_cyclic - prepare the cyclic DMA transfer | |
1086 | * @chan: the DMA channel to prepare | |
1087 | * @buf_addr: physical DMA address where the buffer starts | |
1088 | * @buf_len: total number of bytes for the entire buffer | |
1089 | * @period_len: number of bytes for each period | |
1090 | * @direction: transfer direction, to or from device | |
ec8b5e48 | 1091 | * @flags: tx descriptor status flags |
53830cc7 NF |
1092 | */ |
1093 | static struct dma_async_tx_descriptor * | |
1094 | atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |
185ecb5f | 1095 | size_t period_len, enum dma_transfer_direction direction, |
31c1e5a1 | 1096 | unsigned long flags) |
53830cc7 NF |
1097 | { |
1098 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1099 | struct at_dma_slave *atslave = chan->private; | |
beeaa103 | 1100 | struct dma_slave_config *sconfig = &atchan->dma_sconfig; |
53830cc7 NF |
1101 | struct at_desc *first = NULL; |
1102 | struct at_desc *prev = NULL; | |
1103 | unsigned long was_cyclic; | |
beeaa103 | 1104 | unsigned int reg_width; |
53830cc7 NF |
1105 | unsigned int periods = buf_len / period_len; |
1106 | unsigned int i; | |
1107 | ||
1108 | dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", | |
db8196df | 1109 | direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", |
53830cc7 NF |
1110 | buf_addr, |
1111 | periods, buf_len, period_len); | |
1112 | ||
1113 | if (unlikely(!atslave || !buf_len || !period_len)) { | |
1114 | dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); | |
1115 | return NULL; | |
1116 | } | |
1117 | ||
1118 | was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); | |
1119 | if (was_cyclic) { | |
1120 | dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); | |
1121 | return NULL; | |
1122 | } | |
1123 | ||
0e7264cc AS |
1124 | if (unlikely(!is_slave_direction(direction))) |
1125 | goto err_out; | |
1126 | ||
beeaa103 NF |
1127 | if (sconfig->direction == DMA_MEM_TO_DEV) |
1128 | reg_width = convert_buswidth(sconfig->dst_addr_width); | |
1129 | else | |
1130 | reg_width = convert_buswidth(sconfig->src_addr_width); | |
1131 | ||
53830cc7 | 1132 | /* Check for too big/unaligned periods and unaligned DMA buffer */ |
0e7264cc | 1133 | if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) |
53830cc7 NF |
1134 | goto err_out; |
1135 | ||
1136 | /* build cyclic linked list */ | |
1137 | for (i = 0; i < periods; i++) { | |
1138 | struct at_desc *desc; | |
1139 | ||
1140 | desc = atc_desc_get(atchan); | |
1141 | if (!desc) | |
1142 | goto err_desc_get; | |
1143 | ||
beeaa103 NF |
1144 | if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, |
1145 | reg_width, period_len, direction)) | |
53830cc7 NF |
1146 | goto err_desc_get; |
1147 | ||
1148 | atc_desc_chain(&first, &prev, desc); | |
1149 | } | |
1150 | ||
1151 | /* let's make a cyclic list */ | |
1152 | prev->lli.dscr = first->txd.phys; | |
1153 | ||
1154 | /* First descriptor of the chain embeds additional information */ | |
1155 | first->txd.cookie = -EBUSY; | |
bdf6c792 | 1156 | first->total_len = buf_len; |
d088c33b | 1157 | first->tx_width = reg_width; |
53830cc7 NF |
1158 | |
1159 | return &first->txd; | |
1160 | ||
1161 | err_desc_get: | |
1162 | dev_err(chan2dev(chan), "not enough descriptors available\n"); | |
1163 | atc_desc_put(atchan, first); | |
1164 | err_out: | |
1165 | clear_bit(ATC_IS_CYCLIC, &atchan->status); | |
1166 | return NULL; | |
1167 | } | |
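/*
 * Illustrative sketch only: a typical cyclic (audio-style) client of the
 * prep routine above.  It assumes the channel was requested and configured
 * with dmaengine_slave_config() as in the previous sketch; the buffer size,
 * period size and callback are assumptions of this example.
 */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				void (*period_cb)(void *), void *cb_arg)
{
	struct dma_async_tx_descriptor *tx;

	/* 4 periods of 1024 bytes each; reaches atc_prep_dma_cyclic() */
	tx = dmaengine_prep_dma_cyclic(chan, buf, 4 * 1024, 1024,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* invoked from atc_handle_cyclic() at the end of each period */
	tx->callback = period_cb;
	tx->callback_param = cb_arg;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}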
1168 | ||
4facfe7f MR |
1169 | static int atc_config(struct dma_chan *chan, |
1170 | struct dma_slave_config *sconfig) | |
beeaa103 NF |
1171 | { |
1172 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1173 | ||
4facfe7f MR |
1174 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
1175 | ||
beeaa103 NF |
1176 | /* Check if the channel is configured for slave transfers */ | |
1177 | if (!chan->private) | |
1178 | return -EINVAL; | |
1179 | ||
1180 | memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); | |
1181 | ||
1182 | convert_burst(&atchan->dma_sconfig.src_maxburst); | |
1183 | convert_burst(&atchan->dma_sconfig.dst_maxburst); | |
1184 | ||
1185 | return 0; | |
1186 | } | |
1187 | ||
4facfe7f MR |
1188 | static int atc_pause(struct dma_chan *chan) |
1189 | { | |
1190 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1191 | struct at_dma *atdma = to_at_dma(chan->device); | |
1192 | int chan_id = atchan->chan_common.chan_id; | |
1193 | unsigned long flags; | |
53830cc7 | 1194 | |
4facfe7f MR |
1195 | LIST_HEAD(list); |
1196 | ||
1197 | dev_vdbg(chan2dev(chan), "%s\n", __func__); | |
1198 | ||
1199 | spin_lock_irqsave(&atchan->lock, flags); | |
1200 | ||
1201 | dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); | |
1202 | set_bit(ATC_IS_PAUSED, &atchan->status); | |
1203 | ||
1204 | spin_unlock_irqrestore(&atchan->lock, flags); | |
1205 | ||
1206 | return 0; | |
1207 | } | |
1208 | ||
1209 | static int atc_resume(struct dma_chan *chan) | |
808347f6 NF |
1210 | { |
1211 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1212 | struct at_dma *atdma = to_at_dma(chan->device); | |
23b5e3ad | 1213 | int chan_id = atchan->chan_common.chan_id; |
d8cb04b0 | 1214 | unsigned long flags; |
23b5e3ad | 1215 | |
808347f6 NF |
1216 | LIST_HEAD(list); |
1217 | ||
4facfe7f | 1218 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
c3635c78 | 1219 | |
4facfe7f MR |
1220 | if (!atc_chan_is_paused(atchan)) |
1221 | return 0; | |
808347f6 | 1222 | |
4facfe7f | 1223 | spin_lock_irqsave(&atchan->lock, flags); |
808347f6 | 1224 | |
4facfe7f MR |
1225 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); |
1226 | clear_bit(ATC_IS_PAUSED, &atchan->status); | |
808347f6 | 1227 | |
4facfe7f | 1228 | spin_unlock_irqrestore(&atchan->lock, flags); |
808347f6 | 1229 | |
4facfe7f MR |
1230 | return 0; |
1231 | } | |
c3635c78 | 1232 | |
4facfe7f MR |
1233 | static int atc_terminate_all(struct dma_chan *chan) |
1234 | { | |
1235 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1236 | struct at_dma *atdma = to_at_dma(chan->device); | |
1237 | int chan_id = atchan->chan_common.chan_id; | |
1238 | struct at_desc *desc, *_desc; | |
1239 | unsigned long flags; | |
23b5e3ad | 1240 | |
4facfe7f | 1241 | LIST_HEAD(list); |
23b5e3ad | 1242 | |
4facfe7f | 1243 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
23b5e3ad | 1244 | |
4facfe7f MR |
1245 | /* |
1246 | * This is only called when something went wrong elsewhere, so | |
1247 | * we don't really care about the data. Just disable the | |
1248 | * channel. We still have to poll the channel enable bit due | |
1249 | * to AHB/HSB limitations. | |
1250 | */ | |
1251 | spin_lock_irqsave(&atchan->lock, flags); | |
23b5e3ad | 1252 | |
4facfe7f MR |
1253 | /* disabling channel: must also remove suspend state */ |
1254 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); | |
23b5e3ad | 1255 | |
4facfe7f MR |
1256 | /* confirm that this channel is disabled */ |
1257 | while (dma_readl(atdma, CHSR) & atchan->mask) | |
1258 | cpu_relax(); | |
23b5e3ad | 1259 | |
4facfe7f MR |
1260 | /* active_list entries will end up before queued entries */ |
1261 | list_splice_init(&atchan->queue, &list); | |
1262 | list_splice_init(&atchan->active_list, &list); | |
1263 | ||
1264 | /* Flush all pending and queued descriptors */ | |
1265 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | |
1266 | atc_chain_complete(atchan, desc); | |
1267 | ||
1268 | clear_bit(ATC_IS_PAUSED, &atchan->status); | |
1269 | /* if channel dedicated to cyclic operations, free it */ | |
1270 | clear_bit(ATC_IS_CYCLIC, &atchan->status); | |
1271 | ||
1272 | spin_unlock_irqrestore(&atchan->lock, flags); | |
b0ebeb9c | 1273 | |
c3635c78 | 1274 | return 0; |
808347f6 NF |
1275 | } |
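/*
 * Illustrative sketch only: the generic dmaengine wrappers that land in the
 * pause/resume/terminate callbacks above.  Error handling is omitted for
 * brevity and the sequence is just an example, not a required order.
 */
static void example_pause_resume_stop(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* atc_pause(): suspends the channel */
	dmaengine_resume(chan);		/* atc_resume(): clears the suspend state */
	dmaengine_terminate_all(chan);	/* atc_terminate_all(): flushes all descriptors */
}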
1276 | ||
dc78baa2 | 1277 | /** |
07934481 | 1278 | * atc_tx_status - poll for transaction completion |
dc78baa2 NF |
1279 | * @chan: DMA channel |
1280 | * @cookie: transaction identifier to check status of | |
07934481 | 1281 | * @txstate: if not %NULL updated with transaction state |
dc78baa2 | 1282 | * |
07934481 | 1283 | * If @txstate is passed in, upon return it reflects the driver |
dc78baa2 NF |
1284 | * internal state and can be used with dma_async_is_complete() to check |
1285 | * the status of multiple cookies without re-checking hardware state. | |
1286 | */ | |
1287 | static enum dma_status | |
07934481 | 1288 | atc_tx_status(struct dma_chan *chan, |
dc78baa2 | 1289 | dma_cookie_t cookie, |
07934481 | 1290 | struct dma_tx_state *txstate) |
dc78baa2 NF |
1291 | { |
1292 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
d8cb04b0 | 1293 | unsigned long flags; |
dc78baa2 | 1294 | enum dma_status ret; |
d48de6f1 | 1295 | int bytes = 0; |
dc78baa2 | 1296 | |
96a2af41 | 1297 | ret = dma_cookie_status(chan, cookie, txstate); |
6d203d1e | 1298 | if (ret == DMA_COMPLETE) |
d48de6f1 ES |
1299 | return ret; |
1300 | /* | |
1301 | * There's no point in calculating the residue if there's | |
1302 | * no txstate to store the value. | |
1303 | */ | |
1304 | if (!txstate) | |
1305 | return DMA_ERROR; | |
dc78baa2 | 1306 | |
d48de6f1 | 1307 | spin_lock_irqsave(&atchan->lock, flags); |
dc78baa2 | 1308 | |
d48de6f1 | 1309 | /* Get number of bytes left in the active transactions */ |
bdf6c792 | 1310 | bytes = atc_get_bytes_left(chan, cookie); |
96a2af41 | 1311 | |
d8cb04b0 | 1312 | spin_unlock_irqrestore(&atchan->lock, flags); |
dc78baa2 | 1313 | |
d48de6f1 ES |
1314 | if (unlikely(bytes < 0)) { |
1315 | dev_vdbg(chan2dev(chan), "get residual bytes error\n"); | |
1316 | return DMA_ERROR; | |
c3dbc60c | 1317 | } else { |
d48de6f1 | 1318 | dma_set_residue(txstate, bytes); |
c3dbc60c | 1319 | } |
23b5e3ad | 1320 | |
d48de6f1 ES |
1321 | dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n", |
1322 | ret, cookie, bytes); | |
dc78baa2 NF |
1323 | |
1324 | return ret; | |
1325 | } | |
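/*
 * Illustrative sketch only: how a client reads back the residue reported by
 * the routine above.  The cookie is assumed to come from a previous
 * dmaengine_submit() on the same channel.
 */
static size_t example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state); /* atc_tx_status() */
	if (status == DMA_ERROR)
		return 0;

	return state.residue;	/* filled in through dma_set_residue() */
}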
1326 | ||
1327 | /** | |
1328 | * atc_issue_pending - try to finish work | |
1329 | * @chan: target DMA channel | |
1330 | */ | |
1331 | static void atc_issue_pending(struct dma_chan *chan) | |
1332 | { | |
1333 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
d8cb04b0 | 1334 | unsigned long flags; |
dc78baa2 NF |
1335 | |
1336 | dev_vdbg(chan2dev(chan), "issue_pending\n"); | |
1337 | ||
53830cc7 | 1338 | /* Not needed for cyclic transfers */ |
3c477482 | 1339 | if (atc_chan_is_cyclic(atchan)) |
53830cc7 NF |
1340 | return; |
1341 | ||
d8cb04b0 | 1342 | spin_lock_irqsave(&atchan->lock, flags); |
d202f051 | 1343 | atc_advance_work(atchan); |
d8cb04b0 | 1344 | spin_unlock_irqrestore(&atchan->lock, flags); |
dc78baa2 NF |
1345 | } |
1346 | ||
1347 | /** | |
1348 | * atc_alloc_chan_resources - allocate resources for DMA channel | |
1349 | * @chan: allocate descriptor resources for this channel | |
1350 | * @client: current client requesting the channel be ready for requests | |
1351 | * | |
1352 | * return - the number of allocated descriptors | |
1353 | */ | |
1354 | static int atc_alloc_chan_resources(struct dma_chan *chan) | |
1355 | { | |
1356 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1357 | struct at_dma *atdma = to_at_dma(chan->device); | |
1358 | struct at_desc *desc; | |
808347f6 | 1359 | struct at_dma_slave *atslave; |
d8cb04b0 | 1360 | unsigned long flags; |
dc78baa2 | 1361 | int i; |
808347f6 | 1362 | u32 cfg; |
dc78baa2 NF |
1363 | LIST_HEAD(tmp_list); |
1364 | ||
1365 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); | |
1366 | ||
1367 | /* ASSERT: channel is idle */ | |
1368 | if (atc_chan_is_enabled(atchan)) { | |
1369 | dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); | |
1370 | return -EIO; | |
1371 | } | |
1372 | ||
808347f6 NF |
1373 | cfg = ATC_DEFAULT_CFG; |
1374 | ||
1375 | atslave = chan->private; | |
1376 | if (atslave) { | |
1377 | /* | |
1378 | * We need controller-specific data to set up slave | |
1379 | * transfers. | |
1380 | */ | |
1381 | BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); | |
1382 | ||
ea7e7906 | 1383 | /* if a cfg configuration is specified, take it instead of the default */ | |
808347f6 NF |
1384 | if (atslave->cfg) |
1385 | cfg = atslave->cfg; | |
1386 | } | |
1387 | ||
1388 | /* have we already been set up? | |
1389 | * reconfigure the channel, but there is no need to reallocate descriptors */ | |
dc78baa2 NF |
1390 | if (!list_empty(&atchan->free_list)) |
1391 | return atchan->descs_allocated; | |
1392 | ||
1393 | /* Allocate initial pool of descriptors */ | |
1394 | for (i = 0; i < init_nr_desc_per_channel; i++) { | |
1395 | desc = atc_alloc_descriptor(chan, GFP_KERNEL); | |
1396 | if (!desc) { | |
1397 | dev_err(atdma->dma_common.dev, | |
1398 | "Only %d initial descriptors\n", i); | |
1399 | break; | |
1400 | } | |
1401 | list_add_tail(&desc->desc_node, &tmp_list); | |
1402 | } | |
1403 | ||
d8cb04b0 | 1404 | spin_lock_irqsave(&atchan->lock, flags); |
dc78baa2 NF |
1405 | atchan->descs_allocated = i; |
1406 | list_splice(&tmp_list, &atchan->free_list); | |
d3ee98cd | 1407 | dma_cookie_init(chan); |
d8cb04b0 | 1408 | spin_unlock_irqrestore(&atchan->lock, flags); |
dc78baa2 NF |
1409 | |
1410 | /* channel parameters */ | |
808347f6 | 1411 | channel_writel(atchan, CFG, cfg); |
dc78baa2 NF |
1412 | |
1413 | dev_dbg(chan2dev(chan), | |
1414 | "alloc_chan_resources: allocated %d descriptors\n", | |
1415 | atchan->descs_allocated); | |
1416 | ||
1417 | return atchan->descs_allocated; | |
1418 | } | |
1419 | ||
1420 | /** | |
1421 | * atc_free_chan_resources - free all channel resources | |
1422 | * @chan: DMA channel | |
1423 | */ | |
1424 | static void atc_free_chan_resources(struct dma_chan *chan) | |
1425 | { | |
1426 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1427 | struct at_dma *atdma = to_at_dma(chan->device); | |
1428 | struct at_desc *desc, *_desc; | |
1429 | LIST_HEAD(list); | |
1430 | ||
1431 | dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", | |
1432 | atchan->descs_allocated); | |
1433 | ||
1434 | /* ASSERT: channel is idle */ | |
1435 | BUG_ON(!list_empty(&atchan->active_list)); | |
1436 | BUG_ON(!list_empty(&atchan->queue)); | |
1437 | BUG_ON(atc_chan_is_enabled(atchan)); | |
1438 | ||
1439 | list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { | |
1440 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); | |
1441 | list_del(&desc->desc_node); | |
1442 | /* free link descriptor */ | |
1443 | dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); | |
1444 | } | |
1445 | list_splice_init(&atchan->free_list, &list); | |
1446 | atchan->descs_allocated = 0; | |
53830cc7 | 1447 | atchan->status = 0; |
dc78baa2 NF |
1448 | |
1449 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); | |
1450 | } | |
1451 | ||
bbe89c8e LD |
1452 | #ifdef CONFIG_OF |
1453 | static bool at_dma_filter(struct dma_chan *chan, void *slave) | |
1454 | { | |
1455 | struct at_dma_slave *atslave = slave; | |
1456 | ||
1457 | if (atslave->dma_dev == chan->device->dev) { | |
1458 | chan->private = atslave; | |
1459 | return true; | |
1460 | } else { | |
1461 | return false; | |
1462 | } | |
1463 | } | |
1464 | ||
1465 | static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, | |
1466 | struct of_dma *of_dma) | |
1467 | { | |
1468 | struct dma_chan *chan; | |
1469 | struct at_dma_chan *atchan; | |
1470 | struct at_dma_slave *atslave; | |
1471 | dma_cap_mask_t mask; | |
1472 | unsigned int per_id; | |
1473 | struct platform_device *dmac_pdev; | |
1474 | ||
1475 | if (dma_spec->args_count != 2) | |
1476 | return NULL; | |
1477 | ||
1478 | dmac_pdev = of_find_device_by_node(dma_spec->np); | |
1479 | ||
1480 | dma_cap_zero(mask); | |
1481 | dma_cap_set(DMA_SLAVE, mask); | |
1482 | ||
1483 | atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); | |
1484 | if (!atslave) | |
1485 | return NULL; | |
62971b29 LD |
1486 | |
1487 | atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW; | |
bbe89c8e LD |
1488 | /* |
1489 | * We can fill both SRC_PER and DST_PER; one of these fields will be | |
1490 | * ignored depending on DMA transfer direction. | |
1491 | */ | |
62971b29 LD |
1492 | per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK; |
1493 | atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) | |
6c22770f | 1494 | | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); |
62971b29 LD |
1495 | /* |
1496 | * We have to translate the value we get from the device tree since | |
1497 | * the half FIFO configuration value had to be 0 to keep backward | |
1498 | * compatibility. | |
1499 | */ | |
1500 | switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) { | |
1501 | case AT91_DMA_CFG_FIFOCFG_ALAP: | |
1502 | atslave->cfg |= ATC_FIFOCFG_LARGESTBURST; | |
1503 | break; | |
1504 | case AT91_DMA_CFG_FIFOCFG_ASAP: | |
1505 | atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE; | |
1506 | break; | |
1507 | case AT91_DMA_CFG_FIFOCFG_HALF: | |
1508 | default: | |
1509 | atslave->cfg |= ATC_FIFOCFG_HALFFIFO; | |
1510 | } | |
bbe89c8e LD |
1511 | atslave->dma_dev = &dmac_pdev->dev; |
1512 | ||
1513 | chan = dma_request_channel(mask, at_dma_filter, atslave); | |
1514 | if (!chan) | |
1515 | return NULL; | |
1516 | ||
1517 | atchan = to_at_dma_chan(chan); | |
1518 | atchan->per_if = dma_spec->args[0] & 0xff; | |
1519 | atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; | |
1520 | ||
1521 | return chan; | |
1522 | } | |
1523 | #else | |
1524 | static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, | |
1525 | struct of_dma *of_dma) | |
1526 | { | |
1527 | return NULL; | |
1528 | } | |
1529 | #endif | |
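On platforms without device tree support, a channel can still be requested through dma_request_channel() with a filter that matches the controller device, in the same way as at_dma_filter() above. Below is a minimal sketch under that assumption; foo_filter(), foo_request_channel() and the cfg value are hypothetical and not part of this driver.

static bool foo_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	/* only accept channels belonging to the expected DMA controller */
	if (sl->dma_dev != chan->device->dev)
		return false;

	/* hand the slave configuration to at_hdmac via chan->private */
	chan->private = sl;
	return true;
}

static struct dma_chan *foo_request_channel(struct device *dmac_dev,
					     struct at_dma_slave *sl)
{
	dma_cap_mask_t mask;

	sl->dma_dev = dmac_dev;
	sl->cfg = ATC_FIFOCFG_HALFFIFO | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, foo_filter, sl);
}

A real client would also encode its peripheral handshaking ids (ATC_SRC_PER/ATC_DST_PER) in cfg, as at_dma_xlate() does for the device tree case.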
dc78baa2 NF |
1530 | |
1531 | /*-- Module Management -----------------------------------------------*/ | |
1532 | ||
02f88be9 NF |
1533 | /* cap_mask is a multi-u32 bitfield, so it is not initialized here; it is filled at probe time with dma_cap_set(). */ | |
1534 | static struct at_dma_platform_data at91sam9rl_config = { | |
1535 | .nr_channels = 2, | |
1536 | }; | |
1537 | static struct at_dma_platform_data at91sam9g45_config = { | |
1538 | .nr_channels = 8, | |
1539 | }; | |
1540 | ||
c5115953 NF |
1541 | #if defined(CONFIG_OF) |
1542 | static const struct of_device_id atmel_dma_dt_ids[] = { | |
1543 | { | |
1544 | .compatible = "atmel,at91sam9rl-dma", | |
02f88be9 | 1545 | .data = &at91sam9rl_config, |
c5115953 NF |
1546 | }, { |
1547 | .compatible = "atmel,at91sam9g45-dma", | |
02f88be9 | 1548 | .data = &at91sam9g45_config, |
dcc81734 NF |
1549 | }, { |
1550 | /* sentinel */ | |
1551 | } | |
c5115953 NF |
1552 | }; |
1553 | ||
1554 | MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids); | |
1555 | #endif | |
1556 | ||
0ab88a01 | 1557 | static const struct platform_device_id atdma_devtypes[] = { |
67348450 NF |
1558 | { |
1559 | .name = "at91sam9rl_dma", | |
02f88be9 | 1560 | .driver_data = (unsigned long) &at91sam9rl_config, |
67348450 NF |
1561 | }, { |
1562 | .name = "at91sam9g45_dma", | |
02f88be9 | 1563 | .driver_data = (unsigned long) &at91sam9g45_config, |
67348450 NF |
1564 | }, { |
1565 | /* sentinel */ | |
1566 | } | |
1567 | }; | |
1568 | ||
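On non-DT platforms the SoC setup code registers the controller as a platform device whose name selects one of the entries above; at_dma_get_driver_data() below then picks the configuration out of driver_data. A rough sketch follows, with placeholder resources (the real base address, IRQ and the "dma_clk" clock lookup live in the SoC support code); foo_hdmac_resources and foo_hdmac_device are hypothetical names.

static struct resource foo_hdmac_resources[] = {
	[0] = {
		.start	= 0,	/* placeholder: SoC DMAC base address */
		.end	= 0,	/* placeholder: base + register window - 1 */
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 0,	/* placeholder: SoC DMAC interrupt line */
		.end	= 0,	/* same as .start for a single IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device foo_hdmac_device = {
	.name		= "at91sam9g45_dma",	/* matches atdma_devtypes[] */
	.id		= -1,
	.resource	= foo_hdmac_resources,
	.num_resources	= ARRAY_SIZE(foo_hdmac_resources),
};

Registering it with platform_device_register(&foo_hdmac_device) would bind this driver with the at91sam9g45 configuration (8 channels, memcpy and slave capable).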
7fd63ccd | 1569 | static inline const struct at_dma_platform_data * __init at_dma_get_driver_data( |
02f88be9 | 1570 | struct platform_device *pdev) |
c5115953 NF |
1571 | { |
1572 | if (pdev->dev.of_node) { | |
1573 | const struct of_device_id *match; | |
1574 | match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node); | |
1575 | if (match == NULL) | |
02f88be9 NF |
1576 | return NULL; |
1577 | return match->data; | |
c5115953 | 1578 | } |
02f88be9 NF |
1579 | return (struct at_dma_platform_data *) |
1580 | platform_get_device_id(pdev)->driver_data; | |
c5115953 NF |
1581 | } |
1582 | ||
dc78baa2 NF |
1583 | /** |
1584 | * at_dma_off - disable DMA controller | |
1585 | * @atdma: the Atmel HDMAC device | |
1586 | */ | |
1587 | static void at_dma_off(struct at_dma *atdma) | |
1588 | { | |
1589 | dma_writel(atdma, EN, 0); | |
1590 | ||
1591 | /* disable all interrupts */ | |
1592 | dma_writel(atdma, EBCIDR, -1L); | |
1593 | ||
1594 | /* confirm that all channels are disabled */ | |
1595 | while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) | |
1596 | cpu_relax(); | |
1597 | } | |
1598 | ||
1599 | static int __init at_dma_probe(struct platform_device *pdev) | |
1600 | { | |
dc78baa2 NF |
1601 | struct resource *io; |
1602 | struct at_dma *atdma; | |
1603 | size_t size; | |
1604 | int irq; | |
1605 | int err; | |
1606 | int i; | |
7fd63ccd | 1607 | const struct at_dma_platform_data *plat_dat; |
67348450 | 1608 | |
02f88be9 NF |
1609 | /* set up platform data for each SoC */ | |
1610 | dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); | |
265567fb | 1611 | dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask); |
02f88be9 NF |
1612 | dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); |
1613 | dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); | |
265567fb | 1614 | dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask); |
67348450 NF |
1615 | |
1616 | /* get DMA parameters from controller type */ | |
02f88be9 NF |
1617 | plat_dat = at_dma_get_driver_data(pdev); |
1618 | if (!plat_dat) | |
1619 | return -ENODEV; | |
dc78baa2 NF |
1620 | |
1621 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1622 | if (!io) | |
1623 | return -EINVAL; | |
1624 | ||
1625 | irq = platform_get_irq(pdev, 0); | |
1626 | if (irq < 0) | |
1627 | return irq; | |
1628 | ||
1629 | size = sizeof(struct at_dma); | |
02f88be9 | 1630 | size += plat_dat->nr_channels * sizeof(struct at_dma_chan); |
dc78baa2 NF |
1631 | atdma = kzalloc(size, GFP_KERNEL); |
1632 | if (!atdma) | |
1633 | return -ENOMEM; | |
1634 | ||
67348450 | 1635 | /* discover transaction capabilities */ |
02f88be9 NF |
1636 | atdma->dma_common.cap_mask = plat_dat->cap_mask; |
1637 | atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; | |
dc78baa2 | 1638 | |
114df7d6 | 1639 | size = resource_size(io); |
dc78baa2 NF |
1640 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { |
1641 | err = -EBUSY; | |
1642 | goto err_kfree; | |
1643 | } | |
1644 | ||
1645 | atdma->regs = ioremap(io->start, size); | |
1646 | if (!atdma->regs) { | |
1647 | err = -ENOMEM; | |
1648 | goto err_release_r; | |
1649 | } | |
1650 | ||
1651 | atdma->clk = clk_get(&pdev->dev, "dma_clk"); | |
1652 | if (IS_ERR(atdma->clk)) { | |
1653 | err = PTR_ERR(atdma->clk); | |
1654 | goto err_clk; | |
1655 | } | |
f784d9c9 BB |
1656 | err = clk_prepare_enable(atdma->clk); |
1657 | if (err) | |
1658 | goto err_clk_prepare; | |
dc78baa2 NF |
1659 | |
1660 | /* force dma off, just in case */ | |
1661 | at_dma_off(atdma); | |
1662 | ||
1663 | err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma); | |
1664 | if (err) | |
1665 | goto err_irq; | |
1666 | ||
1667 | platform_set_drvdata(pdev, atdma); | |
1668 | ||
1669 | /* create a pool of consistent memory blocks for hardware descriptors */ | |
1670 | atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", | |
1671 | &pdev->dev, sizeof(struct at_desc), | |
1672 | 4 /* word alignment */, 0); | |
1673 | if (!atdma->dma_desc_pool) { | |
1674 | dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); | |
1675 | err = -ENOMEM; | |
1676 | goto err_pool_create; | |
1677 | } | |
1678 | ||
1679 | /* clear any pending interrupt */ | |
1680 | while (dma_readl(atdma, EBCISR)) | |
1681 | cpu_relax(); | |
1682 | ||
1683 | /* initialize channel-related values */ | |
1684 | INIT_LIST_HEAD(&atdma->dma_common.channels); | |
02f88be9 | 1685 | for (i = 0; i < plat_dat->nr_channels; i++) { |
dc78baa2 NF |
1686 | struct at_dma_chan *atchan = &atdma->chan[i]; |
1687 | ||
bbe89c8e LD |
1688 | atchan->mem_if = AT_DMA_MEM_IF; |
1689 | atchan->per_if = AT_DMA_PER_IF; | |
dc78baa2 | 1690 | atchan->chan_common.device = &atdma->dma_common; |
d3ee98cd | 1691 | dma_cookie_init(&atchan->chan_common); |
dc78baa2 NF |
1692 | list_add_tail(&atchan->chan_common.device_node, |
1693 | &atdma->dma_common.channels); | |
1694 | ||
1695 | atchan->ch_regs = atdma->regs + ch_regs(i); | |
1696 | spin_lock_init(&atchan->lock); | |
1697 | atchan->mask = 1 << i; | |
1698 | ||
1699 | INIT_LIST_HEAD(&atchan->active_list); | |
1700 | INIT_LIST_HEAD(&atchan->queue); | |
1701 | INIT_LIST_HEAD(&atchan->free_list); | |
1702 | ||
1703 | tasklet_init(&atchan->tasklet, atc_tasklet, | |
1704 | (unsigned long)atchan); | |
bda3a47c | 1705 | atc_enable_chan_irq(atdma, i); |
dc78baa2 NF |
1706 | } |
1707 | ||
1708 | /* set base routines */ | |
1709 | atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; | |
1710 | atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; | |
07934481 | 1711 | atdma->dma_common.device_tx_status = atc_tx_status; |
dc78baa2 NF |
1712 | atdma->dma_common.device_issue_pending = atc_issue_pending; |
1713 | atdma->dma_common.dev = &pdev->dev; | |
1714 | ||
1715 | /* set prep routines based on capability */ | |
1716 | if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) | |
1717 | atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; | |
1718 | ||
d7db8080 | 1719 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { |
808347f6 | 1720 | atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; |
d7db8080 NF |
1721 | /* controller can do slave DMA: can trigger cyclic transfers */ |
1722 | dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); | |
53830cc7 | 1723 | atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; |
4facfe7f MR |
1724 | atdma->dma_common.device_config = atc_config; |
1725 | atdma->dma_common.device_pause = atc_pause; | |
1726 | atdma->dma_common.device_resume = atc_resume; | |
1727 | atdma->dma_common.device_terminate_all = atc_terminate_all; | |
816070ed LD |
1728 | atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS; |
1729 | atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS; | |
1730 | atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | |
1731 | atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | |
d7db8080 | 1732 | } |
808347f6 | 1733 | |
265567fb TF |
1734 | if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)) |
1735 | atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg; | |
1736 | ||
dc78baa2 NF |
1737 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
1738 | ||
265567fb | 1739 | dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n", |
dc78baa2 NF |
1740 | dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", |
1741 | dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", | |
265567fb | 1742 | dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "", |
02f88be9 | 1743 | plat_dat->nr_channels); |
dc78baa2 NF |
1744 | |
1745 | dma_async_device_register(&atdma->dma_common); | |
1746 | ||
bbe89c8e LD |
1747 | /* |
1748 | * Do not return an error if the dmac node is not present, so as not to | |
1749 | * break the existing way of requesting a channel with | |
1750 | * dma_request_channel(). | |
1751 | */ | |
1752 | if (pdev->dev.of_node) { | |
1753 | err = of_dma_controller_register(pdev->dev.of_node, | |
1754 | at_dma_xlate, atdma); | |
1755 | if (err) { | |
1756 | dev_err(&pdev->dev, "could not register of_dma_controller\n"); | |
1757 | goto err_of_dma_controller_register; | |
1758 | } | |
1759 | } | |
1760 | ||
dc78baa2 NF |
1761 | return 0; |
1762 | ||
bbe89c8e LD |
1763 | err_of_dma_controller_register: |
1764 | dma_async_device_unregister(&atdma->dma_common); | |
1765 | dma_pool_destroy(atdma->dma_desc_pool); | |
dc78baa2 | 1766 | err_pool_create: |
dc78baa2 NF |
1767 | free_irq(platform_get_irq(pdev, 0), atdma); |
1768 | err_irq: | |
f784d9c9 BB |
1769 | clk_disable_unprepare(atdma->clk); |
1770 | err_clk_prepare: | |
dc78baa2 NF |
1771 | clk_put(atdma->clk); |
1772 | err_clk: | |
1773 | iounmap(atdma->regs); | |
1774 | atdma->regs = NULL; | |
1775 | err_release_r: | |
1776 | release_mem_region(io->start, size); | |
1777 | err_kfree: | |
1778 | kfree(atdma); | |
1779 | return err; | |
1780 | } | |
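After probe registers the dmaengine device, peripheral drivers use it through the generic dmaengine client API; the calls below end up in the hooks installed above (atc_config, atc_prep_slave_sg, atc_tx_submit, atc_issue_pending). This is a minimal sketch for a hypothetical peripheral, assuming <linux/dmaengine.h>; the "tx" channel name, the FIFO address handling and the foo_* names are assumptions, and error unwinding is omitted for brevity.

static void foo_dma_done(void *param)
{
	/* transfer finished: wake up the peripheral driver, etc. */
}

static int foo_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			dma_addr_t fifo_addr)
{
	struct dma_chan *chan;
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *desc;

	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	/* routed to atc_config() through device_config */
	dmaengine_slave_config(chan, &cfg);

	/* builds a one-entry sg list and calls atc_prep_slave_sg() */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = foo_dma_done;
	dmaengine_submit(desc);		/* queues through atc_tx_submit() */
	dma_async_issue_pending(chan);	/* kicks atc_issue_pending() */

	return 0;
}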
1781 | ||
1d1bbd30 | 1782 | static int at_dma_remove(struct platform_device *pdev) |
dc78baa2 NF |
1783 | { |
1784 | struct at_dma *atdma = platform_get_drvdata(pdev); | |
1785 | struct dma_chan *chan, *_chan; | |
1786 | struct resource *io; | |
1787 | ||
1788 | at_dma_off(atdma); | |
1789 | dma_async_device_unregister(&atdma->dma_common); | |
1790 | ||
1791 | dma_pool_destroy(atdma->dma_desc_pool); | |
dc78baa2 NF |
1792 | free_irq(platform_get_irq(pdev, 0), atdma); |
1793 | ||
1794 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, | |
1795 | device_node) { | |
1796 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1797 | ||
1798 | /* Disable interrupts */ | |
bda3a47c | 1799 | atc_disable_chan_irq(atdma, chan->chan_id); |
dc78baa2 NF |
1800 | |
1801 | tasklet_kill(&atchan->tasklet); | |
1802 | list_del(&chan->device_node); | |
1803 | } | |
1804 | ||
f784d9c9 | 1805 | clk_disable_unprepare(atdma->clk); |
dc78baa2 NF |
1806 | clk_put(atdma->clk); |
1807 | ||
1808 | iounmap(atdma->regs); | |
1809 | atdma->regs = NULL; | |
1810 | ||
1811 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
114df7d6 | 1812 | release_mem_region(io->start, resource_size(io)); |
dc78baa2 NF |
1813 | |
1814 | kfree(atdma); | |
1815 | ||
1816 | return 0; | |
1817 | } | |
1818 | ||
1819 | static void at_dma_shutdown(struct platform_device *pdev) | |
1820 | { | |
1821 | struct at_dma *atdma = platform_get_drvdata(pdev); | |
1822 | ||
1823 | at_dma_off(atdma); | |
f784d9c9 | 1824 | clk_disable_unprepare(atdma->clk); |
dc78baa2 NF |
1825 | } |
1826 | ||
c0ba5947 NF |
1827 | static int at_dma_prepare(struct device *dev) |
1828 | { | |
1829 | struct platform_device *pdev = to_platform_device(dev); | |
1830 | struct at_dma *atdma = platform_get_drvdata(pdev); | |
1831 | struct dma_chan *chan, *_chan; | |
1832 | ||
1833 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, | |
1834 | device_node) { | |
1835 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1836 | /* defer suspend (return -EAGAIN) until pending transactions complete, except for cyclic channels */ | |
3c477482 | 1837 | if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) |
c0ba5947 NF |
1838 | return -EAGAIN; |
1839 | } | |
1840 | return 0; | |
1841 | } | |
1842 | ||
1843 | static void atc_suspend_cyclic(struct at_dma_chan *atchan) | |
1844 | { | |
1845 | struct dma_chan *chan = &atchan->chan_common; | |
1846 | ||
1847 | /* The channel should have been paused by its user; | |
1848 | * pause it here anyway if that has not already been done. */ | |
3c477482 | 1849 | if (!atc_chan_is_paused(atchan)) { |
c0ba5947 NF |
1850 | dev_warn(chan2dev(chan), |
1851 | "cyclic channel not paused, should be done by channel user\n"); | |
4facfe7f | 1852 | atc_pause(chan); |
c0ba5947 NF |
1853 | } |
1854 | ||
1855 | /* now preserve additional data for cyclic operations */ | |
1856 | /* next descriptor address in the cyclic list */ | |
1857 | atchan->save_dscr = channel_readl(atchan, DSCR); | |
1858 | ||
1859 | vdbg_dump_regs(atchan); | |
1860 | } | |
1861 | ||
33f82d14 | 1862 | static int at_dma_suspend_noirq(struct device *dev) |
dc78baa2 | 1863 | { |
33f82d14 DW |
1864 | struct platform_device *pdev = to_platform_device(dev); |
1865 | struct at_dma *atdma = platform_get_drvdata(pdev); | |
c0ba5947 | 1866 | struct dma_chan *chan, *_chan; |
dc78baa2 | 1867 | |
c0ba5947 NF |
1868 | /* preserve data */ |
1869 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, | |
1870 | device_node) { | |
1871 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1872 | ||
3c477482 | 1873 | if (atc_chan_is_cyclic(atchan)) |
c0ba5947 NF |
1874 | atc_suspend_cyclic(atchan); |
1875 | atchan->save_cfg = channel_readl(atchan, CFG); | |
1876 | } | |
1877 | atdma->save_imr = dma_readl(atdma, EBCIMR); | |
1878 | ||
1879 | /* disable DMA controller */ | |
1880 | at_dma_off(atdma); | |
f784d9c9 | 1881 | clk_disable_unprepare(atdma->clk); |
dc78baa2 NF |
1882 | return 0; |
1883 | } | |
1884 | ||
c0ba5947 NF |
1885 | static void atc_resume_cyclic(struct at_dma_chan *atchan) |
1886 | { | |
1887 | struct at_dma *atdma = to_at_dma(atchan->chan_common.device); | |
1888 | ||
1889 | /* restore channel status for the cyclic descriptor list: | |
1890 | * restart from the descriptor that was next in the list at suspend time */ | |
1891 | channel_writel(atchan, SADDR, 0); | |
1892 | channel_writel(atchan, DADDR, 0); | |
1893 | channel_writel(atchan, CTRLA, 0); | |
1894 | channel_writel(atchan, CTRLB, 0); | |
1895 | channel_writel(atchan, DSCR, atchan->save_dscr); | |
1896 | dma_writel(atdma, CHER, atchan->mask); | |
1897 | ||
1898 | /* the channel pause status should be cleared by the channel user; | |
1899 | * we cannot take the initiative to do it here */ | |
1900 | ||
1901 | vdbg_dump_regs(atchan); | |
1902 | } | |
1903 | ||
33f82d14 | 1904 | static int at_dma_resume_noirq(struct device *dev) |
dc78baa2 | 1905 | { |
33f82d14 DW |
1906 | struct platform_device *pdev = to_platform_device(dev); |
1907 | struct at_dma *atdma = platform_get_drvdata(pdev); | |
c0ba5947 | 1908 | struct dma_chan *chan, *_chan; |
dc78baa2 | 1909 | |
c0ba5947 | 1910 | /* bring back DMA controller */ |
f784d9c9 | 1911 | clk_prepare_enable(atdma->clk); |
dc78baa2 | 1912 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
c0ba5947 NF |
1913 | |
1914 | /* clear any pending interrupt */ | |
1915 | while (dma_readl(atdma, EBCISR)) | |
1916 | cpu_relax(); | |
1917 | ||
1918 | /* restore saved data */ | |
1919 | dma_writel(atdma, EBCIER, atdma->save_imr); | |
1920 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, | |
1921 | device_node) { | |
1922 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | |
1923 | ||
1924 | channel_writel(atchan, CFG, atchan->save_cfg); | |
3c477482 | 1925 | if (atc_chan_is_cyclic(atchan)) |
c0ba5947 NF |
1926 | atc_resume_cyclic(atchan); |
1927 | } | |
dc78baa2 | 1928 | return 0; |
dc78baa2 NF |
1929 | } |
1930 | ||
47145210 | 1931 | static const struct dev_pm_ops at_dma_dev_pm_ops = { |
c0ba5947 | 1932 | .prepare = at_dma_prepare, |
33f82d14 DW |
1933 | .suspend_noirq = at_dma_suspend_noirq, |
1934 | .resume_noirq = at_dma_resume_noirq, | |
1935 | }; | |
1936 | ||
dc78baa2 | 1937 | static struct platform_driver at_dma_driver = { |
1d1bbd30 | 1938 | .remove = at_dma_remove, |
dc78baa2 | 1939 | .shutdown = at_dma_shutdown, |
67348450 | 1940 | .id_table = atdma_devtypes, |
dc78baa2 NF |
1941 | .driver = { |
1942 | .name = "at_hdmac", | |
33f82d14 | 1943 | .pm = &at_dma_dev_pm_ops, |
c5115953 | 1944 | .of_match_table = of_match_ptr(atmel_dma_dt_ids), |
dc78baa2 NF |
1945 | }, |
1946 | }; | |
1947 | ||
1948 | static int __init at_dma_init(void) | |
1949 | { | |
1950 | return platform_driver_probe(&at_dma_driver, at_dma_probe); | |
1951 | } | |
93d0bec2 | 1952 | subsys_initcall(at_dma_init); |
dc78baa2 NF |
1953 | |
1954 | static void __exit at_dma_exit(void) | |
1955 | { | |
1956 | platform_driver_unregister(&at_dma_driver); | |
1957 | } | |
1958 | module_exit(at_dma_exit); | |
1959 | ||
1960 | MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); | |
1961 | MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); | |
1962 | MODULE_LICENSE("GPL"); | |
1963 | MODULE_ALIAS("platform:at_hdmac"); |