/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <linux/timb_dma.h>

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* Bytes per row; a video-specific register placed after the TX registers */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14

#define TIMB_DMA_DESC_SIZE	8

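/*
 * Hardware descriptor layout as encoded by td_fill_desc() below, one
 * 8-byte element per sg entry, little endian:
 *
 *	byte 0:    control (0x21 = transfer + valid; bit 1 set marks the
 *	           last element of a chain)
 *	byte 1:    reserved, always 0
 *	bytes 2-3: transfer length in bytes (16 bit)
 *	bytes 4-7: DMA bus address (32 bit)
 */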
struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	dma_cookie_t		last_completed_cookie;
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_data_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[0];
};
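
/*
 * chan2dev() returns the channel's class device. Its parent is the DMA
 * device's struct device (here the platform device), so its grandparent,
 * returned by chan2dmadev(), is the timberdale parent device that actually
 * masters DMA; that is the device the dma_map_*() calls below operate on.
 */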

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

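/*
 * td_probe() allocates struct timb_dma and all of its channels in a single
 * kzalloc(), with the channels laid out in the trailing channels[] array,
 * so the containing struct timb_dma can be recovered from a channel pointer
 * with plain pointer arithmetic.
 */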
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
	bool single)
{
	dma_addr_t addr;
	int len;

	addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
		dma_desc[4];

	len = (dma_desc[3] << 8) | dma_desc[2];

	if (single)
		dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
			td_chan->direction);
	else
		dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
			td_chan->direction);
}

static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
{
	struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
		struct timb_dma_chan, chan);
	u8 *descs;

	for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
		__td_unmap_desc(td_chan, descs, single);
		if (descs[0] & 0x02)
			break;
	}
}

static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHORT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
		dma_desc, (void *)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}
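
/*
 * For illustration: an sg element mapped at bus address 0x12345678 with a
 * length of 0x100 bytes, filled in as the last element of a chain, becomes
 * the descriptor bytes 23 00 00 01 78 56 34 12 (control, reserved, length
 * low/high, address low..high).
 */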

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_FROM_DEVICE) {
		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd;
	struct timb_dma_desc *td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_FROM_DEVICE)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
	*/
	td_chan->last_completed_cookie = txd->cookie;
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		__td_unmap_descs(td_desc,
			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
					struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}

static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);

	cookie = txd->chan->cookie;
	if (++cookie < 0)
		cookie = 1;
	txd->chan->cookie = cookie;
	txd->cookie = cookie;

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "%s: queued %u\n", __func__,
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto out;
	}

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor list\n");
		goto err;
	}

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

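/*
 * Descriptors are preallocated by td_alloc_chan_resources() and recycled
 * through free_list: td_desc_put() returns a descriptor to the pool and
 * td_desc_get() hands one out again, but only once the client has ACKed
 * it, as the async_tx API requires.
 */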
static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;

			dev_err(chan2dev(chan),
				"Couldn't allocate any descriptors\n");
			return -ENOMEM;
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	td_chan->last_completed_cookie = 1;
	chan->cookie = 1;
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	last_complete = td_chan->last_completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	dev_dbg(chan2dev(chan),
		"%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
		__func__, ret, last_complete, last_used);

	return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage + TIMB_DMA_DESC_SIZE > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}
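
/*
 * Illustrative client usage (a sketch, not part of this driver; filter_fn,
 * filter_param, done_fn, done_arg and the mapped sg list are hypothetical):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, filter_fn, filter_param);
 *	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *		DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
 *	txd->callback = done_fn;
 *	txd->callback_param = done_arg;
 *	txd->tx_submit(txd);
 *	chan->device->device_issue_pending(chan);
 */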

static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running transfer */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

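/*
 * Interrupt flow: td_irq() sees a pending bit in IPR, masks all channels
 * via IER and schedules the tasklet. The tasklet acks the ISR bits of the
 * channels that were expecting an interrupt, completes their active
 * descriptors, starts whatever is queued, and finally rebuilds IER from
 * the channels that still have an interrupt-requesting descriptor running.
 */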
static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

static int __devinit td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_is_tx_complete = td_is_tx_complete;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_control = td_control;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		td_chan->chan.cookie = 1;
		td_chan->chan.chan_id = i;
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
			DMA_TO_DEVICE;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}

static int __devexit td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
	.probe	= td_probe,
	.remove	= __devexit_p(td_remove),
};

static int __init td_init(void)
{
	return platform_driver_register(&td_driver);
}
module_init(td_init);

static void __exit td_exit(void)
{
	platform_driver_unregister(&td_driver);
}
module_exit(td_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);