/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* Bytes per row, a video-specific register
 * placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14


#define TIMB_DMA_DESC_SIZE	8

struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_transfer_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[];
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

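/*
 * td_probe() allocates the channels[] array in the same allocation,
 * directly after struct timb_dma, so stepping back chan_id channel
 * entries plus the struct timb_dma header recovers the parent device.
 */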
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

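/*
 * Build one 8-byte hardware descriptor element, least-significant byte
 * first: bytes 4-7 hold the 32-bit bus address, bytes 2-3 the 16-bit
 * length, and byte 0 the control flags (0x21 = valid transfer element,
 * 0x02 = last element of the list).
 */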
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}

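/*
 * Point the engine at the first descriptor on the active list. An RX
 * channel also gets its bytes-per-line value and an explicit enable;
 * a TX channel only has its descriptor address registers written.
 */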
/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_DEV_TO_MEM) {

		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd;
	struct timb_dma_desc		*td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_DEV_TO_MEM)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
	*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

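/*
 * Compute the mask of channel interrupts that should be enabled: only
 * ongoing channels whose active descriptor was prepared with
 * DMA_PREP_INTERRUPT contribute a bit.
 */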
static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
					struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}

static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "%s: queued %u\n", __func__,
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

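/*
 * Allocate a software descriptor together with its hardware descriptor
 * list, and map the list DMA_TO_DEVICE: the engine only ever reads the
 * descriptors, the CPU only writes them.
 */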
static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto out;
	}

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor list\n");
		goto err;
	}

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

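/*
 * Take the first descriptor from the free list that the client has
 * ACKed; an un-ACKed descriptor may still be referenced by the
 * submitter and cannot be reused yet.
 */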
static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			else {
				dev_err(chan2dev(chan),
					"Couldn't allocate any descriptors\n");
				return -ENOMEM;
			}
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

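/*
 * A minimal client sketch (channel and scatterlist setup omitted, the
 * names are illustrative rather than part of this driver):
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				      DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		cookie = dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */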
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage + TIMB_DMA_DESC_SIZE > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_MEM_TO_DEV);

	return &td_desc->txd;
}

static int td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running transfer */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

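/*
 * Deferred interrupt work: ack the status bits for the channels that
 * are expected to complete, finish their active descriptors, start
 * anything queued, then recompute and re-enable the interrupt mask.
 */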
static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

static int td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_tx_status = td_tx_status;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}

static int td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
	},
	.probe	= td_probe,
	.remove	= td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:" DRIVER_NAME);