/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of burst */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

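/*
 * Invoke the completion callback of every descriptor on the list,
 * then move all of them back to the owning DMAC's free pool.
 */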
static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

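/*
 * For cyclic transfers the descriptors are not freed: run each
 * descriptor's callback, mark it PREP so it gets resubmitted, and
 * put the whole list back on the channel's work_list.
 */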
static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}

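/*
 * Push the next prepared descriptor on the work_list to the PL330
 * core. Bad descriptors are marked DONE and reported via the
 * tasklet; a full queue or dying DMAC simply stops the scan.
 */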
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

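/*
 * Channel bottom-half: collect descriptors the PL330 core has
 * finished, update the completed cookie, queue more work and keep
 * the channel thread running. Completed descriptors are either
 * recycled (cyclic) or returned to the pool.
 */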
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}

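/*
 * Completion callback invoked by the PL330 core for a finished (or
 * aborted) request: mark the descriptor DONE and defer the rest of
 * the processing to the channel tasklet.
 */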
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

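/*
 * DMA_TERMINATE_ALL flushes the channel thread and retires every
 * queued descriptor; DMA_SLAVE_CONFIG captures the peripheral FIFO
 * address, burst size and burst length for later slave transfers.
 */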
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_TO_DEVICE) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_FROM_DEVICE) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
		return -ENXIO;
	}

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

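/*
 * Report transfer status purely from the cookie counters: the last
 * completed cookie and the last issued cookie on this channel.
 */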
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

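/* Set the invariant fields of a descriptor when it enters the pool */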
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

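/* Take one free descriptor off the DMAC pool, or NULL if the pool is empty */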
static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

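/*
 * Get a descriptor for the channel: pluck one from the DMAC pool,
 * growing the pool with GFP_ATOMIC if it is empty, then bind it to
 * the channel and its peripheral request type.
 */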
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	if (peri) {
		desc->req.rqtype = peri->rqtype;
		desc->req.peri = pch->chan.chan_id;
	} else {
		desc->req.rqtype = MEMTOMEM;
		desc->req.peri = 0;
	}

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should lookout for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

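/*
 * Prepare one period of a cyclic transfer between the peripheral
 * FIFO and memory; the descriptor is reloaded from the tasklet each
 * time it completes.
 */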
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_data_direction direction)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_TO_DEVICE:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_FROM_DEVICE:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	if (peri && peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

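/*
 * Build one descriptor per scatterlist entry for a slave transfer,
 * chained into a circular list; on allocation failure the partial
 * chain is returned to the descriptor pool.
 */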
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_TO_DEVICE &&
				peri->rqtype != MEMTODEV) ||
			(direction == DMA_FROM_DEVICE &&
				peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

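/*
 * Probe: map the controller registers, grab the clock (and runtime
 * PM), register the IRQ handler, seed the descriptor pool and expose
 * every channel through the dmaengine framework.
 */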
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err1;
	}

	amba_set_drvdata(adev, pdmac);

#ifdef CONFIG_PM_RUNTIME
	/* to use the runtime PM helper functions */
	pm_runtime_enable(&adev->dev);

	/* enable the power domain */
	if (pm_runtime_get_sync(&adev->dev)) {
		dev_err(&adev->dev, "failed to get runtime pm\n");
		ret = -ENODEV;
		goto probe_err1;
	}
#else
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (pdat) {
			struct dma_pl330_peri *peri = &pdat->peri[i];

			switch (peri->rqtype) {
			case MEMTOMEM:
				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
				break;
			case MEMTODEV:
			case DEVTOMEM:
				dma_cap_set(DMA_SLAVE, pd->cap_mask);
				dma_cap_set(DMA_CYCLIC, pd->cap_mask);
				break;
			default:
				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
				continue;
			}
			pch->chan.private = peri;
		} else {
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			pch->chan.private = NULL;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->chan.chan_id = i;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		pd->chancnt++;
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

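/*
 * Remove: terminate and release every channel, tear the core driver
 * down, free the IRQ and MMIO mapping, and drop the clock/runtime PM
 * references taken at probe time.
 */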
static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put(&adev->dev);
	pm_runtime_disable(&adev->dev);
#else
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
	return;
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");