/*
 * IMG Multi-threaded DMA Controller (MDC)
 *
 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MDC_MAX_DMA_CHANNELS 32

#define MDC_GENERAL_CONFIG 0x000
#define MDC_GENERAL_CONFIG_LIST_IEN BIT(31)
#define MDC_GENERAL_CONFIG_IEN BIT(29)
#define MDC_GENERAL_CONFIG_LEVEL_INT BIT(28)
#define MDC_GENERAL_CONFIG_INC_W BIT(12)
#define MDC_GENERAL_CONFIG_INC_R BIT(8)
#define MDC_GENERAL_CONFIG_PHYSICAL_W BIT(7)
#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT 4
#define MDC_GENERAL_CONFIG_WIDTH_W_MASK 0x7
#define MDC_GENERAL_CONFIG_PHYSICAL_R BIT(3)
#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT 0
#define MDC_GENERAL_CONFIG_WIDTH_R_MASK 0x7

#define MDC_READ_PORT_CONFIG 0x004
#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT 28
#define MDC_READ_PORT_CONFIG_STHREAD_MASK 0xf
#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT 24
#define MDC_READ_PORT_CONFIG_RTHREAD_MASK 0xf
#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT 16
#define MDC_READ_PORT_CONFIG_WTHREAD_MASK 0xf
#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT 4
#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK 0xff
#define MDC_READ_PORT_CONFIG_DREQ_ENABLE BIT(1)

#define MDC_READ_ADDRESS 0x008

#define MDC_WRITE_ADDRESS 0x00c

#define MDC_TRANSFER_SIZE 0x010
#define MDC_TRANSFER_SIZE_MASK 0xffffff

#define MDC_LIST_NODE_ADDRESS 0x014

#define MDC_CMDS_PROCESSED 0x018
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT 16
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK 0x3f
#define MDC_CMDS_PROCESSED_INT_ACTIVE BIT(8)
#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT 0
#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK 0x3f

#define MDC_CONTROL_AND_STATUS 0x01c
#define MDC_CONTROL_AND_STATUS_CANCEL BIT(20)
#define MDC_CONTROL_AND_STATUS_LIST_EN BIT(4)
#define MDC_CONTROL_AND_STATUS_EN BIT(0)

#define MDC_ACTIVE_TRANSFER_SIZE 0x030

#define MDC_GLOBAL_CONFIG_A 0x900
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT 16
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK 0xff
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT 8
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK 0xff
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT 0
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK 0xff

struct mdc_hw_list_desc {
	u32 gen_conf;
	u32 readport_conf;
	u32 read_addr;
	u32 write_addr;
	u32 xfer_size;
	u32 node_addr;
	u32 cmds_done;
	u32 ctrl_status;
	/*
	 * Not part of the list descriptor, but instead used by the CPU to
	 * traverse the list.
	 */
	struct mdc_hw_list_desc *next_desc;
};

struct mdc_tx_desc {
	struct mdc_chan *chan;
	struct virt_dma_desc vd;
	dma_addr_t list_phys;
	struct mdc_hw_list_desc *list;
	bool cyclic;
	bool cmd_loaded;
	unsigned int list_len;
	unsigned int list_period_len;
	size_t list_xfer_size;
	unsigned int list_cmds_done;
};

struct mdc_chan {
	struct mdc_dma *mdma;
	struct virt_dma_chan vc;
	struct dma_slave_config config;
	struct mdc_tx_desc *desc;
	int irq;
	unsigned int periph;
	unsigned int thread;
	unsigned int chan_nr;
};

struct mdc_dma_soc_data {
	void (*enable_chan)(struct mdc_chan *mchan);
	void (*disable_chan)(struct mdc_chan *mchan);
};

struct mdc_dma {
	struct dma_device dma_dev;
	void __iomem *regs;
	struct clk *clk;
	struct dma_pool *desc_pool;
	struct regmap *periph_regs;
	spinlock_t lock;
	unsigned int nr_threads;
	unsigned int nr_channels;
	unsigned int bus_width;
	unsigned int max_burst_mult;
	unsigned int max_xfer_size;
	const struct mdc_dma_soc_data *soc;
	struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
};

static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
{
	return readl(mdma->regs + reg);
}

static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
{
	writel(val, mdma->regs + reg);
}

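/* Each channel's registers occupy a 0x40-byte block within the MDC. */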
static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
{
	return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
}

static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
{
	mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
}

static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
{
	return container_of(to_virt_chan(c), struct mdc_chan, vc);
}

static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
{
	struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);

	return container_of(vdesc, struct mdc_tx_desc, vd);
}

static inline struct device *mdma2dev(struct mdc_dma *mdma)
{
	return mdma->dma_dev.dev;
}

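/* The hardware encodes transfer widths as log2 of the width in bytes. */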
static inline unsigned int to_mdc_width(unsigned int bytes)
{
	return ffs(bytes) - 1;
}

static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
				      unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
}

static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
				       unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
}

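/*
 * Fill in a single hardware list descriptor: addresses, transfer size
 * (stored as size - 1), per-direction increment and width settings, and a
 * burst size capped so unaligned transfers use one burst multiple less
 * than the maximum.
 */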
static void mdc_list_desc_config(struct mdc_chan *mchan,
				 struct mdc_hw_list_desc *ldesc,
				 enum dma_transfer_direction dir,
				 dma_addr_t src, dma_addr_t dst, size_t len)
{
	struct mdc_dma *mdma = mchan->mdma;
	unsigned int max_burst, burst_size;

	ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	ldesc->readport_conf =
		(mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	ldesc->read_addr = src;
	ldesc->write_addr = dst;
	ldesc->xfer_size = len - 1;
	ldesc->node_addr = 0;
	ldesc->cmds_done = 0;
	ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
		MDC_CONTROL_AND_STATUS_EN;
	ldesc->next_desc = NULL;

	if (IS_ALIGNED(dst, mdma->bus_width) &&
	    IS_ALIGNED(src, mdma->bus_width))
		max_burst = mdma->bus_width * mdma->max_burst_mult;
	else
		max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);

	if (dir == DMA_MEM_TO_DEV) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
		burst_size = min(max_burst, mchan->config.dst_maxburst *
				 mchan->config.dst_addr_width);
	} else if (dir == DMA_DEV_TO_MEM) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mchan->config.src_addr_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = min(max_burst, mchan->config.src_maxburst *
				 mchan->config.src_addr_width);
	} else {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
			MDC_GENERAL_CONFIG_INC_W;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = max_burst;
	}
	ldesc->readport_conf |= (burst_size - 1) <<
		MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
}

static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
{
	struct mdc_dma *mdma = mdesc->chan->mdma;
	struct mdc_hw_list_desc *curr, *next;
	dma_addr_t curr_phys, next_phys;

	curr = mdesc->list;
	curr_phys = mdesc->list_phys;
	while (curr) {
		next = curr->next_desc;
		next_phys = curr->node_addr;
		dma_pool_free(mdma->desc_pool, curr, curr_phys);
		curr = next;
		curr_phys = next_phys;
	}
}

static void mdc_desc_free(struct virt_dma_desc *vd)
{
	struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);

	mdc_list_desc_free(mdesc);
	kfree(mdesc);
}

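/*
 * Memory-to-memory transfers larger than max_xfer_size are split into a
 * chain of hardware list descriptors, each covering at most max_xfer_size
 * bytes.
 */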
static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;

	if (!len)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->list_xfer_size = len;

	while (len > 0) {
		size_t xfer_size;

		curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
		if (!curr)
			goto free_desc;

		if (prev) {
			prev->node_addr = curr_phys;
			prev->next_desc = curr;
		} else {
			mdesc->list_phys = curr_phys;
			mdesc->list = curr;
		}

		xfer_size = min_t(size_t, mdma->max_xfer_size, len);

		mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
				     xfer_size);

		prev = curr;
		prev_phys = curr_phys;

		mdesc->list_len++;
		src += xfer_size;
		dest += xfer_size;
		len -= xfer_size;
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static int mdc_check_slave_width(struct mdc_chan *mchan,
				 enum dma_transfer_direction dir)
{
	enum dma_slave_buswidth width;

	if (dir == DMA_MEM_TO_DEV)
		width = mchan->config.dst_addr_width;
	else
		width = mchan->config.src_addr_width;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		break;
	default:
		return -EINVAL;
	}

	if (width > mchan->mdma->bus_width)
		return -EINVAL;

	return 0;
}

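/*
 * Cyclic transfers build the same kind of descriptor chain, then close the
 * loop by pointing the last node back at the first so the hardware repeats
 * the buffer indefinitely.
 */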
static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;

	if (!buf_len || !period_len)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->cyclic = true;
	mdesc->list_xfer_size = buf_len;
	mdesc->list_period_len = DIV_ROUND_UP(period_len,
					      mdma->max_xfer_size);

	while (buf_len > 0) {
		size_t remainder = min(period_len, buf_len);

		while (remainder > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  remainder);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir,
						     buf_addr,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf_addr,
						     xfer_size);
			}

			prev = curr;
			prev_phys = curr_phys;

			mdesc->list_len++;
			buf_addr += xfer_size;
			buf_len -= xfer_size;
			remainder -= xfer_size;
		}
	}
	prev->node_addr = mdesc->list_phys;

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct scatterlist *sg;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;
	unsigned int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);

		while (buf_len > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  buf_len);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir, buf,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf, xfer_size);
			}

			prev = curr;
			prev_phys = curr_phys;

			mdesc->list_len++;
			mdesc->list_xfer_size += xfer_size;
			buf += xfer_size;
			buf_len -= xfer_size;
		}
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

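/*
 * Take the next descriptor off the virtual channel's queue and start it:
 * enable the channel (on Pistachio this routes the peripheral request),
 * program the channel registers, and set LIST_EN to begin fetching the
 * hardware list. Called with vc.lock held.
 */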
static void mdc_issue_desc(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;
	struct virt_dma_desc *vd;
	struct mdc_tx_desc *mdesc;
	u32 val;

	vd = vchan_next_desc(&mchan->vc);
	if (!vd)
		return;

	list_del(&vd->node);

	mdesc = to_mdc_desc(&vd->tx);
	mchan->desc = mdesc;

	dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
		mchan->chan_nr);

	mdma->soc->enable_chan(mchan);

	val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
	val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
	val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
	mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
	val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
	val |= MDC_CONTROL_AND_STATUS_LIST_EN;
	mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
}

static void mdc_issue_pending(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
		mdc_issue_desc(mchan);
	spin_unlock_irqrestore(&mchan->vc.lock, flags);
}

static enum dma_status mdc_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_tx_desc *mdesc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	size_t bytes = 0;
	int ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	vd = vchan_find_desc(&mchan->vc, cookie);
	if (vd) {
		mdesc = to_mdc_desc(&vd->tx);
		bytes = mdesc->list_xfer_size;
	} else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
		struct mdc_hw_list_desc *ldesc;
		u32 val1, val2, done, processed, residue;
		int i, cmds;

		mdesc = mchan->desc;

		/*
		 * Determine the number of commands that haven't been
		 * processed (handled by the IRQ handler) yet.
		 */
		do {
			val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
			residue = mdc_chan_readl(mchan,
						 MDC_ACTIVE_TRANSFER_SIZE);
			val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
		} while (val1 != val2);

		done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
		processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
		cmds = (done - processed) %
			(MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);

		/*
		 * If the command loaded event hasn't been processed yet, then
		 * the difference above includes an extra command.
		 */
		if (!mdesc->cmd_loaded)
			cmds--;
		else
			cmds += mdesc->list_cmds_done;

		bytes = mdesc->list_xfer_size;
		ldesc = mdesc->list;
		for (i = 0; i < cmds; i++) {
			bytes -= ldesc->xfer_size + 1;
			ldesc = ldesc->next_desc;
		}
		if (ldesc) {
			if (residue != MDC_TRANSFER_SIZE_MASK)
				bytes -= ldesc->xfer_size - residue;
			else
				bytes -= ldesc->xfer_size + 1;
		}
	}
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	dma_set_residue(txstate, bytes);

	return ret;
}

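/*
 * Cancel any transfer in flight with the CANCEL bit, then free the active
 * descriptor and everything still queued on the virtual channel.
 */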
static int mdc_terminate_all(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_tx_desc *mdesc;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&mchan->vc.lock, flags);

	mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
			MDC_CONTROL_AND_STATUS);

	mdesc = mchan->desc;
	mchan->desc = NULL;
	vchan_get_all_descriptors(&mchan->vc, &head);

	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	if (mdesc)
		mdc_desc_free(&mdesc->vd);
	vchan_dma_desc_free_list(&mchan->vc, &head);

	return 0;
}

static int mdc_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	mchan->config = *config;
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	return 0;
}

static void mdc_free_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;

	mdc_terminate_all(chan);

	mdma->soc->disable_chan(mchan);
}

static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
	struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
	struct mdc_tx_desc *mdesc;
	u32 val, processed, done1, done2;
	unsigned int i;

	spin_lock(&mchan->vc.lock);

	val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
	processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
		MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
	/*
	 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
	 * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
	 * didn't miss a command completion.
	 */
	do {
		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
		done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
		val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
			  MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
			 MDC_CMDS_PROCESSED_INT_ACTIVE);
		val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
		mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
		done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
	} while (done1 != done2);

	dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);

	mdesc = mchan->desc;
	if (!mdesc) {
		dev_warn(mdma2dev(mchan->mdma),
			 "IRQ with no active descriptor on channel %d\n",
			 mchan->chan_nr);
		goto out;
	}

	for (i = processed; i != done1;
	     i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
		/*
		 * The first interrupt in a transfer indicates that the
		 * command list has been loaded, not that a command has
		 * been completed.
		 */
		if (!mdesc->cmd_loaded) {
			mdesc->cmd_loaded = true;
			continue;
		}

		mdesc->list_cmds_done++;
		if (mdesc->cyclic) {
			mdesc->list_cmds_done %= mdesc->list_len;
			if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
				vchan_cyclic_callback(&mdesc->vd);
		} else if (mdesc->list_cmds_done == mdesc->list_len) {
			mchan->desc = NULL;
			vchan_cookie_complete(&mdesc->vd);
			mdc_issue_desc(mchan);
			break;
		}
	}
out:
	spin_unlock(&mchan->vc.lock);

	return IRQ_HANDLED;
}

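/*
 * Translate a three-cell DT DMA specifier: cell 0 is the peripheral (DMA
 * request) number, cell 1 is a bitmask of the channels the request may be
 * routed to, and cell 2 is the thread ID to use. A client node would look
 * something like (hypothetical peripheral 9, any channel, thread 0):
 * dmas = <&mdc 9 0xffffffff 0>;
 */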
static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	struct mdc_dma *mdma = ofdma->of_dma_data;
	struct dma_chan *chan;

	if (dma_spec->args_count != 3)
		return NULL;

	list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
		struct mdc_chan *mchan = to_mdc_chan(chan);

		if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
			continue;
		if (dma_get_slave_channel(chan)) {
			mchan->periph = dma_spec->args[0];
			mchan->thread = dma_spec->args[2];
			return chan;
		}
	}

	return NULL;
}

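/*
 * On Pistachio, each CR_PERIPH_DMA_ROUTE register packs four 6-bit route
 * fields, one per channel, selecting which peripheral drives that channel.
 */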
#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch) (0x120 + 0x4 * ((ch) / 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch) (8 * ((ch) % 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK 0x3f

static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   mchan->periph <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
}

static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   0);
}

static const struct mdc_dma_soc_data pistachio_mdc_data = {
	.enable_chan = pistachio_mdc_enable_chan,
	.disable_chan = pistachio_mdc_disable_chan,
};

static const struct of_device_id mdc_dma_of_match[] = {
	{ .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
	{ },
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);

static int mdc_dma_probe(struct platform_device *pdev)
{
	struct mdc_dma *mdma;
	struct resource *res;
	const struct of_device_id *match;
	unsigned int i;
	u32 val;
	int ret;

	mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
	if (!mdma)
		return -ENOMEM;
	platform_set_drvdata(pdev, mdma);

	match = of_match_device(mdc_dma_of_match, &pdev->dev);
	mdma->soc = match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mdma->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdma->regs))
		return PTR_ERR(mdma->regs);

	mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "img,cr-periph");
	if (IS_ERR(mdma->periph_regs))
		return PTR_ERR(mdma->periph_regs);

	mdma->clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(mdma->clk))
		return PTR_ERR(mdma->clk);

	ret = clk_prepare_enable(mdma->clk);
	if (ret)
		return ret;

	dma_cap_zero(mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);

	val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
	mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
		MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
	mdma->nr_threads =
		1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
		      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
	mdma->bus_width =
		(1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
		       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
	/*
	 * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
	 * are supported, this makes it possible for the value reported in
	 * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
	 * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
	 * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this
	 * ambiguity, restrict transfer sizes to one bus-width less than the
	 * actual maximum.
	 */
	mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;

	of_property_read_u32(pdev->dev.of_node, "dma-channels",
			     &mdma->nr_channels);
	ret = of_property_read_u32(pdev->dev.of_node,
				   "img,max-burst-multiplier",
				   &mdma->max_burst_mult);
	if (ret)
		goto disable_clk;

	mdma->dma_dev.dev = &pdev->dev;
	mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
	mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
	mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
	mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
	mdma->dma_dev.device_tx_status = mdc_tx_status;
	mdma->dma_dev.device_issue_pending = mdc_issue_pending;
	mdma->dma_dev.device_terminate_all = mdc_terminate_all;
	mdma->dma_dev.device_config = mdc_slave_config;

	mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	for (i = 1; i <= mdma->bus_width; i <<= 1) {
		mdma->dma_dev.src_addr_widths |= BIT(i);
		mdma->dma_dev.dst_addr_widths |= BIT(i);
	}

	INIT_LIST_HEAD(&mdma->dma_dev.channels);
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		mchan->mdma = mdma;
		mchan->chan_nr = i;
		mchan->irq = platform_get_irq(pdev, i);
		if (mchan->irq < 0) {
			ret = mchan->irq;
			goto disable_clk;
		}
		ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
				       IRQ_TYPE_LEVEL_HIGH,
				       dev_name(&pdev->dev), mchan);
		if (ret < 0)
			goto disable_clk;

		mchan->vc.desc_free = mdc_desc_free;
		vchan_init(&mchan->vc, &mdma->dma_dev);
	}

	mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
					   sizeof(struct mdc_hw_list_desc),
					   4, 0);
	if (!mdma->desc_pool) {
		ret = -ENOMEM;
		goto disable_clk;
	}

	ret = dma_async_device_register(&mdma->dma_dev);
	if (ret)
		goto disable_clk;

	ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
	if (ret)
		goto unregister;

	dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
		 mdma->nr_channels, mdma->nr_threads);

	return 0;

unregister:
	dma_async_device_unregister(&mdma->dma_dev);
disable_clk:
	clk_disable_unprepare(mdma->clk);
	return ret;
}

static int mdc_dma_remove(struct platform_device *pdev)
{
	struct mdc_dma *mdma = platform_get_drvdata(pdev);
	struct mdc_chan *mchan, *next;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdma->dma_dev);

	list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&mchan->vc.chan.device_node);

		synchronize_irq(mchan->irq);
		devm_free_irq(&pdev->dev, mchan->irq, mchan);

		tasklet_kill(&mchan->vc.task);
	}

	clk_disable_unprepare(mdma->clk);

	return 0;
}

static struct platform_driver mdc_dma_driver = {
	.driver = {
		.name = "img-mdc-dma",
		.of_match_table = of_match_ptr(mdc_dma_of_match),
	},
	.probe = mdc_dma_probe,
	.remove = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);

MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");