/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_VER_A7V1			1
#define SIRFSOC_DMA_VER_A7V2			2
#define SIRFSOC_DMA_VER_A6			4

#define SIRFSOC_DMA_DESCRIPTORS			16
#define SIRFSOC_DMA_CHANNELS			16
#define SIRFSOC_DMA_TABLE_NUM			256

#define SIRFSOC_DMA_CH_ADDR			0x00
#define SIRFSOC_DMA_CH_XLEN			0x04
#define SIRFSOC_DMA_CH_YLEN			0x08
#define SIRFSOC_DMA_CH_CTRL			0x0C

#define SIRFSOC_DMA_WIDTH_0			0x100
#define SIRFSOC_DMA_CH_VALID			0x140
#define SIRFSOC_DMA_CH_INT			0x144
#define SIRFSOC_DMA_INT_EN			0x148
#define SIRFSOC_DMA_INT_EN_CLR			0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL		0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR		0x154
#define SIRFSOC_DMA_WIDTH_ATLAS7		0x10
#define SIRFSOC_DMA_VALID_ATLAS7		0x14
#define SIRFSOC_DMA_INT_ATLAS7			0x18
#define SIRFSOC_DMA_INT_EN_ATLAS7		0x1c
#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7		0x20
#define SIRFSOC_DMA_CUR_DATA_ADDR		0x34
#define SIRFSOC_DMA_MUL_ATLAS7			0x38
#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7		0x158
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7	0x15C
#define SIRFSOC_DMA_IOBG_SCMD_EN		0x800
#define SIRFSOC_DMA_EARLY_RESP_SET		0x818
#define SIRFSOC_DMA_EARLY_RESP_CLR		0x81C

#define SIRFSOC_DMA_MODE_CTRL_BIT		4
#define SIRFSOC_DMA_DIR_CTRL_BIT		5
#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7	2
#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7	3
#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7		4
#define SIRFSOC_DMA_TAB_NUM_ATLAS7		7
#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7	5
#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7	25
#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT		32

#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7		BIT(0)
#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7		BIT(1)
#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7		BIT(2)
#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7		BIT(3)
#define SIRFSOC_DMA_INT_INV_INT_ATLAS7		BIT(4)
#define SIRFSOC_DMA_INT_END_INT_ATLAS7		BIT(5)
#define SIRFSOC_DMA_INT_ALL_ATLAS7		0x3F

/* the xlen and dma_width registers are expressed in 4-byte words */
#define SIRFSOC_DMA_WORD_LEN			4
#define SIRFSOC_DMA_XLEN_MAX_V1			0x800
#define SIRFSOC_DMA_XLEN_MAX_V2			0x1000

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;		/* transfer direction */
	bool		cyclic;		/* is loop DMA? */
	bool		chain;		/* is chain DMA? */
	u32		addr;		/* DMA buffer address */
	u64		chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain table */
};

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};
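
/*
 * Descriptor lifecycle across the lists above, as implemented in this
 * file: alloc_chan_resources() fills "free"; the prep_* callbacks move a
 * descriptor to "prepared"; tx_submit() moves it to "queued";
 * sirfsoc_dma_execute() moves it to "active" and starts the hardware;
 * the interrupt handler moves finished descriptors to "completed"; the
 * tasklet runs the client callbacks and returns them to "free".
 */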

struct sirfsoc_dma_regs {
	u32				ctrl[SIRFSOC_DMA_CHANNELS];
	u32				interrupt_en;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	int				type;
	void (*exec_desc)(struct sirfsoc_dma_desc *sdesc,
			  int cid, int burst_mode, void __iomem *base);
	struct sirfsoc_dma_regs		regs_save;
};

struct sirfsoc_dmadata {
	void (*exec)(struct sirfsoc_dma_desc *sdesc,
		     int cid, int burst_mode, void __iomem *base);
	int type;
};

enum sirfsoc_dma_chain_flag {
	SIRFSOC_DMA_CHAIN_NORMAL = 0x01,
	SIRFSOC_DMA_CHAIN_PAUSE = 0x02,
	SIRFSOC_DMA_CHAIN_LOOP = 0x03,
	SIRFSOC_DMA_CHAIN_END = 0x04
};
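
/*
 * Note on HW chain mode (inferred from the macros and types above, not
 * from vendor documentation): each 64-bit chain_table[] entry appears to
 * carry a buffer address in its upper 32 bits (SIRFSOC_DMA_CHAIN_ADDR_SHIFT)
 * and a sirfsoc_dma_chain_flag at bit 25 of its lower word
 * (SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7), roughly:
 *
 *	entry = ((u64)buf_addr << SIRFSOC_DMA_CHAIN_ADDR_SHIFT) |
 *		(SIRFSOC_DMA_CHAIN_NORMAL <<
 *		 SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7);
 *
 * Nothing in this file populates chain_table[], so chain-mode users are
 * expected to build the table before submitting the descriptor.
 */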

#define DRV_NAME	"sirfsoc_dma"

static int sirfsoc_dma_runtime_suspend(struct device *dev);

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);

	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc,
					int cid, int burst_mode,
					void __iomem *base)
{
	if (sdesc->chain) {
		/* DMA v2 HW chain mode */
		writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
			       (sdesc->chain <<
				SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
			       (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3,
			       base + SIRFSOC_DMA_CH_CTRL);
	} else {
		/* DMA v2 legacy mode */
		writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7);
		writel_relaxed((sdesc->width * ((sdesc->ylen + 1) >> 1)),
			       base + SIRFSOC_DMA_MUL_ATLAS7);
		writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
			       (sdesc->chain <<
				SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
			       0x3, base + SIRFSOC_DMA_CH_CTRL);
	}
	writel_relaxed(sdesc->chain ? SIRFSOC_DMA_INT_END_INT_ATLAS7 :
		       (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 |
			SIRFSOC_DMA_INT_LOOP_INT_ATLAS7),
		       base + SIRFSOC_DMA_INT_EN_ATLAS7);
	writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic)
		writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
}

static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc,
					int cid, int burst_mode,
					void __iomem *base)
{
	writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN);
	writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET);
	writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
	writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
		       (1 << cid), base + SIRFSOC_DMA_INT_EN);
	writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
		       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7),
		       base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
	}
}

static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc,
				      int cid, int burst_mode,
				      void __iomem *base)
{
	writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
	writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
		       (1 << cid), base + SIRFSOC_DMA_INT_EN);
	writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
		       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL),
		       base + SIRFSOC_DMA_CH_LOOP_CTRL);
	}
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;
	void __iomem *base;

	/*
	 * The lock is already held by the functions calling this,
	 * so we don't take it again here.
	 */
	base = sdma->base;
	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
				 node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	if (sdma->type == SIRFSOC_DMA_VER_A7V2)
		cid = 0;

	/* Start the DMA transfer */
	sdma->exec_desc(sdesc, cid, schan->mode, base);

	if (sdesc->cyclic)
		schan->happened_cyclic = schan->completed_cyclic = 0;
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	bool chain;
	int ch;
	void __iomem *reg;

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A6:
	case SIRFSOC_DMA_VER_A7V1:
		is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
		reg = sdma->base + SIRFSOC_DMA_CH_INT;
		while ((ch = fls(is) - 1) >= 0) {
			is &= ~(1 << ch);
			writel_relaxed(1 << ch, reg);
			schan = &sdma->channels[ch];
			spin_lock(&schan->lock);
			sdesc = list_first_entry(&schan->active,
						 struct sirfsoc_dma_desc, node);
			if (!sdesc->cyclic) {
				/* Execute queued descriptors */
				list_splice_tail_init(&schan->active,
						      &schan->completed);
				dma_cookie_complete(&sdesc->desc);
				if (!list_empty(&schan->queued))
					sirfsoc_dma_execute(schan);
			} else
				schan->happened_cyclic++;
			spin_unlock(&schan->lock);
		}
		break;

	case SIRFSOC_DMA_VER_A7V2:
		is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7);

		reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7;
		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg);
		schan = &sdma->channels[0];
		spin_lock(&schan->lock);
		sdesc = list_first_entry(&schan->active,
					 struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			chain = sdesc->chain;
			if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) ||
			    (!chain &&
			     (is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) {
				/* Execute queued descriptors */
				list_splice_tail_init(&schan->active,
						      &schan->completed);
				dma_cookie_complete(&sdesc->desc);
				if (!list_empty(&schan->queued))
					sirfsoc_dma_execute(schan);
			}
		} else if (sdesc->cyclic && (is &
			   SIRFSOC_DMA_INT_LOOP_INT_ATLAS7))
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
		break;

	default:
		break;
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			if (list_empty(&schan->active)) {
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
						 struct sirfsoc_dma_desc, node);

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}
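
/*
 * Cyclic bookkeeping: the interrupt handler only increments
 * happened_cyclic; the tasklet path above replays the client callback
 * once for every period that elapsed since its last run, advancing
 * completed_cyclic until it catches up, so period notifications are not
 * lost even when interrupts outpace the tasklet.
 */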

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct dma_chan *chan,
				    struct dma_slave_config *config)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
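
/*
 * Example client setup (a sketch; the channel pointer and burst choice
 * are hypothetical, not taken from this driver): only a 4-byte bus width
 * is accepted, and src_maxburst == 4 selects burst mode (schan->mode = 1):
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 4,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 */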

static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base +
			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
			       sdma->base + SIRFSOC_DMA_INT_ATLAS7);
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			       ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) &
			       ~((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
		break;
	default:
		break;
	}

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base +
			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) &
			       ~((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);
	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0x10001,
			       sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) |
			       ((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	pm_runtime_get_sync(sdma->dma.dev);

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev,
				   "Memory allocation error. Allocated only %u descriptors\n",
				   i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}
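
/*
 * The pm_runtime_get_sync() above is balanced by the pm_runtime_put() in
 * sirfsoc_dma_free_chan_resources() below, so with the runtime-PM ops at
 * the bottom of this file the controller clock only runs while at least
 * one client holds a channel.
 */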

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);

	pm_runtime_put(sdma->dma.dev);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		      struct dma_tx_state *txstate)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	struct sirfsoc_dma_desc *sdesc;
	int cid = schan->chan.chan_id;
	unsigned long dma_pos;
	unsigned long dma_request_bytes;
	unsigned long residue;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active)) {
		ret = dma_cookie_status(chan, cookie, txstate);
		dma_set_residue(txstate, 0);
		spin_unlock_irqrestore(&schan->lock, flags);
		return ret;
	}
	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node);
	if (sdesc->cyclic)
		dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
				    (sdesc->width * SIRFSOC_DMA_WORD_LEN);
	else
		dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (sdma->type == SIRFSOC_DMA_VER_A7V2)
		cid = 0;

	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
		dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR);
	} else {
		dma_pos = readl_relaxed(
			sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2;
	}

	residue = dma_request_bytes - (dma_pos - sdesc->addr);
	dma_set_residue(txstate, residue);

	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
					 node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2,
	 * and ylen (number of frames - 1) must be at least 0.
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
			       SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}
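
/*
 * 2D geometry set up above, with a worked example (the numbers are
 * illustrative only): each of the (ylen + 1) frames moves xlen words and
 * consecutive frames start "width" words apart, i.e. (size + icg) bytes.
 * So sgl[0].size = 64, sgl[0].icg = 16 and numf = 4 would yield
 * xlen = 16, width = 20 and ylen = 3 (all in 4-byte words).
 */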

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
			size_t buf_len, size_t period_len,
			enum dma_transfer_direction direction,
			unsigned long flags)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with exactly two periods.
	 * If the X-length is set to 0, the controller runs in loop mode:
	 * the DMA address keeps increasing until it reaches the end of a
	 * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
	 * then wraps back to the beginning of that area. In loop mode the
	 * DMA data region is divided into two parts, BUFA and BUFB, and
	 * the controller raises an interrupt twice per loop: once when the
	 * DMA address reaches the end of BUFA and once at the end of BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
					 node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}
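
/*
 * Example use (a sketch; the sizes and direction are illustrative): an
 * audio client would allocate a two-period ring and get a callback at
 * each half of the buffer:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 8192, 4096,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 */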

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		     chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
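
/*
 * Example use of the filter (a sketch; the requested channel number is
 * hypothetical):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *				   (void *)(uintptr_t)ch_nr);
 */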

#define SIRFSOC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
					     struct of_dma *ofdma)
{
	struct sirfsoc_dma *sdma = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request >= SIRFSOC_DMA_CHANNELS)
		return NULL;

	return dma_get_slave_channel(&sdma->channels[request].chan);
}
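
/*
 * Device-tree consumers name a channel with a single cell, matching the
 * xlate above. A sketch (node names and the channel number are made up,
 * not taken from a real dts):
 *
 *	dmac0: dma-controller@... {
 *		compatible = "sirf,prima2-dmac";
 *		#dma-cells = <1>;
 *		...
 *	};
 *
 *	client {
 *		dmas = <&dmac0 4>;
 *		dma-names = "rx";
 *	};
 */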

static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dmadata *data;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}
	data = (struct sirfsoc_dmadata *)
		(of_match_device(op->dev.driver->of_match_table,
				 &op->dev)->data);
	sdma->exec_desc = data->exec;
	sdma->type = data->type;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	sdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(sdma->clk)) {
		dev_err(dev, "failed to get a clock.\n");
		return PTR_ERR(sdma->clk);
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_config = sirfsoc_dma_slave_config;
	dma->device_pause = sirfsoc_dma_pause_chan;
	dma->device_resume = sirfsoc_dma_resume_chan;
	dma->device_terminate_all = sirfsoc_dma_terminate_all;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
	dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);

	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
	if (ret) {
		dev_err(dev, "failed to register DMA controller\n");
		goto unreg_dma_dev;
	}

	pm_runtime_enable(&op->dev);
	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

unreg_dma_dev:
	dma_async_device_unregister(dma);
free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	of_dma_controller_free(op->dev.of_node);
	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	pm_runtime_disable(&op->dev);
	if (!pm_runtime_status_suspended(&op->dev))
		sirfsoc_dma_runtime_suspend(&op->dev);

	return 0;
}

static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	clk_disable_unprepare(sdma->clk);
	return 0;
}

static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(sdma->clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;
	int count;
	u32 int_offset;

	/*
	 * if we were runtime-suspended before, resume to enable the clock
	 * before accessing registers
	 */
	if (pm_runtime_status_suspended(dev)) {
		ret = sirfsoc_dma_runtime_resume(dev);
		if (ret < 0)
			return ret;
	}

	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
		count = 1;
		int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
	} else {
		count = SIRFSOC_DMA_CHANNELS;
		int_offset = SIRFSOC_DMA_INT_EN;
	}

	/*
	 * The DMA controller loses all register contents while suspended,
	 * so save the registers of all active channels here.
	 */
	for (ch = 0; ch < count; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
					 struct sirfsoc_dma_desc,
					 node);
		save->ctrl[ch] = readl_relaxed(sdma->base +
					       ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
	}
	save->interrupt_en = readl_relaxed(sdma->base + int_offset);

	/* Disable clock */
	sirfsoc_dma_runtime_suspend(dev);

	return 0;
}
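
/*
 * Only CH_CTRL and the interrupt-enable mask are saved above; xlen, ylen,
 * width and the buffer address are not read back from the hardware,
 * because the resume path below rewrites them from the still-active
 * descriptor (sdesc->xlen etc.), which remains the authoritative copy.
 */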

static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;
	int count;
	u32 int_offset;
	u32 width_offset;

	/* Enable clock before accessing registers */
	ret = sirfsoc_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
		count = 1;
		int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
		width_offset = SIRFSOC_DMA_WIDTH_ATLAS7;
	} else {
		count = SIRFSOC_DMA_CHANNELS;
		int_offset = SIRFSOC_DMA_INT_EN;
		width_offset = SIRFSOC_DMA_WIDTH_0;
	}

	writel_relaxed(save->interrupt_en, sdma->base + int_offset);
	for (ch = 0; ch < count; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
					 struct sirfsoc_dma_desc,
					 node);
		writel_relaxed(sdesc->width,
			       sdma->base + width_offset + ch * 4);
		writel_relaxed(sdesc->xlen,
			       sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen,
			       sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(save->ctrl[ch],
			       sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
		if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
			writel_relaxed(sdesc->addr,
				       sdma->base + SIRFSOC_DMA_CH_ADDR);
		} else {
			writel_relaxed(sdesc->addr >> 2,
				       sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
		}
	}

	/* if we were runtime-suspended before, suspend again */
	if (pm_runtime_status_suspended(dev))
		sirfsoc_dma_runtime_suspend(dev);

	return 0;
}

static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend,
			   sirfsoc_dma_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
	.exec = sirfsoc_dma_execute_hw_a6,
	.type = SIRFSOC_DMA_VER_A6,
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
	.exec = sirfsoc_dma_execute_hw_a7v1,
	.type = SIRFSOC_DMA_VER_A7V1,
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
	.exec = sirfsoc_dma_execute_hw_a7v2,
	.type = SIRFSOC_DMA_VER_A7V2,
};

static const struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,},
	{ .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,},
	{ .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,},
	{},
};
MODULE_DEVICE_TABLE(of, sirfsoc_dma_match);

static struct platform_driver sirfsoc_dma_driver = {
	.probe = sirfsoc_dma_probe,
	.remove = sirfsoc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.pm = &sirfsoc_dma_pm_ops,
		.of_match_table = sirfsoc_dma_match,
	},
};

static int __init sirfsoc_dma_init(void)
{
	return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
	platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");