/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#define DRV_NAME "pch-dma"

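/*
 * DMA control register 0 (CTL0) allocates four bits to each channel:
 * a 2-bit transfer mode (disable / scatter-gather / one-shot) plus a
 * direction bit.
 */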
#define DMA_CTL0_DISABLE                0x0
#define DMA_CTL0_SG                     0x1
#define DMA_CTL0_ONESHOT                0x2
#define DMA_CTL0_MODE_MASK_BITS         0x3
#define DMA_CTL0_DIR_SHIFT_BITS         2
#define DMA_CTL0_BITS_PER_CH            4

#define DMA_CTL2_START_SHIFT_BITS       8
#define DMA_CTL2_IRQ_ENABLE_MASK        ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

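/*
 * DMA status register 0 (STS0): bits 0-7 are per-channel interrupt
 * flags, bits 8-15 the matching error flags, and the per-channel
 * 2-bit state fields start at bit 16.
 */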
#define DMA_STATUS_IDLE                 0x0
#define DMA_STATUS_DESC_READ            0x1
#define DMA_STATUS_WAIT                 0x2
#define DMA_STATUS_ACCESS               0x3
#define DMA_STATUS_BITS_PER_CH          2
#define DMA_STATUS_MASK_BITS            0x3
#define DMA_STATUS_SHIFT_BITS           16
#define DMA_STATUS_IRQ(x)               (0x1 << (x))
#define DMA_STATUS_ERR(x)               (0x1 << ((x) + 8))

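/*
 * Hardware descriptor encoding: bits 12-13 of the size word select the
 * access width, the low bits hold the transfer count (whose maximum
 * depends on the width), and the low two bits of the next pointer
 * select follow/end and whether to raise an interrupt at the end.
 */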
#define DMA_DESC_WIDTH_SHIFT_BITS       12
#define DMA_DESC_WIDTH_1_BYTE           (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES          (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES          (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE       0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES      0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES      0x7FF
#define DMA_DESC_END_WITHOUT_IRQ        0x0
#define DMA_DESC_END_WITH_IRQ           0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ     0x2
#define DMA_DESC_FOLLOW_WITH_IRQ        0x3

#define MAX_CHAN_NR                     8

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");

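/*
 * Layout of one hardware descriptor as the DMA engine fetches it from
 * memory in scatter-gather mode; the same layout appears as the
 * per-channel register block at the end of struct pch_dma_regs.
 */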
struct pch_dma_desc_regs {
        u32 dev_addr;
        u32 mem_addr;
        u32 size;
        u32 next;
};

struct pch_dma_regs {
        u32 dma_ctl0;
        u32 dma_ctl1;
        u32 dma_ctl2;
        u32 reserved1;
        u32 dma_sts0;
        u32 dma_sts1;
        u32 reserved2;
        u32 reserved3;
        struct pch_dma_desc_regs desc[0];
};

struct pch_dma_desc {
        struct pch_dma_desc_regs regs;
        struct dma_async_tx_descriptor txd;
        struct list_head desc_node;
        struct list_head tx_list;
};

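/*
 * Per-channel state. ->lock protects the descriptor lists and cookie
 * bookkeeping; both the tasklet and the dmaengine entry points take it
 * with spin_lock_bh().
 */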
struct pch_dma_chan {
        struct dma_chan chan;
        void __iomem *membase;
        enum dma_data_direction dir;
        struct tasklet_struct tasklet;
        unsigned long err_status;

        spinlock_t lock;

        dma_cookie_t completed_cookie;
        struct list_head active_list;
        struct list_head queue;
        struct list_head free_list;
        unsigned int descs_allocated;
};

#define PDC_DEV_ADDR            0x00
#define PDC_MEM_ADDR            0x04
#define PDC_SIZE                0x08
#define PDC_NEXT                0x0C

#define channel_readl(pdc, name) \
        readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
        writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
        struct dma_device dma;
        void __iomem *membase;
        struct pci_pool *pool;
        struct pch_dma_regs regs;
        struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
        struct pch_dma_chan channels[0];
};

#define PCH_DMA_CTL0            0x00
#define PCH_DMA_CTL1            0x04
#define PCH_DMA_CTL2            0x08
#define PCH_DMA_STS0            0x10
#define PCH_DMA_STS1            0x14

#define dma_readl(pd, name) \
        readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
        writel((val), (pd)->membase + PCH_DMA_##name)

static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
        return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
        return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
        return chan->dev->device.parent;
}

static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
        return list_first_entry(&pd_chan->active_list,
                                struct pch_dma_desc, desc_node);
}

static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
        return list_first_entry(&pd_chan->queue,
                                struct pch_dma_desc, desc_node);
}

static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;

        val = dma_readl(pd, CTL2);

        if (enable)
                val |= 0x1 << chan->chan_id;
        else
                val &= ~(0x1 << chan->chan_id);

        dma_writel(pd, CTL2, val);

        dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
                chan->chan_id, val);
}

static void pdc_set_dir(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;

        val = dma_readl(pd, CTL0);

        if (pd_chan->dir == DMA_TO_DEVICE)
                val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                               DMA_CTL0_DIR_SHIFT_BITS);
        else
                val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                                 DMA_CTL0_DIR_SHIFT_BITS));

        dma_writel(pd, CTL0, val);

        dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
                chan->chan_id, val);
}

static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
        struct pch_dma *pd = to_pd(chan->device);
        u32 val;

        val = dma_readl(pd, CTL0);

        val &= ~(DMA_CTL0_MODE_MASK_BITS <<
                 (DMA_CTL0_BITS_PER_CH * chan->chan_id));
        val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);

        dma_writel(pd, CTL0, val);

        dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
                chan->chan_id, val);
}

static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
{
        struct pch_dma *pd = to_pd(pd_chan->chan.device);
        u32 val;

        val = dma_readl(pd, STS0);
        return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
                        DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
        return pdc_get_status(pd_chan) == DMA_STATUS_IDLE;
}

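/*
 * Program the channel and kick off a transfer. A descriptor with an
 * empty tx_list is a single transfer and runs in one-shot mode; a chain
 * runs in scatter-gather mode, with the hardware fetching the linked
 * descriptors from memory starting at txd.phys.
 */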
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
        struct pch_dma *pd = to_pd(pd_chan->chan.device);
        u32 val;

        if (!pdc_is_idle(pd_chan)) {
                dev_err(chan2dev(&pd_chan->chan),
                        "BUG: Attempt to start non-idle channel\n");
                return;
        }

        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
                pd_chan->chan.chan_id, desc->regs.dev_addr);
        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
                pd_chan->chan.chan_id, desc->regs.mem_addr);
        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
                pd_chan->chan.chan_id, desc->regs.size);
        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
                pd_chan->chan.chan_id, desc->regs.next);

        if (list_empty(&desc->tx_list)) {
                channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
                channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
                channel_writel(pd_chan, SIZE, desc->regs.size);
                channel_writel(pd_chan, NEXT, desc->regs.next);
                pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
        } else {
                channel_writel(pd_chan, NEXT, desc->txd.phys);
                pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
        }

        val = dma_readl(pd, CTL2);
        val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
        dma_writel(pd, CTL2, val);
}

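/*
 * Retire a finished descriptor chain: recycle it onto the free list and
 * run the client callback. Called with pd_chan->lock held.
 */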
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
                               struct pch_dma_desc *desc)
{
        struct dma_async_tx_descriptor *txd = &desc->txd;
        dma_async_tx_callback callback = txd->callback;
        void *param = txd->callback_param;

        /* record completion so pd_tx_status() can report this cookie as done */
        pd_chan->completed_cookie = txd->cookie;

        list_splice_init(&desc->tx_list, &pd_chan->free_list);
        list_move(&desc->desc_node, &pd_chan->free_list);

        if (callback)
                callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
        struct pch_dma_desc *desc, *_d;
        LIST_HEAD(list);

        BUG_ON(!pdc_is_idle(pd_chan));

        if (!list_empty(&pd_chan->queue))
                pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

        list_splice_init(&pd_chan->active_list, &list);
        list_splice_init(&pd_chan->queue, &pd_chan->active_list);

        list_for_each_entry_safe(desc, _d, &list, desc_node)
                pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
        struct pch_dma_desc *bad_desc;

        bad_desc = pdc_first_active(pd_chan);
        list_del(&bad_desc->desc_node);

        list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

        if (!list_empty(&pd_chan->active_list))
                pdc_dostart(pd_chan, pdc_first_active(pd_chan));

        dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
        dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
                 bad_desc->txd.cookie);

        pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
        if (list_empty(&pd_chan->active_list) ||
            list_is_singular(&pd_chan->active_list)) {
                pdc_complete_all(pd_chan);
        } else {
                pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
                pdc_dostart(pd_chan, pdc_first_active(pd_chan));
        }
}

static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
                                      struct pch_dma_desc *desc)
{
        dma_cookie_t cookie = pd_chan->chan.cookie;

        if (++cookie < 0)
                cookie = 1;

        pd_chan->chan.cookie = cookie;
        desc->txd.cookie = cookie;

        return cookie;
}

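/*
 * dma_async_tx_descriptor.tx_submit hook: assign a cookie and either
 * start the transfer immediately (idle channel) or park it on the queue
 * for pdc_advance_work() to pick up later.
 */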
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct pch_dma_desc *desc = to_pd_desc(txd);
        struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
        dma_cookie_t cookie;

        spin_lock_bh(&pd_chan->lock);
        cookie = pdc_assign_cookie(pd_chan, desc);

        if (list_empty(&pd_chan->active_list)) {
                list_add_tail(&desc->desc_node, &pd_chan->active_list);
                pdc_dostart(pd_chan, desc);
        } else {
                list_add_tail(&desc->desc_node, &pd_chan->queue);
        }

        spin_unlock_bh(&pd_chan->lock);

        /* tx_submit must hand the assigned cookie back to the client */
        return cookie;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
        struct pch_dma_desc *desc = NULL;
        struct pch_dma *pd = to_pd(chan->device);
        dma_addr_t addr;

        /* honour the caller's gfp flags (pdc_desc_get() passes GFP_NOIO) */
        desc = pci_pool_alloc(pd->pool, flags, &addr);
        if (desc) {
                memset(desc, 0, sizeof(struct pch_dma_desc));
                INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = pd_tx_submit;
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.phys = addr;
        }

        return desc;
}

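/*
 * Grab a free descriptor, preferring an ACKed one from the free list
 * and falling back to allocating a fresh one from the pool.
 */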
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
        struct pch_dma_desc *desc, *_d;
        struct pch_dma_desc *ret = NULL;
        int i = 0;      /* count of descriptors scanned */

        spin_lock_bh(&pd_chan->lock);
        list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
        }
        spin_unlock_bh(&pd_chan->lock);
        dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

        if (!ret) {
                ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
                if (ret) {
                        spin_lock_bh(&pd_chan->lock);
                        pd_chan->descs_allocated++;
                        spin_unlock_bh(&pd_chan->lock);
                } else {
                        dev_err(chan2dev(&pd_chan->chan),
                                "failed to alloc desc\n");
                }
        }

        return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
                         struct pch_dma_desc *desc)
{
        if (desc) {
                spin_lock_bh(&pd_chan->lock);
                list_splice_init(&desc->tx_list, &pd_chan->free_list);
                list_add(&desc->desc_node, &pd_chan->free_list);
                spin_unlock_bh(&pd_chan->lock);
        }
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_desc *desc;
        LIST_HEAD(tmp_list);
        int i;

        if (!pdc_is_idle(pd_chan)) {
                dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
                return -EIO;
        }

        if (!list_empty(&pd_chan->free_list))
                return pd_chan->descs_allocated;

        for (i = 0; i < init_nr_desc_per_channel; i++) {
                desc = pdc_alloc_desc(chan, GFP_KERNEL);

                if (!desc) {
                        dev_warn(chan2dev(chan),
                                 "Only allocated %d initial descriptors\n", i);
                        break;
                }

                list_add_tail(&desc->desc_node, &tmp_list);
        }

        spin_lock_bh(&pd_chan->lock);
        list_splice(&tmp_list, &pd_chan->free_list);
        pd_chan->descs_allocated = i;
        pd_chan->completed_cookie = chan->cookie = 1;
        spin_unlock_bh(&pd_chan->lock);

        pdc_enable_irq(chan, 1);
        pdc_set_dir(chan);

        return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma *pd = to_pd(chan->device);
        struct pch_dma_desc *desc, *_d;
        LIST_HEAD(tmp_list);

        BUG_ON(!pdc_is_idle(pd_chan));
        BUG_ON(!list_empty(&pd_chan->active_list));
        BUG_ON(!list_empty(&pd_chan->queue));

        spin_lock_bh(&pd_chan->lock);
        list_splice_init(&pd_chan->free_list, &tmp_list);
        pd_chan->descs_allocated = 0;
        spin_unlock_bh(&pd_chan->lock);

        list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
                pci_pool_free(pd->pool, desc, desc->txd.phys);

        pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                    struct dma_tx_state *txstate)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_completed;
        int ret;

        spin_lock_bh(&pd_chan->lock);
        last_completed = pd_chan->completed_cookie;
        last_used = chan->cookie;
        spin_unlock_bh(&pd_chan->lock);

        ret = dma_async_is_complete(cookie, last_completed, last_used);

        dma_set_tx_state(txstate, last_completed, last_used, 0);

        return ret;
}

static void pd_issue_pending(struct dma_chan *chan)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);

        if (pdc_is_idle(pd_chan)) {
                spin_lock_bh(&pd_chan->lock);
                pdc_advance_work(pd_chan);
                spin_unlock_bh(&pd_chan->lock);
        }
}

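/*
 * Build a descriptor chain for a slave transfer. A minimal client-side
 * usage sketch (hypothetical peripheral driver; the register offset and
 * scatterlist setup are placeholders, not taken from this file):
 *
 *      struct pch_dma_slave slave = {
 *              .tx_reg = periph_base + TX_FIFO_OFF,    // hypothetical
 *              .width  = PCH_DMA_WIDTH_1_BYTE,
 *      };
 *      chan->private = &slave;                 // set before prep is called
 *      txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *                                               DMA_TO_DEVICE,
 *                                               DMA_PREP_INTERRUPT);
 *      if (txd)
 *              txd->tx_submit(txd);            // returns the cookie
 */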
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
                        struct scatterlist *sgl, unsigned int sg_len,
                        enum dma_data_direction direction, unsigned long flags)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_slave *pd_slave = chan->private;
        struct pch_dma_desc *first = NULL;
        struct pch_dma_desc *prev = NULL;
        struct pch_dma_desc *desc = NULL;
        struct scatterlist *sg;
        dma_addr_t reg;
        int i;

        if (unlikely(!sg_len)) {
                dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
                return NULL;
        }

        if (direction == DMA_FROM_DEVICE)
                reg = pd_slave->rx_reg;
        else if (direction == DMA_TO_DEVICE)
                reg = pd_slave->tx_reg;
        else
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                desc = pdc_desc_get(pd_chan);

                if (!desc)
                        goto err_desc_get;

                desc->regs.dev_addr = reg;
                desc->regs.mem_addr = sg_phys(sg);
                desc->regs.size = sg_dma_len(sg);
                desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

                switch (pd_slave->width) {
                case PCH_DMA_WIDTH_1_BYTE:
                        if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
                                goto err_desc_get;
                        desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
                        break;
                case PCH_DMA_WIDTH_2_BYTES:
                        if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
                                goto err_desc_get;
                        desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
                        break;
                case PCH_DMA_WIDTH_4_BYTES:
                        if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
                                goto err_desc_get;
                        desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
                        break;
                default:
                        goto err_desc_get;
                }

                if (!first) {
                        first = desc;
                } else {
                        prev->regs.next |= desc->txd.phys;
                        list_add_tail(&desc->desc_node, &first->tx_list);
                }

                prev = desc;
        }

        if (flags & DMA_PREP_INTERRUPT)
                desc->regs.next = DMA_DESC_END_WITH_IRQ;
        else
                desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

        first->txd.cookie = -EBUSY;
        desc->txd.flags = flags;

        return &first->txd;

err_desc_get:
        dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
        pdc_desc_put(pd_chan, first);
        return NULL;
}

static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                             unsigned long arg)
{
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_desc *desc, *_d;
        LIST_HEAD(list);

        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        spin_lock_bh(&pd_chan->lock);

        pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

        list_splice_init(&pd_chan->active_list, &list);
        list_splice_init(&pd_chan->queue, &list);

        list_for_each_entry_safe(desc, _d, &list, desc_node)
                pdc_chain_complete(pd_chan, desc);

        spin_unlock_bh(&pd_chan->lock);

        return 0;
}

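/*
 * Tasklet scheduled from the interrupt handler: on error, drop the bad
 * descriptor and restart the channel; otherwise retire completed work
 * and start whatever is queued next.
 */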
static void pdc_tasklet(unsigned long data)
{
        struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;

        if (!pdc_is_idle(pd_chan)) {
                dev_err(chan2dev(&pd_chan->chan),
                        "BUG: handle non-idle channel in tasklet\n");
                return;
        }

        spin_lock_bh(&pd_chan->lock);
        if (test_and_clear_bit(0, &pd_chan->err_status))
                pdc_handle_error(pd_chan);
        else
                pdc_advance_work(pd_chan);
        spin_unlock_bh(&pd_chan->lock);
}

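/*
 * Shared interrupt handler: one status register covers all channels, so
 * scan every channel's IRQ flag, latch any error bit for the tasklet,
 * and acknowledge by writing the flags back to STS0.
 */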
static irqreturn_t pd_irq(int irq, void *devid)
{
        struct pch_dma *pd = (struct pch_dma *)devid;
        struct pch_dma_chan *pd_chan;
        u32 sts0;
        int i;
        int ret = IRQ_NONE;

        sts0 = dma_readl(pd, STS0);

        dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

        for (i = 0; i < pd->dma.chancnt; i++) {
                pd_chan = &pd->channels[i];

                if (sts0 & DMA_STATUS_IRQ(i)) {
                        if (sts0 & DMA_STATUS_ERR(i))
                                set_bit(0, &pd_chan->err_status);

                        tasklet_schedule(&pd_chan->tasklet);
                        ret = IRQ_HANDLED;
                }
        }

        /* clear interrupt bits in status register */
        dma_writel(pd, STS0, sts0);

        return ret;
}

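/*
 * Suspend/resume support: snapshot the global control registers and each
 * channel's descriptor registers before the device powers down, and
 * replay them on resume.
 */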
static void pch_dma_save_regs(struct pch_dma *pd)
{
        struct pch_dma_chan *pd_chan;
        struct dma_chan *chan, *_c;
        int i = 0;

        pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
        pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
        pd->regs.dma_ctl2 = dma_readl(pd, CTL2);

        list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
                pd_chan = to_pd_chan(chan);

                pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
                pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
                pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
                pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

                i++;
        }
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
        struct pch_dma_chan *pd_chan;
        struct dma_chan *chan, *_c;
        int i = 0;

        dma_writel(pd, CTL0, pd->regs.dma_ctl0);
        dma_writel(pd, CTL1, pd->regs.dma_ctl1);
        dma_writel(pd, CTL2, pd->regs.dma_ctl2);

        list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
                pd_chan = to_pd_chan(chan);

                channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
                channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
                channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
                channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

                i++;
        }
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct pch_dma *pd = pci_get_drvdata(pdev);

        if (pd)
                pch_dma_save_regs(pd);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
        struct pch_dma *pd = pci_get_drvdata(pdev);
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        err = pci_enable_device(pdev);
        if (err) {
                dev_dbg(&pdev->dev, "failed to enable device\n");
                return err;
        }

        if (pd)
                pch_dma_restore_regs(pd);

        return 0;
}

static int __devinit pch_dma_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
{
        struct pch_dma *pd;
        struct pch_dma_regs *regs;
        unsigned int nr_channels;
        int err;
        int i;

        nr_channels = id->driver_data;
        pd = kzalloc(sizeof(struct pch_dma) +
                     sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
        if (!pd)
                return -ENOMEM;

        pci_set_drvdata(pdev, pd);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device\n");
                goto err_free_mem;
        }

        if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Cannot find proper base address\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
                goto err_disable_pdev;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "Cannot set proper DMA config\n");
                goto err_free_res;
        }

        regs = pd->membase = pci_iomap(pdev, 1, 0);
        if (!pd->membase) {
                dev_err(&pdev->dev, "Cannot map MMIO registers\n");
                err = -ENOMEM;
                goto err_free_res;
        }

        pci_set_master(pdev);

        err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
        if (err) {
                dev_err(&pdev->dev, "Failed to request IRQ\n");
                goto err_iounmap;
        }

        pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
                                   sizeof(struct pch_dma_desc), 4, 0);
        if (!pd->pool) {
                dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
                err = -ENOMEM;
                goto err_free_irq;
        }

        pd->dma.dev = &pdev->dev;
        pd->dma.chancnt = nr_channels;

        INIT_LIST_HEAD(&pd->dma.channels);

        for (i = 0; i < nr_channels; i++) {
                struct pch_dma_chan *pd_chan = &pd->channels[i];

                pd_chan->chan.device = &pd->dma;
                pd_chan->chan.cookie = 1;
                pd_chan->chan.chan_id = i;

                pd_chan->membase = &regs->desc[i];

                pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

                spin_lock_init(&pd_chan->lock);

                INIT_LIST_HEAD(&pd_chan->active_list);
                INIT_LIST_HEAD(&pd_chan->queue);
                INIT_LIST_HEAD(&pd_chan->free_list);

                tasklet_init(&pd_chan->tasklet, pdc_tasklet,
                             (unsigned long)pd_chan);
                list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
        }

        dma_cap_zero(pd->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
        dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

        pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
        pd->dma.device_free_chan_resources = pd_free_chan_resources;
        pd->dma.device_tx_status = pd_tx_status;
        pd->dma.device_issue_pending = pd_issue_pending;
        pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
        pd->dma.device_control = pd_device_control;

        err = dma_async_device_register(&pd->dma);
        if (err) {
                dev_err(&pdev->dev, "Failed to register DMA device\n");
                goto err_free_pool;
        }

        return 0;

err_free_pool:
        pci_pool_destroy(pd->pool);
err_free_irq:
        free_irq(pdev->irq, pd);
err_iounmap:
        pci_iounmap(pdev, pd->membase);
err_free_res:
        pci_release_regions(pdev);
err_disable_pdev:
        pci_disable_device(pdev);
err_free_mem:
        kfree(pd);
        return err;
}

static void __devexit pch_dma_remove(struct pci_dev *pdev)
{
        struct pch_dma *pd = pci_get_drvdata(pdev);
        struct pch_dma_chan *pd_chan;
        struct dma_chan *chan, *_c;

        if (pd) {
                dma_async_device_unregister(&pd->dma);

                list_for_each_entry_safe(chan, _c, &pd->dma.channels,
                                         device_node) {
                        pd_chan = to_pd_chan(chan);

                        tasklet_disable(&pd_chan->tasklet);
                        tasklet_kill(&pd_chan->tasklet);
                }

                pci_pool_destroy(pd->pool);
                free_irq(pdev->irq, pd);
                pci_iounmap(pdev, pd->membase);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                kfree(pd);
        }
}

/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_PCH_DMA_8CH       0x8810
#define PCI_DEVICE_ID_PCH_DMA_4CH       0x8815

static const struct pci_device_id pch_dma_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
        { 0, },
};

static struct pci_driver pch_dma_driver = {
        .name           = DRV_NAME,
        .id_table       = pch_dma_id_table,
        .probe          = pch_dma_probe,
        .remove         = __devexit_p(pch_dma_remove),
#ifdef CONFIG_PM
        .suspend        = pch_dma_suspend,
        .resume         = pch_dma_resume,
#endif
};

static int __init pch_dma_init(void)
{
        return pci_register_driver(&pch_dma_driver);
}

static void __exit pch_dma_exit(void)
{
        pci_unregister_driver(&pch_dma_driver);
}

module_init(pch_dma_init);
module_exit(pch_dma_exit);

MODULE_DESCRIPTION("Topcliff PCH DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");