/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"	/* struct virt_dma_chan and the vchan_* helpers used below */

#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))

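/*
 * Register layout, as encoded by the macros above: every channel has its own
 * DCSR at stride 4 and its own descriptor registers (DDADR/DSADR/DTADR/DCMD)
 * at stride 16, while DALGN and DINT are single, controller-wide registers.
 */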
#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)		/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))

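/*
 * Hardware descriptor: four consecutive 32-bit words fetched by the
 * controller from the address programmed in DDADR. A single descriptor can
 * move at most PDMA_MAX_DESC_BYTES, i.e. the 8K-1 DCMD length field rounded
 * down to an 8-byte multiple.
 */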
struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc	vd;		/* Virtual descriptor */
	int			nb_desc;	/* Number of hw. descriptors */
	size_t			len;		/* Number of bytes xfered */
	dma_addr_t		first;		/* First descriptor's addr */

	/* At least one descriptor has an src/dst address not multiple of 8 */
	bool			misaligned;
	bool			cyclic;
	struct dma_pool		*desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw	*hw_desc[];	/* DMA coherent descriptors */
};

struct pxad_phy {
	int			idx;
	void __iomem		*base;
	struct pxad_chan	*vchan;
};

struct pxad_chan {
	struct virt_dma_chan	vc;		/* Virtual channel */
	u32			drcmr;		/* Requestor of the channel */
	enum pxad_chan_prio	prio;		/* Required priority of phy */
	/*
	 * At least one desc_sw in submitted or issued transfers on this channel
	 * has one address such as: addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool			misaligned;
	struct dma_slave_config	cfg;		/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy		*phy;
	struct dma_pool		*desc_pool;	/* Descriptors pool */
	dma_cookie_t		bus_error;
};

struct pxad_device {
	struct dma_device		slave;
	int				nr_chans;
	int				nr_requestors;
	void __iomem			*base;
	struct pxad_phy			*phys;
	spinlock_t			phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry			*dbgfs_root;
	struct dentry			*dbgfs_state;
	struct dentry			**dbgfs_chan;
#endif
};

#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)					\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			 _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)

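/*
 * Return the offset of the DRCMR register for a requestor line: low lines
 * live at 0x100 + line * 4, higher lines at 0x1000 + line * 4.
 */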
static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}

static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")

static int dbg_show_descriptors(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}

static int dbg_show_chan_state(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
		   str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
		   _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
		   "yes" : "no");
	seq_printf(s, "\tDCSR  = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}

static int dbg_show_state(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}

#define DBGFS_FUNC_DECL(name) \
static int dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
	.open		= dbg_open_##name, \
	.llseek		= seq_lseek, \
	.read		= seq_read, \
	.release	= single_release, \
}

DBGFS_FUNC_DECL(state);
DBGFS_FUNC_DECL(chan_state);
DBGFS_FUNC_DECL(descriptors);
DBGFS_FUNC_DECL(requester_chan);

static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					  int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
	struct dentry *chan_reqs = NULL;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	if (chan)
		chan_state = debugfs_create_file("state", 0400, chan, dt,
						 &dbg_fops_chan_state);
	if (chan_state)
		chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
						 &dbg_fops_descriptors);
	if (chan_descr)
		chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
						&dbg_fops_requester_chan);
	if (!chan_reqs)
		goto err_state;

	return chan;

err_state:
	debugfs_remove_recursive(chan);
	return NULL;
}

static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
	if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
		goto err_root;

	pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
						pdev, &dbg_fops_state);
	if (!pdev->dbgfs_state)
		goto err_state;

	pdev->dbgfs_chan =
		kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state),
			      GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		goto err_alloc;

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
	if (!chandir)
		goto err_chandir;

	for (i = 0; i < pdev->nr_chans; i++) {
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
		if (!pdev->dbgfs_chan[i])
			goto err_chans;
	}

	return;
err_chans:
err_chandir:
	kfree(pdev->dbgfs_chan);
err_alloc:
err_state:
	debugfs_remove_recursive(pdev->dbgfs_root);
err_root:
	pr_err("pxad: debugfs is not available\n");
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif

/*
 * In the transition phase where legacy pxa handling is done at the same time
 * as mmp_dma, the DMA physical channel split between the 2 DMA providers is
 * done through legacy_reserved. Legacy code reserves DMA channels by setting
 * the corresponding bits in legacy_reserved.
 */
static u32 legacy_reserved;
static u32 legacy_unavailable;

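/*
 * Find and reserve a free physical channel whose priority class is the one
 * requested by the virtual channel or higher, skipping channels reserved by
 * the legacy PXA DMA code. Takes pdev->phy_lock internally.
 */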
static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			if ((i < 32) && (legacy_reserved & BIT(i)))
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				if (i < 32)
					legacy_unavailable |= BIT(i);
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}

static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;
	int i;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	if (chan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(chan->drcmr);
		writel_relaxed(0, chan->phy->base + reg);
	}

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (i = 0; i < 32; i++)
		if (chan->phy == &pdev->phys[i])
			legacy_unavailable &= ~BIT(i);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}

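/*
 * Start the physical channel: map the requestor line to this channel in
 * DRCMR, set or clear this channel's bit in DALGN according to the transfer
 * alignment, then write DCSR with RUN plus the stop/end/bus-error interrupt
 * enables.
 */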
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	struct pxad_device *pdev;
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
	if (phy->vchan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(phy->vchan->drcmr);
		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
	}

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

static void pxad_launch_chan(struct pxad_chan *chan,
			     struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}
	chan->bus_error = 0;

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
}

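/*
 * The last hardware descriptor of every transfer is a small "updater"
 * transfer: it copies 4 bytes from its own DDADR field (which holds
 * DDADR_STOP) over its own DTADR field. Once it has run, dtadr no longer
 * equals dsadr + 8, which is what is_desc_completed() checks to detect that
 * the whole chain has been consumed by the hardware.
 */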
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
	if (sw_desc->cyclic)
		sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}

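/*
 * A transfer is complete once its trailing updater descriptor has run and
 * overwritten its own dtadr field, i.e. dtadr != dsadr + 8.
 */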
static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}

static void pxad_desc_chain(struct virt_dma_desc *vd1,
			    struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}

static bool pxad_try_hotchain(struct virt_dma_chan *vc,
			      struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
			return true;
	}

	return false;
}

static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear irq */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, &phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}

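/*
 * Per-channel interrupt handler: complete the issued descriptors that the
 * hardware has finished, latch the cookie of the last started transfer when
 * DCSR reports a bus error, and if the channel has stopped with issued
 * descriptors still pending, relaunch the first of them.
 */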
static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	unsigned long flags;
	dma_cookie_t last_started = 0;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock_irqsave(&chan->vc.lock, flags);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d\n",
			__func__, vd, vd->tx.cookie, is_desc_completed(vd));
		last_started = vd->tx.cookie;
		if (to_pxad_sw_desc(vd)->cyclic) {
			vchan_cyclic_callback(vd);
			break;
		}
		if (is_desc_completed(vd)) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_BUSERR) {
		chan->bus_error = last_started;
	}

	if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if ((i < 32) && (legacy_reserved & BIT(i)))
			continue;
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

*dchan
)
745 struct pxad_chan
*chan
= to_pxad_chan(dchan
);
746 struct pxad_device
*pdev
= to_pxad_dev(chan
->vc
.chan
.device
);
751 chan
->desc_pool
= dma_pool_create(dma_chan_name(dchan
),
753 sizeof(struct pxad_desc_hw
),
754 __alignof__(struct pxad_desc_hw
),
756 if (!chan
->desc_pool
) {
757 dev_err(&chan
->vc
.chan
.dev
->device
,
758 "%s(): unable to allocate descriptor pool\n",
766 static void pxad_free_chan_resources(struct dma_chan
*dchan
)
768 struct pxad_chan
*chan
= to_pxad_chan(dchan
);
770 vchan_free_chan_resources(&chan
->vc
);
771 dma_pool_destroy(chan
->desc_pool
);
772 chan
->desc_pool
= NULL
;
static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	BUG_ON(sw_desc->nb_desc == 0);
	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}

static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(sizeof(*sw_desc) +
			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}

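/*
 * Submit path: finalize the updater descriptor, assign a cookie, then either
 * hot-chain the transfer onto a still-running physical channel or cold-chain
 * it onto the tail of the submitted list, provided no new misalignment is
 * introduced.
 */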
static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}

*dchan
)
884 struct pxad_chan
*chan
= to_pxad_chan(dchan
);
885 struct virt_dma_desc
*vd_first
;
888 spin_lock_irqsave(&chan
->vc
.lock
, flags
);
889 if (list_empty(&chan
->vc
.desc_submitted
))
892 vd_first
= list_first_entry(&chan
->vc
.desc_submitted
,
893 struct virt_dma_desc
, node
);
894 dev_dbg(&chan
->vc
.chan
.dev
->device
,
895 "%s(): txd %p[%x]", __func__
, vd_first
, vd_first
->tx
.cookie
);
897 vchan_issue_pending(&chan
->vc
);
898 if (!pxad_try_hotchain(&chan
->vc
, vd_first
))
899 pxad_launch_chan(chan
, to_pxad_sw_desc(vd_first
));
901 spin_unlock_irqrestore(&chan
->vc
.lock
, flags
);
static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
	     unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	INIT_LIST_HEAD(&vd->node);
	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}

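/*
 * Translate the channel's dma_slave_config and the transfer direction into a
 * DCMD template (flow control, address increment, bus width, burst size) and
 * the fixed device-side address, if any.
 */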
static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	*dcmd = 0;
	if (dir == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWSRC;
	}
	if (dir == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWTRG;
	}
	if (dir == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;

	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (chan->cfg.slave_id)
		chan->drcmr = chan->cfg.slave_id;
}

static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

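/*
 * Each scatterlist entry is split into chunks of at most PDMA_MAX_DESC_BYTES,
 * one hardware descriptor per chunk, plus one trailing updater descriptor for
 * completion detection.
 */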
static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	for_each_sg(sgl, sg, sg_len, i)
		nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	chan->cfg = *cfg;
	return 0;
}

static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		pxad_free_phy(chan);
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}

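/*
 * Residue computation: read the channel's current source or target pointer,
 * find the hardware descriptor it falls into, and sum the remaining bytes of
 * that descriptor plus the full length of every descriptor after it.
 */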
static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	/*
	 * curr has to be actually read before checking descriptor
	 * completion, so that a curr inside a status updater
	 * descriptor implies the following test returns true, and
	 * preventing reordering of curr load and the test.
	 */
	rmb();
	if (is_desc_completed(vd))
		goto out;

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor
		 * which lies inside the boundaries of the curr
		 * pointer. All descriptors that occur in the list
		 * _after_ we found that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */
		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	if (cookie == chan->bus_error)
		return DMA_ERROR;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}

static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
	dma_async_device_unregister(&pdev->slave);
	return 0;
}

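/*
 * Allocate the physical channel array and request interrupts: either one IRQ
 * per channel, each handled by the per-channel handler, or a single shared
 * IRQ for the whole controller, demultiplexed through DINT.
 */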
static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq(op, i);
		ret = 0;
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}

[] = {
1336 { .compatible
= "marvell,pdma-1.0", },
1339 MODULE_DEVICE_TABLE(of
, pxad_dt_ids
);
static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}

*op
,
1358 struct pxad_device
*pdev
,
1359 unsigned int nr_phy_chans
,
1360 unsigned int nr_requestors
)
1364 struct pxad_chan
*c
;
1366 pdev
->nr_chans
= nr_phy_chans
;
1367 pdev
->nr_requestors
= nr_requestors
;
1368 INIT_LIST_HEAD(&pdev
->slave
.channels
);
1369 pdev
->slave
.device_alloc_chan_resources
= pxad_alloc_chan_resources
;
1370 pdev
->slave
.device_free_chan_resources
= pxad_free_chan_resources
;
1371 pdev
->slave
.device_tx_status
= pxad_tx_status
;
1372 pdev
->slave
.device_issue_pending
= pxad_issue_pending
;
1373 pdev
->slave
.device_config
= pxad_config
;
1374 pdev
->slave
.device_terminate_all
= pxad_terminate_all
;
1376 if (op
->dev
.coherent_dma_mask
)
1377 dma_set_mask(&op
->dev
, op
->dev
.coherent_dma_mask
);
1379 dma_set_mask(&op
->dev
, DMA_BIT_MASK(32));
1381 ret
= pxad_init_phys(op
, pdev
, nr_phy_chans
);
1385 for (i
= 0; i
< nr_phy_chans
; i
++) {
1386 c
= devm_kzalloc(&op
->dev
, sizeof(*c
), GFP_KERNEL
);
1389 c
->vc
.desc_free
= pxad_free_desc
;
1390 vchan_init(&c
->vc
, &pdev
->slave
);
1393 return dma_async_device_register(&pdev
->slave
);
static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int ret, dma_channels = 0, nb_requestors = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
					   &nb_requestors);
		if (ret) {
			dev_warn(pdev->slave.dev,
				 "#dma-requests set to default 32 as missing in OF: %d",
				 ret);
			nb_requestors = 32;
		}
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
		nb_requestors = pdata->nb_requestors;
	} else {
		dma_channels = 32;	/* default 32 channel */
	}

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdev->slave.descriptor_reuse = true;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
		 dma_channels, nb_requestors);
	return 0;
}

static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver		= {
		.name	= "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table	= pxad_id_table,
	.probe		= pxad_probe,
	.remove		= pxad_remove,
};

bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);

int pxad_toggle_reserved_channel(int legacy_channel)
{
	if (legacy_unavailable & (BIT(legacy_channel)))
		return -EBUSY;
	legacy_reserved ^= BIT(legacy_channel);
	return 0;
}
EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");