/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the
 * file called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
 * any channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * A memory-to-peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	On burst request from peripheral
 *		Destination burst from DMAC to peripheral
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem-to-mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented), and
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Only the former works sanely with scatter lists, so we only implement
 * the DMAC flow control method.  However, peripherals which use the LBREQ
 * and LSREQ signals (e.g. MMCI) are unable to use this mode, and these
 * hardware restrictions prevent them from using scatter DMA.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */

#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/hardware/pl080.h>

#define DRIVER_NAME	"pl08xdmac"

/**
 * struct vendor_data - vendor-specific config parameters
 * for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	spinlock_t lock;
};

/*
 * PL08X specific defines
 */

/*
 * Memory boundaries: the manual for PL08x says that the controller
 * cannot read past a 1KiB boundary, so these defines are used to
 * create transfer LLIs that do not cross such boundaries.
 */
#define PL08X_BOUNDARY_SHIFT		(10)	/* 1KiB 0x400 */
#define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)

/* Minimum period between work queue runs */
#define PL08X_WQ_PERIODMIN	20

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define PL08X_MAX_ALLOCS	0x40
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8
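
/*
 * Illustration (assuming the four-word, 16-byte LLI above): one 0x2000 byte
 * LLI buffer holds 0x2000 / 16 = 512 LLI entries, which is the
 * MAX_NUM_TSFR_LLIS limit for a single transfer.
 */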

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values, i.e. those for the first LLI.
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val, ccfg = txd->ccfg;

	plchan->at = txd;

	/* Assign the flow control signal to this channel */
	if (txd->direction == DMA_TO_DEVICE)
		/* Select signal as destination */
		ccfg |= phychan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else if (txd->direction == DMA_FROM_DEVICE)
		/* Select signal as source */
		ccfg |= phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Overall DMAC remains enabled always.
 *
 * Disabling individual channels could lose data.
 *
 * Disable the peripheral DMA after disabling the DMAC
 * in order to allow the DMAC FIFO to drain, and
 * hence allow the channel to show inactive
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(ch))
		cpu_relax();
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/* Stops the channel */
static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	pl08x_pause_phy_chan(ch);

	/* Disable channel */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_ENABLE;
	val &= ~PL080_CONFIG_ERR_IRQ_MASK;
	val &= ~PL080_CONFIG_TC_IRQ_MASK;
	writel(val, ch->base + PL080_CH_CONFIG);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
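
/*
 * For example, with a 32-bit source width and a transfer size field of 16,
 * get_bytes_in_cctl() reports 16 * 4 = 64 bytes still to go for that LLI.
 */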

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	u32 bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);

	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->desc_list)) {
		struct pl08x_txd *txdi;

		list_for_each_entry(txdi, &plchan->desc_list, node) {
			bytes += txdi->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	/*
	 * Try to locate a physical channel to be used for
	 * this transfer. If all are taken return NULL and
	 * the requester will have to cope by using some fallback
	 * PIO mode or retrying later.
	 */
	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	/* Stop the channel and clear its interrupts */
	pl08x_stop_phy_chan(ch);
	writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
	writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);

	/* Mark it as free */
	spin_lock_irqsave(&ch->lock, flags);
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
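
/*
 * For example, pl08x_cctl_bits(cctl, 4, 2, 8) encodes a 32-bit source
 * width, a 16-bit destination width and a transfer size field of 8 into
 * the channel control word.
 */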

/*
 * Autoselect a master bus to use for the transfer.
 * This prefers the destination bus if both are available;
 * if there is a fixed address on one bus the other will be chosen.
 */
static void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
	struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
	struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = src_bus;
		*sbus = dst_bus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = dst_bus;
		*sbus = src_bus;
	} else {
		if (dst_bus->buswidth == 4) {
			*mbus = dst_bus;
			*sbus = src_bus;
		} else if (src_bus->buswidth == 4) {
			*mbus = src_bus;
			*sbus = dst_bus;
		} else if (dst_bus->buswidth == 2) {
			*mbus = dst_bus;
			*sbus = src_bus;
		} else if (src_bus->buswidth == 2) {
			*mbus = src_bus;
			*sbus = dst_bus;
		} else {
			/* src_bus->buswidth == 1 */
			*mbus = dst_bus;
			*sbus = src_bus;
		}
	}
}
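
/*
 * For example, in a memory-to-peripheral transfer the destination address
 * does not increment, so the source (memory) side is picked as the master
 * bus to align the LLI splitting against.
 */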

/*
 * Fills in one LLI for a certain transfer descriptor
 * and advance the counter
 */
static int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
		struct pl08x_txd *txd, int num_llis, int len,
		u32 cctl, u32 *remainder)
{
	struct pl08x_lli *llis_va = txd->llis_va;
	dma_addr_t llis_bus = txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = txd->srcbus.addr;
	llis_va[num_llis].dst = txd->dstbus.addr;

	/*
	 * On versions with dual masters, you can optionally AND on
	 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
	 * in new LLIs with that controller, but we always try to
	 * choose AHB1 to point into memory. The idea is to have AHB2
	 * fixed on the peripheral and AHB1 messing around in the
	 * memory. So we don't manipulate this bit currently.
	 */
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);

	if (cctl & PL080_CONTROL_SRC_INCR)
		txd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		txd->dstbus.addr += len;

	BUG_ON(*remainder < len);

	*remainder -= len;

	return num_llis + 1;
}

/*
 * Return number of bytes to fill to boundary, or len
 */
static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
	u32 boundary;

	boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
		<< PL08X_BOUNDARY_SHIFT;

	if (boundary < addr + len)
		return boundary - addr;
	else
		return len;
}
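
/*
 * For example, with addr = 0x200003f0 and len = 0x40, the next 1KiB
 * boundary is 0x20000400, so this returns 0x10; the remaining 0x30 bytes
 * must go into a following LLI.
 */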

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_txd *txd)
{
	struct pl08x_channel_data *cd = txd->cd;
	struct pl08x_bus_data *mbus, *sbus;
	u32 remainder;
	int num_llis = 0;
	u32 cctl;
	size_t max_bytes_per_lli;
	size_t total_bytes = 0;
	struct pl08x_lli *llis_va;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
				      &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	/*
	 * Initialize bus values for this transfer
	 * from the passed optimal values
	 */
	if (!cd) {
		dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
		return 0;
	}

	/* Get the default CCTL from the platform data */
	cctl = cd->cctl;

	/*
	 * On the PL080 we have two bus masters and we
	 * should select one for source and one for
	 * destination. We try to use AHB2 for the
	 * bus which does not increment (typically the
	 * peripheral) else we just choose something.
	 */
	cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
	if (pl08x->vd->dualmaster) {
		if (cctl & PL080_CONTROL_SRC_INCR)
			/* Source increments, use AHB2 for destination */
			cctl |= PL080_CONTROL_DST_AHB2;
		else if (cctl & PL080_CONTROL_DST_INCR)
			/* Destination increments, use AHB2 for source */
			cctl |= PL080_CONTROL_SRC_AHB2;
		else
			/* Just pick something, source AHB1 dest AHB2 */
			cctl |= PL080_CONTROL_DST_AHB2;
	}

	/* Find maximum width of the source bus */
	txd->srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	txd->dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	/* Set up the bus widths to the maximum */
	txd->srcbus.buswidth = txd->srcbus.maxwidth;
	txd->dstbus.buswidth = txd->dstbus.maxwidth;
	dev_vdbg(&pl08x->adev->dev,
		 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
		 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);

	/*
	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
	 */
	max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
		PL080_CONTROL_TRANSFER_SIZE_MASK;
	dev_vdbg(&pl08x->adev->dev,
		 "%s max bytes per lli = %zu\n",
		 __func__, max_bytes_per_lli);

	/* We need to count this down to zero */
	remainder = txd->len;
	dev_vdbg(&pl08x->adev->dev,
		 "%s remainder = %zu\n",
		 __func__, remainder);

	/*
	 * Choose bus to align to
	 * - prefers destination bus if both available
	 * - if fixed address on one bus chooses other
	 * - modifies cctl to choose an appropriate master
	 */
	pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
				&mbus, &sbus, cctl);

	/*
	 * The lowest bit of the LLI register
	 * is also used to indicate which master to
	 * use for reading the LLIs.
	 */

	if (txd->len < mbus->buswidth) {
		/*
		 * Less than a bus width available
		 * - send as single bytes
		 */
		while (remainder) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s single byte LLIs for a transfer of "
				 "less than a bus width (remain 0x%08x)\n",
				 __func__, remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			num_llis =
				pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
							cctl, &remainder);
			total_bytes++;
		}
	} else {
		/*
		 * Make one byte LLIs until master bus is aligned
		 * - slave will then be aligned also
		 */
		while ((mbus->addr) % (mbus->buswidth)) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s adjustment lli for less than bus width "
				 "(remain 0x%08x)\n",
				 __func__, remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			num_llis = pl08x_fill_lli_for_desc
				(pl08x, txd, num_llis, 1, cctl, &remainder);
			total_bytes++;
		}

		/*
		 * Master now aligned
		 * - if slave is not then we must set its width down
		 */
		if (sbus->addr % sbus->buswidth) {
			dev_dbg(&pl08x->adev->dev,
				"%s set down bus width to one byte\n",
				__func__);

			sbus->buswidth = 1;
		}

		/*
		 * Make largest possible LLIs until less than one bus
		 * width left
		 */
		while (remainder > (mbus->buswidth - 1)) {
			size_t lli_len, target_len, tsize, odd_bytes;

			/*
			 * If enough left try to send max possible,
			 * otherwise try to send the remainder
			 */
			target_len = remainder;
			if (remainder > max_bytes_per_lli)
				target_len = max_bytes_per_lli;

			/*
			 * Set bus lengths for incrementing buses
			 * to number of bytes which fill to next memory
			 * boundary
			 */
			if (cctl & PL080_CONTROL_SRC_INCR)
				txd->srcbus.fill_bytes =
					pl08x_pre_boundary(txd->srcbus.addr,
							   remainder);
			else
				txd->srcbus.fill_bytes =
					max_bytes_per_lli;

			if (cctl & PL080_CONTROL_DST_INCR)
				txd->dstbus.fill_bytes =
					pl08x_pre_boundary(txd->dstbus.addr,
							   remainder);
			else
				txd->dstbus.fill_bytes =
					max_bytes_per_lli;

			/* The LLI covers the shorter of the two */
			lli_len = min(txd->srcbus.fill_bytes,
				      txd->dstbus.fill_bytes);

			BUG_ON(lli_len > remainder);

			if (lli_len <= 0) {
				dev_err(&pl08x->adev->dev,
					"%s lli_len is %zu, <= 0\n",
					__func__, lli_len);
				return 0;
			}

			if (lli_len == target_len) {
				/*
				 * Can send what we wanted
				 * - maintain alignment
				 */
				lli_len = (lli_len/mbus->buswidth) *
							mbus->buswidth;
				odd_bytes = 0;
			} else {
				/*
				 * So now we know how many bytes to transfer
				 * to get to the nearest boundary
				 * The next LLI will past the boundary
				 * - however we may be working to a boundary
				 *   on the slave bus
				 * We need to ensure the master stays aligned
				 */
				odd_bytes = lli_len % mbus->buswidth;
				/*
				 * - and that we are working in multiples
				 *   of the bus widths
				 */
				lli_len -= odd_bytes;
			}

			if (lli_len) {
				/*
				 * Check against minimum bus alignment:
				 * Calculate actual transfer size in relation
				 * to bus width and get a maximum remainder of
				 * the smallest bus width - 1
				 */
				/* FIXME: use round_down()? */
				tsize = lli_len / min(mbus->buswidth,
						      sbus->buswidth);
				lli_len = tsize * min(mbus->buswidth,
						      sbus->buswidth);

				if (target_len != lli_len) {
					dev_vdbg(&pl08x->adev->dev,
					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
					__func__, target_len, lli_len, txd->len);
				}

				cctl = pl08x_cctl_bits(cctl,
						       txd->srcbus.buswidth,
						       txd->dstbus.buswidth,
						       tsize);

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, remainder);
				num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
						num_llis, lli_len, cctl,
						&remainder);
				total_bytes += lli_len;
			}

			if (odd_bytes) {
				/*
				 * Creep past the boundary,
				 * maintaining master alignment
				 */
				int j;

				for (j = 0; (j < mbus->buswidth)
						&& (remainder); j++) {
					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
					dev_vdbg(&pl08x->adev->dev,
						"%s align with boundary, single byte (remain 0x%08zx)\n",
						__func__, remainder);
					num_llis =
						pl08x_fill_lli_for_desc(pl08x,
							txd, num_llis, 1,
							cctl, &remainder);
					total_bytes++;
				}
			}
		}

		/*
		 * Send any odd bytes
		 */
		while (remainder) {
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			dev_vdbg(&pl08x->adev->dev,
				"%s align with boundary, single odd byte (remain %zu)\n",
				__func__, remainder);
			num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
					1, cctl, &remainder);
			total_bytes++;
		}
	}

	if (total_bytes != txd->len) {
		dev_err(&pl08x->adev->dev,
			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
			__func__, total_bytes, txd->len);
		return 0;
	}

	if (num_llis >= MAX_NUM_TSFR_LLIS) {
		dev_err(&pl08x->adev->dev,
			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
			__func__, (u32) MAX_NUM_TSFR_LLIS);
		return 0;
	}

	llis_va = txd->llis_va;
	/*
	 * The final LLI terminates the LLI.
	 */
	llis_va[num_llis - 1].lli = 0;
	/*
	 * The final LLI element shall also fire an interrupt
	 */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].cctl,
				 llis_va[i].lli);
		}
	}
#endif

	return num_llis;
}

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	/* Free the LLI */
	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->desc_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->desc_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}

static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan)
		return 0;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if ((txd->direction == DMA_FROM_DEVICE ||
	     txd->direction == DMA_TO_DEVICE) &&
	    pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		ch->id, ch->signal, plchan->name);

	plchan->phychan = ch;

	return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);

	plchan->chan.cookie += 1;
	if (plchan->chan.cookie < 0)
		plchan->chan.cookie = 1;
	tx->cookie = plchan->chan.cookie;
	/* This unlock follows the lock in the prep() function */
	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);

	return tx->cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop
 * may give problems - could schedule where indicated.
 * If slaves are relying on interrupts to signal completion this
 * function must not be called with interrupts disabled
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
		    dma_cookie_t cookie,
		    struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;
	u32 bytesleft = 0;

	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(txstate, last_complete, last_used, 0);
		return ret;
	}

	/*
	 * schedule(); could be inserted here
	 */

	/*
	 * This cookie not complete yet
	 */
	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	/* Get number of bytes left in the active transactions and queue */
	bytesleft = pl08x_getbytes_chan(plchan);

	dma_set_tx_state(txstate, last_complete, last_used,
			 bytesleft);

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
struct burst_table {
	int burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 128,
		.reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 64,
		.reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 32,
		.reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 16,
		.reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 8,
		.reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 4,
		.reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 1,
		.reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
};

static void dma_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_channel_data *cd = plchan->cd;
	enum dma_slave_buswidth addr_width;
	u32 maxburst;
	u32 cctl = 0;
	int i;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_TO_DEVICE) {
		plchan->runtime_addr = config->dst_addr;
		cctl |= PL080_CONTROL_SRC_INCR;
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_FROM_DEVICE) {
		plchan->runtime_addr = config->src_addr;
		cctl |= PL080_CONTROL_DST_INCR;
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return;
	}

	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	default:
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return;
	}

	/*
	 * Now decide on a maxburst:
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single || maxburst == 0) {
		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
	} else {
		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
			if (burst_sizes[i].burstwords <= maxburst)
				break;
		cctl |= burst_sizes[i].reg;
	}
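
	/*
	 * For example, since burst_sizes[] is ordered from largest to
	 * smallest, a maxburst of 48 words picks the 32-word entry: the
	 * first burst size that does not exceed the request.
	 */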

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	cctl &= ~PL080_CONTROL_PROT_MASK;
	cctl |= PL080_CONTROL_PROT_SYS;

	/* Modify the default channel data to fit PrimeCell request */
	cd->cctl = cctl;

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		addr_width, maxburst, cctl);
}
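
/*
 * A client reaches the function above through the DMA_SLAVE_CONFIG control
 * command (see pl08x_control() below), passing something like the following
 * (illustrative values only):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_TO_DEVICE,
 *		.dst_addr = <peripheral FIFO bus address>,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 */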

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->desc_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->desc_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	int num_llis;
	struct pl08x_driver_data *pl08x = plchan->host;
	int ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis)
		return -EINVAL;

	spin_lock_irqsave(&plchan->lock, plchan->lockflags);

	list_add_tail(&txd->node, &plchan->desc_list);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel available, we will
		 * stack up the memcpy channels until there is a channel
		 * available to handle it whereas slave transfers may
		 * have been denied due to platform channel muxing restrictions
		 * and since there is no guarantee that this will ever be
		 * resolved, and since the signal must be acquired AFTER
		 * acquiring the physical channel, we will let them be NACK:ed
		 * with -EBUSY here. The drivers can always retry the prep()
		 * call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
			return -EBUSY;
		}

		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else
		/*
		 * Else we're all set, paused and ready to roll,
		 * status will switch to PL08X_CHAN_RUNNING when
		 * we call issue_pending(). If there is something
		 * running on the channel already we don't change
		 * its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	/*
	 * Notice that we leave plchan->lock locked on purpose:
	 * it will be unlocked in the subsequent tx_submit()
	 * call. This is a consequence of the current API.
	 */

	return 0;
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	txd->direction = DMA_NONE;
	txd->srcbus.addr = src;
	txd->dstbus.addr = dest;
	txd->len = len;

	/* Set platform data for m2m */
	txd->cd = &pl08x->pd->memcpy_channel;
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	/* Both to be incremented or the code will break */
	txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;
	/*
	 * NB: the channel lock is held at this point so tx_submit()
	 * must be called in direct succession.
	 */

	return &txd->tx;
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	/*
	 * Current implementation ASSUMES only one sg
	 */
	if (sg_len != 1) {
		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
			__func__);
		BUG();
	}

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
		__func__, sgl->length, plchan->name);

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;
	if (direction == DMA_TO_DEVICE) {
		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->srcbus.addr = sgl->dma_address;
		if (plchan->runtime_addr)
			txd->dstbus.addr = plchan->runtime_addr;
		else
			txd->dstbus.addr = plchan->cd->addr;
	} else if (direction == DMA_FROM_DEVICE) {
		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		if (plchan->runtime_addr)
			txd->srcbus.addr = plchan->runtime_addr;
		else
			txd->srcbus.addr = plchan->cd->addr;
		txd->dstbus.addr = sgl->dma_address;
	} else {
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}
	txd->cd = plchan->cd;
	txd->len = sgl->length;

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;
	/*
	 * NB: the channel lock is held at this point so tx_submit()
	 * must be called in direct succession.
	 */

	return &txd->tx;
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		dma_set_runtime_config(chan,
				       (struct dma_slave_config *)arg);
		return 0;
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_stop_phy_chan(plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	char *name = chan_id;

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
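
/*
 * Illustrative use from a client driver (names are examples only):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 *
 * where "uart0_tx" matches a bus_id registered in the platform data.
 */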

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of
 * physical channels actually used, if it is zero... well
 * shut it off. That will save some power. Cut the clock
 * at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	u32 val;

	val = readl(pl08x->base + PL080_CONFIG);
	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
	/* We implicitly clear bit 1 and that means little-endian mode */
	val |= PL080_CONFIG_ENABLE;
	writel(val, pl08x->base + PL080_CONFIG);
}

static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	if (plchan->at) {
		dma_async_tx_callback callback =
			plchan->at->tx.callback;
		void *callback_param =
			plchan->at->tx.callback_param;

		/*
		 * Update last completed
		 */
		plchan->lc = plchan->at->tx.cookie;

		/*
		 * Callback to signal completion
		 */
		if (callback)
			callback(callback_param);

		/*
		 * Free the descriptor
		 */
		pl08x_free_txd(pl08x, plchan->at);
		plchan->at = NULL;
	}

	/*
	 * If a new descriptor is queued, set it up
	 * plchan->at is NULL here
	 */
	if (!list_empty(&plchan->desc_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->desc_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel
		 * Free any allocated signal on slave transfers too
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that free:d
		 * up physical channel, see if there is some memcpy
		 * pending that seriously needs to start because of
		 * being stacked up while we were choking the
		 * physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels,
				    chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
			    waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting,
						       waiting->waiting);
				BUG_ON(ret);
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0;
	u32 val;
	int i;

	val = readl(pl08x->base + PL080_ERR_STATUS);
	if (val) {
		/*
		 * An error interrupt (on one or more channels)
		 */
		dev_err(&pl08x->adev->dev,
			"%s error interrupt, register value 0x%08x\n",
			__func__, val);
		/*
		 * Simply clear ALL PL08X error interrupts,
		 * regardless of channel and cause
		 * FIXME: should be 0x00000003 on PL081 really.
		 */
		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	}
	val = readl(pl08x->base + PL080_INT_STATUS);
	for (i = 0; i < pl08x->vd->channels; i++) {
		if ((1 << i) & val) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);

			mask |= (1 << i);
		}
	}
	/*
	 * Clear only the terminal interrupts on channels we processed
	 */
	writel(mask, pl08x->base + PL080_TC_CLEAR);

	return mask ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
					   struct dma_device *dmadev,
					   unsigned int channels,
					   bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);
	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;

		if (slave) {
			chan->slave = true;
			chan->name = pl08x->pd->slave_channels[i].bus_id;
			chan->cd = &pl08x->pd->slave_channels[i];
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		if (chan->cd->circular_buffer) {
			dev_err(&pl08x->adev->dev,
				"channel %s: circular buffers not supported\n",
				chan->name);
			kfree(chan);
			continue;
		}
		dev_info(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->chan.device = dmadev;
		chan->chan.cookie = 0;
		chan->lc = 0;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->desc_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s\n",
			   ch->id, virt_chan ? virt_chan->name : "(none)");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}

static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all clocks */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
				   NULL, pl08x,
				   &pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif

static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* A DMA memory pool for LLIs, align on 1-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
				      PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	spin_lock_init(&pl08x->lock);

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/*
	 * Attach the interrupt handler
	 */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
				   GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->serving = NULL;
		ch->signal = -1;
		dev_info(&adev->dev,
			 "physical channel %d is %s\n", i,
			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					      pl08x->pd->num_slave_channels,
					      true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register slave as an async device - %d\n",
			 __func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);
	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}

/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.data	= &vendor_pl080,
	},
	{ 0, 0 },
};

static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       ": failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);