drivers/dma/amba-pl08x.c
/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 * Documentation: S3C6410 User's Manual == PL080S
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels.  So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
 * It differs in the following aspects:
 * - CH_CONFIG register at different offset,
 * - separate CH_CONTROL2 register for transfer size,
 * - bigger maximum transfer size,
 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
 * - no support for peripheral flow control.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 * (e.g. a peripheral with a 16-word FIFO would use a source burst of 8
 * and a destination burst of 16)
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to the documentation),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.  Unsupported by PL080S.
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/amba/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME	"pl08xdmac"

#define PL80X_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @config_offset: offset to the channel configuration register
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers are
 *	missing
 * @pl080s: whether this version is a PL080S, which has separate register and
 *	LLI word for transfer size.
 * @max_transfer_size: the maximum single element transfer size for this
 *	PL08x variant, in transfer-width units.
 */
struct vendor_data {
	u8 config_offset;
	u8 channels;
	bool dualmaster;
	bool nomadik;
	bool pl080s;
	u32 max_transfer_size;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)

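/*
 * For example, a bus with addr == 0x40000002 and buswidth == 4 is not
 * aligned; such a transfer must start with byte-wide accesses until the
 * address reaches a 4-byte boundary (0x40000004 would be aligned).
 */
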
/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @base: register base address for this physical channel
 * @reg_config: address of the channel configuration register
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	void __iomem *reg_config;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *	mux released.
 * @cyclic: indicate cyclic transfers
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	u32 *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
	bool done;
	bool cyclic;
};

/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: slave configuration, set at runtime for RX/TX addresses and widths
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	const struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lli_words: how many words are used in each LLI item for this variant
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
	u8 lli_words;
};

/*
 * PL08X specific defines
 */

/* The order of words in an LLI. */
#define PL080_LLI_SRC		0
#define PL080_LLI_DST		1
#define PL080_LLI_LLI		2
#define PL080_LLI_CCTL		3
#define PL080S_LLI_CCTL2	4

/* Total words in an LLI. */
#define PL080_LLI_WORDS		4
#define PL080S_LLI_WORDS	8

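/*
 * Layout of one LLI entry in memory (one u32 per word); the PL080S variant
 * appends CCTL2 and pads the entry out to an 8-word boundary:
 *
 *   PL080:  [ SRC | DST | LLI | CCTL ]
 *   PL080S: [ SRC | DST | LLI | CCTL | CCTL2 | pad | pad | pad ]
 *
 * The LLI word of each entry holds the bus address of the next entry.
 */
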
/*
 * Number of LLIs in each LLI buffer allocated for one transfer
 * (maximum times we call dma_pool_alloc on this pool without freeing)
 */
#define MAX_NUM_TSFR_LLIS	512
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x PrimeCell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX.  One important point to note
 * here is that this does not depend on the physical channel.
 */
ad0de2ac 319static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
6b16c8b1
RK
320{
321 const struct pl08x_platform_data *pd = plchan->host->pd;
322 int ret;
323
d7cabeed
MB
324 if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
325 ret = pd->get_xfer_signal(plchan->cd);
5e2479bd
RK
326 if (ret < 0) {
327 plchan->mux_use = 0;
6b16c8b1 328 return ret;
5e2479bd 329 }
6b16c8b1 330
ad0de2ac 331 plchan->signal = ret;
6b16c8b1
RK
332 }
333 return 0;
334}
335
336static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
337{
338 const struct pl08x_platform_data *pd = plchan->host->pd;
339
5e2479bd
RK
340 if (plchan->signal >= 0) {
341 WARN_ON(plchan->mux_use == 0);
342
d7cabeed
MB
343 if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
344 pd->put_xfer_signal(plchan->cd, plchan->signal);
5e2479bd
RK
345 plchan->signal = -1;
346 }
6b16c8b1
RK
347 }
348}
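/*
 * The mux_use count lets several queued descriptors share one request
 * signal: e.g. two prepared slave transfers each call pl08x_request_mux(),
 * but the signal is only fetched from the platform on the first call and
 * only handed back when the last user releases it.
 */
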
349
e8689e63
LW
350/*
351 * Physical channel handling
352 */
353
354/* Whether a certain channel is busy or not */
355static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
356{
357 unsigned int val;
358
d86ccea7 359 val = readl(ch->reg_config);
e8689e63
LW
360 return val & PL080_CONFIG_ACTIVE;
361}
362
ba6785ff
TF
363static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
364 struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
365{
da1b6c05
TF
366 if (pl08x->vd->pl080s)
367 dev_vdbg(&pl08x->adev->dev,
368 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
369 "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
370 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
371 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
372 lli[PL080S_LLI_CCTL2], ccfg);
373 else
374 dev_vdbg(&pl08x->adev->dev,
375 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
376 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
377 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
378 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
ba6785ff
TF
379
380 writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
381 writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
382 writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
383 writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
384
da1b6c05
TF
385 if (pl08x->vd->pl080s)
386 writel_relaxed(lli[PL080S_LLI_CCTL2],
387 phychan->base + PL080S_CH_CONTROL2);
388
ba6785ff
TF
389 writel(ccfg, phychan->reg_config);
390}
391
e8689e63
LW
/*
 * Set the initial DMA register values, i.e. those for the first LLI.
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
c885bee4 400 struct pl08x_driver_data *pl08x = plchan->host;
e8689e63 401 struct pl08x_phy_chan *phychan = plchan->phychan;
879f127b
RK
402 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
403 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
09b3c323 404 u32 val;
c885bee4 405
879f127b 406 list_del(&txd->vd.node);
eab82533 407
c885bee4 408 plchan->at = txd;
e8689e63 409
c885bee4
RKAL
410 /* Wait for channel inactive */
411 while (pl08x_phy_channel_busy(phychan))
412 cpu_relax();
e8689e63 413
ba6785ff 414 pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
c885bee4
RKAL
415
416 /* Enable the DMA channel */
417 /* Do not access config register until channel shows as disabled */
418 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
19386b32 419 cpu_relax();
e8689e63 420
c885bee4 421 /* Do not access config register until channel shows as inactive */
d86ccea7 422 val = readl(phychan->reg_config);
e8689e63 423 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
d86ccea7 424 val = readl(phychan->reg_config);
e8689e63 425
d86ccea7 426 writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
e8689e63
LW
427}
428
/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
439static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
440{
441 u32 val;
81796616 442 int timeout;
e8689e63
LW
443
444 /* Set the HALT bit and wait for the FIFO to drain */
d86ccea7 445 val = readl(ch->reg_config);
e8689e63 446 val |= PL080_CONFIG_HALT;
d86ccea7 447 writel(val, ch->reg_config);
e8689e63
LW
448
449 /* Wait for channel inactive */
81796616
RKAL
450 for (timeout = 1000; timeout; timeout--) {
451 if (!pl08x_phy_channel_busy(ch))
452 break;
453 udelay(1);
454 }
455 if (pl08x_phy_channel_busy(ch))
456 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
e8689e63
LW
457}
458
459static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
460{
461 u32 val;
462
463 /* Clear the HALT bit */
d86ccea7 464 val = readl(ch->reg_config);
e8689e63 465 val &= ~PL080_CONFIG_HALT;
d86ccea7 466 writel(val, ch->reg_config);
e8689e63
LW
467}
468
/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (e.g. when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
e8689e63 477{
d86ccea7 478 u32 val = readl(ch->reg_config);
e8689e63 479
fb526210
RKAL
480 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
481 PL080_CONFIG_TC_IRQ_MASK);
e8689e63 482
d86ccea7 483 writel(val, ch->reg_config);
fb526210
RKAL
484
485 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
486 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
e8689e63
LW
487}
488
489static inline u32 get_bytes_in_cctl(u32 cctl)
490{
491 /* The source width defines the number of bytes */
492 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
493
f3287a52
AB
494 cctl &= PL080_CONTROL_SWIDTH_MASK;
495
e8689e63
LW
496 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
497 case PL080_WIDTH_8BIT:
498 break;
499 case PL080_WIDTH_16BIT:
500 bytes *= 2;
501 break;
502 case PL080_WIDTH_32BIT:
503 bytes *= 4;
504 break;
505 }
506 return bytes;
507}
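/*
 * For example, a cctl encoding a transfer size of 64 together with a
 * 32-bit source width describes 64 * 4 = 256 bytes still to be transferred.
 */
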
508
da1b6c05
TF
509static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
510{
511 /* The source width defines the number of bytes */
512 u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
513
f3287a52
AB
514 cctl &= PL080_CONTROL_SWIDTH_MASK;
515
e8689e63
LW
516 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
517 case PL080_WIDTH_8BIT:
518 break;
519 case PL080_WIDTH_16BIT:
520 bytes *= 2;
521 break;
522 case PL080_WIDTH_32BIT:
523 bytes *= 4;
524 break;
525 }
526 return bytes;
527}
528
529/* The channel should be paused when calling this */
530static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
531{
ba6785ff
TF
532 struct pl08x_driver_data *pl08x = plchan->host;
533 const u32 *llis_va, *llis_va_limit;
e8689e63 534 struct pl08x_phy_chan *ch;
68a7faa2 535 dma_addr_t llis_bus;
e8689e63 536 struct pl08x_txd *txd;
ba6785ff 537 u32 llis_max_words;
68a7faa2 538 size_t bytes;
68a7faa2 539 u32 clli;
e8689e63 540
e8689e63
LW
541 ch = plchan->phychan;
542 txd = plchan->at;
543
68a7faa2
TF
544 if (!ch || !txd)
545 return 0;
546
e8689e63 547 /*
db9f136a
RKAL
548 * Follow the LLIs to get the number of remaining
549 * bytes in the currently active transaction.
e8689e63 550 */
68a7faa2 551 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
e8689e63 552
68a7faa2 553 /* First get the remaining bytes in the active transfer */
da1b6c05
TF
554 if (pl08x->vd->pl080s)
555 bytes = get_bytes_in_cctl_pl080s(
556 readl(ch->base + PL080_CH_CONTROL),
557 readl(ch->base + PL080S_CH_CONTROL2));
558 else
e8689e63
LW
559 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
560
68a7faa2
TF
561 if (!clli)
562 return bytes;
db9f136a 563
68a7faa2
TF
564 llis_va = txd->llis_va;
565 llis_bus = txd->llis_bus;
e8689e63 566
ba6785ff 567 llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
68a7faa2 568 BUG_ON(clli < llis_bus || clli >= llis_bus +
ba6785ff 569 sizeof(u32) * llis_max_words);
db9f136a 570
68a7faa2
TF
571 /*
572 * Locate the next LLI - as this is an array,
573 * it's simple maths to find.
574 */
ba6785ff 575 llis_va += (clli - llis_bus) / sizeof(u32);
e8689e63 576
ba6785ff
TF
577 llis_va_limit = llis_va + llis_max_words;
578
579 for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
da1b6c05
TF
580 if (pl08x->vd->pl080s)
581 bytes += get_bytes_in_cctl_pl080s(
582 llis_va[PL080_LLI_CCTL],
583 llis_va[PL080S_LLI_CCTL2]);
584 else
585 bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
68a7faa2
TF
586
587 /*
3b24c20b 588 * A LLI pointer going backward terminates the LLI list
68a7faa2 589 */
3b24c20b 590 if (llis_va[PL080_LLI_LLI] <= clli)
68a7faa2 591 break;
e8689e63
LW
592 }
593
e8689e63
LW
594 return bytes;
595}
596
/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer.  If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
604static struct pl08x_phy_chan *
605pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
606 struct pl08x_dma_chan *virt_chan)
607{
608 struct pl08x_phy_chan *ch = NULL;
609 unsigned long flags;
610 int i;
611
e8689e63
LW
612 for (i = 0; i < pl08x->vd->channels; i++) {
613 ch = &pl08x->phy_chans[i];
614
615 spin_lock_irqsave(&ch->lock, flags);
616
affa115e 617 if (!ch->locked && !ch->serving) {
e8689e63 618 ch->serving = virt_chan;
e8689e63
LW
619 spin_unlock_irqrestore(&ch->lock, flags);
620 break;
621 }
622
623 spin_unlock_irqrestore(&ch->lock, flags);
624 }
625
626 if (i == pl08x->vd->channels) {
627 /* No physical channel available, cope with it */
628 return NULL;
629 }
630
631 return ch;
632}
633
a5a488db 634/* Mark the physical channel as free. Note, this write is atomic. */
e8689e63
LW
635static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
636 struct pl08x_phy_chan *ch)
637{
a5a488db
RK
638 ch->serving = NULL;
639}
e8689e63 640
a5a488db
RK
641/*
642 * Try to allocate a physical channel. When successful, assign it to
643 * this virtual channel, and initiate the next descriptor. The
644 * virtual channel lock must be held at this point.
645 */
646static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
647{
648 struct pl08x_driver_data *pl08x = plchan->host;
649 struct pl08x_phy_chan *ch;
fb526210 650
a5a488db
RK
651 ch = pl08x_get_phy_channel(pl08x, plchan);
652 if (!ch) {
653 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
654 plchan->state = PL08X_CHAN_WAITING;
655 return;
656 }
e8689e63 657
a5a488db
RK
658 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
659 ch->id, plchan->name);
660
661 plchan->phychan = ch;
662 plchan->state = PL08X_CHAN_RUNNING;
663 pl08x_start_next_txd(plchan);
664}
665
666static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
667 struct pl08x_dma_chan *plchan)
668{
669 struct pl08x_driver_data *pl08x = plchan->host;
670
671 dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
672 ch->id, plchan->name);
673
674 /*
675 * We do this without taking the lock; we're really only concerned
676 * about whether this pointer is NULL or not, and we're guaranteed
677 * that this will only be called when it _already_ is non-NULL.
678 */
679 ch->serving = plchan;
680 plchan->phychan = ch;
681 plchan->state = PL08X_CHAN_RUNNING;
682 pl08x_start_next_txd(plchan);
683}
684
685/*
686 * Free a physical DMA channel, potentially reallocating it to another
687 * virtual channel if we have any pending.
688 */
689static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
690{
691 struct pl08x_driver_data *pl08x = plchan->host;
692 struct pl08x_dma_chan *p, *next;
693
694 retry:
695 next = NULL;
696
697 /* Find a waiting virtual channel for the next transfer. */
01d8dc64 698 list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
a5a488db
RK
699 if (p->state == PL08X_CHAN_WAITING) {
700 next = p;
701 break;
702 }
703
704 if (!next) {
01d8dc64 705 list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
a5a488db
RK
706 if (p->state == PL08X_CHAN_WAITING) {
707 next = p;
708 break;
709 }
710 }
711
712 /* Ensure that the physical channel is stopped */
713 pl08x_terminate_phy_chan(pl08x, plchan->phychan);
714
715 if (next) {
716 bool success;
717
718 /*
719 * Eww. We know this isn't going to deadlock
720 * but lockdep probably doesn't.
721 */
083be28a 722 spin_lock(&next->vc.lock);
a5a488db
RK
723 /* Re-check the state now that we have the lock */
724 success = next->state == PL08X_CHAN_WAITING;
725 if (success)
726 pl08x_phy_reassign_start(plchan->phychan, next);
083be28a 727 spin_unlock(&next->vc.lock);
a5a488db
RK
728
729 /* If the state changed, try to find another channel */
730 if (!success)
731 goto retry;
732 } else {
733 /* No more jobs, so free up the physical channel */
734 pl08x_put_phy_channel(pl08x, plchan->phychan);
735 }
736
737 plchan->phychan = NULL;
738 plchan->state = PL08X_CHAN_IDLE;
e8689e63
LW
739}
740
741/*
742 * LLI handling
743 */
744
745static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
746{
747 switch (coded) {
748 case PL080_WIDTH_8BIT:
749 return 1;
750 case PL080_WIDTH_16BIT:
751 return 2;
752 case PL080_WIDTH_32BIT:
753 return 4;
754 default:
755 break;
756 }
757 BUG();
758 return 0;
759}
760
761static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
cace6585 762 size_t tsize)
e8689e63
LW
763{
764 u32 retbits = cctl;
765
e8b5e11d 766 /* Remove all src, dst and transfer size bits */
e8689e63
LW
767 retbits &= ~PL080_CONTROL_DWIDTH_MASK;
768 retbits &= ~PL080_CONTROL_SWIDTH_MASK;
769 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
770
771 /* Then set the bits according to the parameters */
772 switch (srcwidth) {
773 case 1:
774 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
775 break;
776 case 2:
777 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
778 break;
779 case 4:
780 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
781 break;
782 default:
783 BUG();
784 break;
785 }
786
787 switch (dstwidth) {
788 case 1:
789 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
790 break;
791 case 2:
792 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
793 break;
794 case 4:
795 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
796 break;
797 default:
798 BUG();
799 break;
800 }
801
5110e51d 802 tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
e8689e63
LW
803 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
804 return retbits;
805}
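/*
 * Example: pl08x_cctl_bits(cctl, 4, 2, 16) clears the old width and size
 * fields and encodes a 32-bit source width, a 16-bit destination width
 * and a transfer size of 16 (i.e. 16 source-width units == 64 bytes).
 */
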
806
542361f8
RKAL
807struct pl08x_lli_build_data {
808 struct pl08x_txd *txd;
542361f8
RKAL
809 struct pl08x_bus_data srcbus;
810 struct pl08x_bus_data dstbus;
811 size_t remainder;
25c94f7f 812 u32 lli_bus;
542361f8
RKAL
813};
814
/*
 * Autoselect a master bus to use for the transfer.  The slave bus is the
 * one chosen as the victim if the source and destination are not similarly
 * aligned: i.e. if, after aligning the master's address to the transfer
 * width (by sending a few bytes byte-by-byte), the slave is still not
 * aligned, its width is reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
542361f8
RKAL
824static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
825 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
e8689e63
LW
826{
827 if (!(cctl & PL080_CONTROL_DST_INCR)) {
542361f8
RKAL
828 *mbus = &bd->dstbus;
829 *sbus = &bd->srcbus;
036f05fd
VK
830 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
831 *mbus = &bd->srcbus;
832 *sbus = &bd->dstbus;
e8689e63 833 } else {
036f05fd 834 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
542361f8
RKAL
835 *mbus = &bd->dstbus;
836 *sbus = &bd->srcbus;
036f05fd 837 } else {
542361f8
RKAL
838 *mbus = &bd->srcbus;
839 *sbus = &bd->dstbus;
e8689e63
LW
840 }
841 }
842}
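/*
 * For instance, in a memory-to-peripheral transfer the destination address
 * does not increment (the peripheral FIFO is fixed), so the destination
 * becomes the master bus and the incrementing memory side is the slave
 * whose width may later be reduced to achieve alignment.
 */
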
843
/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
 */
ba6785ff
TF
847static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
848 struct pl08x_lli_build_data *bd,
da1b6c05 849 int num_llis, int len, u32 cctl, u32 cctl2)
e8689e63 850{
ba6785ff
TF
851 u32 offset = num_llis * pl08x->lli_words;
852 u32 *llis_va = bd->txd->llis_va + offset;
542361f8 853 dma_addr_t llis_bus = bd->txd->llis_bus;
e8689e63
LW
854
855 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
856
ba6785ff
TF
857 /* Advance the offset to next LLI. */
858 offset += pl08x->lli_words;
859
860 llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
861 llis_va[PL080_LLI_DST] = bd->dstbus.addr;
862 llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
863 llis_va[PL080_LLI_LLI] |= bd->lli_bus;
864 llis_va[PL080_LLI_CCTL] = cctl;
da1b6c05
TF
865 if (pl08x->vd->pl080s)
866 llis_va[PL080S_LLI_CCTL2] = cctl2;
e8689e63
LW
867
868 if (cctl & PL080_CONTROL_SRC_INCR)
542361f8 869 bd->srcbus.addr += len;
e8689e63 870 if (cctl & PL080_CONTROL_DST_INCR)
542361f8 871 bd->dstbus.addr += len;
e8689e63 872
542361f8 873 BUG_ON(bd->remainder < len);
cace6585 874
542361f8 875 bd->remainder -= len;
e8689e63
LW
876}
877
ba6785ff
TF
878static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
879 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
880 int num_llis, size_t *total_bytes)
e8689e63 881{
03af500f 882 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
da1b6c05 883 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
03af500f 884 (*total_bytes) += len;
e8689e63
LW
885}
886
48924e42
TF
887#ifdef VERBOSE_DEBUG
888static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
889 const u32 *llis_va, int num_llis)
890{
891 int i;
892
da1b6c05 893 if (pl08x->vd->pl080s) {
48924e42 894 dev_vdbg(&pl08x->adev->dev,
da1b6c05
TF
895 "%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
896 "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
897 for (i = 0; i < num_llis; i++) {
898 dev_vdbg(&pl08x->adev->dev,
899 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
900 i, llis_va, llis_va[PL080_LLI_SRC],
901 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
902 llis_va[PL080_LLI_CCTL],
903 llis_va[PL080S_LLI_CCTL2]);
904 llis_va += pl08x->lli_words;
905 }
906 } else {
907 dev_vdbg(&pl08x->adev->dev,
908 "%-3s %-9s %-10s %-10s %-10s %s\n",
909 "lli", "", "csrc", "cdst", "clli", "cctl");
910 for (i = 0; i < num_llis; i++) {
911 dev_vdbg(&pl08x->adev->dev,
912 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
913 i, llis_va, llis_va[PL080_LLI_SRC],
914 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
915 llis_va[PL080_LLI_CCTL]);
916 llis_va += pl08x->lli_words;
917 }
48924e42
TF
918 }
919}
920#else
921static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
922 const u32 *llis_va, int num_llis) {}
923#endif
924
e8689e63
LW
925/*
926 * This fills in the table of LLIs for the transfer descriptor
927 * Note that we assume we never have to change the burst sizes
928 * Return 0 for error
929 */
930static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
931 struct pl08x_txd *txd)
932{
e8689e63 933 struct pl08x_bus_data *mbus, *sbus;
542361f8 934 struct pl08x_lli_build_data bd;
e8689e63 935 int num_llis = 0;
03af500f 936 u32 cctl, early_bytes = 0;
b7f69d9d 937 size_t max_bytes_per_lli, total_bytes;
ba6785ff 938 u32 *llis_va, *last_lli;
b7f69d9d 939 struct pl08x_sg *dsg;
e8689e63 940
3e27ee84 941 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
e8689e63
LW
942 if (!txd->llis_va) {
943 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
944 return 0;
945 }
946
542361f8 947 bd.txd = txd;
25c94f7f 948 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
b7f69d9d 949 cctl = txd->cctl;
542361f8 950
e8689e63 951 /* Find maximum width of the source bus */
542361f8 952 bd.srcbus.maxwidth =
e8689e63
LW
953 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
954 PL080_CONTROL_SWIDTH_SHIFT);
955
956 /* Find maximum width of the destination bus */
542361f8 957 bd.dstbus.maxwidth =
e8689e63
LW
958 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
959 PL080_CONTROL_DWIDTH_SHIFT);
960
b7f69d9d
VK
961 list_for_each_entry(dsg, &txd->dsg_list, node) {
962 total_bytes = 0;
963 cctl = txd->cctl;
e8689e63 964
b7f69d9d
VK
965 bd.srcbus.addr = dsg->src_addr;
966 bd.dstbus.addr = dsg->dst_addr;
967 bd.remainder = dsg->len;
968 bd.srcbus.buswidth = bd.srcbus.maxwidth;
969 bd.dstbus.buswidth = bd.dstbus.maxwidth;
e8689e63 970
b7f69d9d 971 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
e8689e63 972
b90ca063
AP
973 dev_vdbg(&pl08x->adev->dev,
974 "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
975 (u64)bd.srcbus.addr,
976 cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
b7f69d9d 977 bd.srcbus.buswidth,
b90ca063
AP
978 (u64)bd.dstbus.addr,
979 cctl & PL080_CONTROL_DST_INCR ? "+" : "",
b7f69d9d
VK
980 bd.dstbus.buswidth,
981 bd.remainder);
982 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
983 mbus == &bd.srcbus ? "src" : "dst",
984 sbus == &bd.srcbus ? "src" : "dst");
fc74eb79 985
		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the client driver,
		 *   with the memory address and zero length.  We pass this
		 *   to the controller, and after the transfer it will
		 *   receive the last burst request from the peripheral and
		 *   so the transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, since the DMA controller doesn't know when an
		 *   LLI's transfer is over, it can't load the next LLI.  So
		 *   in this case there has to be an assumption that only one
		 *   LLI is supported.  Thus, we can't have scattered
		 *   addresses.
		 */
1007 if (!bd.remainder) {
1008 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
1009 PL080_CONFIG_FLOW_CONTROL_SHIFT;
1010 if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
0a235657 1011 (fc <= PL080_FLOW_SRC2DST_SRC))) {
b7f69d9d
VK
1012 dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
1013 __func__);
1014 return 0;
1015 }
0a235657 1016
1c38b289
AP
1017 if (!IS_BUS_ALIGNED(&bd.srcbus) ||
1018 !IS_BUS_ALIGNED(&bd.dstbus)) {
b7f69d9d
VK
1019 dev_err(&pl08x->adev->dev,
1020 "%s src & dst address must be aligned to src"
1021 " & dst width if peripheral is flow controller",
1022 __func__);
1023 return 0;
1024 }
03af500f 1025
b7f69d9d
VK
1026 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
1027 bd.dstbus.buswidth, 0);
ba6785ff 1028 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
da1b6c05 1029 0, cctl, 0);
b7f69d9d
VK
1030 break;
1031 }
e8689e63
LW
1032
1033 /*
b7f69d9d
VK
1034 * Send byte by byte for following cases
1035 * - Less than a bus width available
1036 * - until master bus is aligned
e8689e63 1037 */
b7f69d9d
VK
1038 if (bd.remainder < mbus->buswidth)
1039 early_bytes = bd.remainder;
1c38b289
AP
1040 else if (!IS_BUS_ALIGNED(mbus)) {
1041 early_bytes = mbus->buswidth -
1042 (mbus->addr & (mbus->buswidth - 1));
b7f69d9d
VK
1043 if ((bd.remainder - early_bytes) < mbus->buswidth)
1044 early_bytes = bd.remainder;
1045 }
e8689e63 1046
b7f69d9d
VK
1047 if (early_bytes) {
1048 dev_vdbg(&pl08x->adev->dev,
6fc8ae78 1049 "%s byte width LLIs (remain 0x%08zx)\n",
b7f69d9d 1050 __func__, bd.remainder);
ba6785ff
TF
1051 prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
1052 num_llis++, &total_bytes);
e8689e63
LW
1053 }
1054
b7f69d9d
VK
1055 if (bd.remainder) {
1056 /*
1057 * Master now aligned
1058 * - if slave is not then we must set its width down
1059 */
1c38b289 1060 if (!IS_BUS_ALIGNED(sbus)) {
b7f69d9d
VK
1061 dev_dbg(&pl08x->adev->dev,
1062 "%s set down bus width to one byte\n",
1063 __func__);
fa6a940b 1064
b7f69d9d
VK
1065 sbus->buswidth = 1;
1066 }
e8689e63
LW
1067
1068 /*
b7f69d9d
VK
1069 * Bytes transferred = tsize * src width, not
1070 * MIN(buswidths)
e8689e63 1071 */
b7f69d9d 1072 max_bytes_per_lli = bd.srcbus.buswidth *
5110e51d 1073 pl08x->vd->max_transfer_size;
b7f69d9d
VK
1074 dev_vdbg(&pl08x->adev->dev,
1075 "%s max bytes per lli = %zu\n",
1076 __func__, max_bytes_per_lli);
e8689e63
LW
1077
1078 /*
b7f69d9d
VK
1079 * Make largest possible LLIs until less than one bus
1080 * width left
e8689e63 1081 */
b7f69d9d
VK
1082 while (bd.remainder > (mbus->buswidth - 1)) {
1083 size_t lli_len, tsize, width;
e8689e63 1084
b7f69d9d
VK
1085 /*
1086 * If enough left try to send max possible,
1087 * otherwise try to send the remainder
1088 */
1089 lli_len = min(bd.remainder, max_bytes_per_lli);
16a2e7d3 1090
b7f69d9d
VK
1091 /*
1092 * Check against maximum bus alignment:
1093 * Calculate actual transfer size in relation to
1094 * bus width an get a maximum remainder of the
1095 * highest bus width - 1
1096 */
1097 width = max(mbus->buswidth, sbus->buswidth);
1098 lli_len = (lli_len / width) * width;
1099 tsize = lli_len / bd.srcbus.buswidth;
1100
1101 dev_vdbg(&pl08x->adev->dev,
1102 "%s fill lli with single lli chunk of "
1103 "size 0x%08zx (remainder 0x%08zx)\n",
1104 __func__, lli_len, bd.remainder);
1105
1106 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
16a2e7d3 1107 bd.dstbus.buswidth, tsize);
ba6785ff 1108 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
da1b6c05 1109 lli_len, cctl, tsize);
b7f69d9d
VK
1110 total_bytes += lli_len;
1111 }
e8689e63 1112
b7f69d9d
VK
1113 /*
1114 * Send any odd bytes
1115 */
1116 if (bd.remainder) {
1117 dev_vdbg(&pl08x->adev->dev,
1118 "%s align with boundary, send odd bytes (remain %zu)\n",
1119 __func__, bd.remainder);
ba6785ff
TF
1120 prep_byte_width_lli(pl08x, &bd, &cctl,
1121 bd.remainder, num_llis++, &total_bytes);
b7f69d9d 1122 }
e8689e63 1123 }
16a2e7d3 1124
b7f69d9d
VK
1125 if (total_bytes != dsg->len) {
1126 dev_err(&pl08x->adev->dev,
1127 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
1128 __func__, total_bytes, dsg->len);
1129 return 0;
1130 }
e8689e63 1131
b7f69d9d
VK
1132 if (num_llis >= MAX_NUM_TSFR_LLIS) {
1133 dev_err(&pl08x->adev->dev,
1134 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
ba6785ff 1135 __func__, MAX_NUM_TSFR_LLIS);
b7f69d9d
VK
1136 return 0;
1137 }
e8689e63 1138 }
b58b6b5b
RKAL
1139
1140 llis_va = txd->llis_va;
ba6785ff 1141 last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
e8689e63 1142
3b24c20b
AB
1143 if (txd->cyclic) {
1144 /* Link back to the first LLI. */
1145 last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
1146 } else {
1147 /* The final LLI terminates the LLI. */
1148 last_lli[PL080_LLI_LLI] = 0;
1149 /* The final LLI element shall also fire an interrupt. */
1150 last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
e8689e63 1151 }
e8689e63 1152
48924e42 1153 pl08x_dump_lli(pl08x, llis_va, num_llis);
e8689e63
LW
1154
1155 return num_llis;
1156}
1157
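/*
 * Illustration of the splitting above: a 4100-byte transfer on a 32-bit
 * master bus that starts 2 bytes off a word boundary becomes one byte-wide
 * LLI carrying the first 2 bytes (to align the master), then word-wide
 * LLIs of up to max_bytes_per_lli covering the middle 4096 bytes, and a
 * final byte-wide LLI for the 2 odd trailing bytes.
 */
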
e8689e63
LW
1158static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
1159 struct pl08x_txd *txd)
1160{
b7f69d9d
VK
1161 struct pl08x_sg *dsg, *_dsg;
1162
c1205646
VK
1163 if (txd->llis_va)
1164 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
e8689e63 1165
b7f69d9d
VK
1166 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
1167 list_del(&dsg->node);
1168 kfree(dsg);
1169 }
1170
e8689e63
LW
1171 kfree(txd);
1172}
1173
18536134
RK
1174static void pl08x_desc_free(struct virt_dma_desc *vd)
1175{
1176 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1177 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
18536134 1178
89116bf9 1179 dma_descriptor_unmap(&vd->tx);
18536134
RK
1180 if (!txd->done)
1181 pl08x_release_mux(plchan);
1182
18536134 1183 pl08x_free_txd(plchan->host, txd);
18536134
RK
1184}
1185
e8689e63
LW
1186static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
1187 struct pl08x_dma_chan *plchan)
1188{
ea160561 1189 LIST_HEAD(head);
e8689e63 1190
879f127b 1191 vchan_get_all_descriptors(&plchan->vc, &head);
91998261 1192 vchan_dma_desc_free_list(&plchan->vc, &head);
e8689e63
LW
1193}
1194
1195/*
1196 * The DMA ENGINE API
1197 */
1198static int pl08x_alloc_chan_resources(struct dma_chan *chan)
1199{
1200 return 0;
1201}
1202
1203static void pl08x_free_chan_resources(struct dma_chan *chan)
1204{
a068682c
RK
1205 /* Ensure all queued descriptors are freed */
1206 vchan_free_chan_resources(to_virt_chan(chan));
e8689e63
LW
1207}
1208
e8689e63
LW
1209static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
1210 struct dma_chan *chan, unsigned long flags)
1211{
1212 struct dma_async_tx_descriptor *retval = NULL;
1213
1214 return retval;
1215}
1216
/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
3e27ee84
VK
1222static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
1223 dma_cookie_t cookie, struct dma_tx_state *txstate)
e8689e63
LW
1224{
1225 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
06e885b7
RK
1226 struct virt_dma_desc *vd;
1227 unsigned long flags;
e8689e63 1228 enum dma_status ret;
06e885b7 1229 size_t bytes = 0;
e8689e63 1230
96a2af41 1231 ret = dma_cookie_status(chan, cookie, txstate);
0996e895 1232 if (ret == DMA_COMPLETE)
e8689e63 1233 return ret;
e8689e63 1234
06e885b7
RK
1235 /*
1236 * There's no point calculating the residue if there's
1237 * no txstate to store the value.
1238 */
1239 if (!txstate) {
1240 if (plchan->state == PL08X_CHAN_PAUSED)
1241 ret = DMA_PAUSED;
1242 return ret;
1243 }
1244
1245 spin_lock_irqsave(&plchan->vc.lock, flags);
1246 ret = dma_cookie_status(chan, cookie, txstate);
0996e895 1247 if (ret != DMA_COMPLETE) {
06e885b7
RK
1248 vd = vchan_find_desc(&plchan->vc, cookie);
1249 if (vd) {
1250 /* On the issued list, so hasn't been processed yet */
1251 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1252 struct pl08x_sg *dsg;
1253
1254 list_for_each_entry(dsg, &txd->dsg_list, node)
1255 bytes += dsg->len;
1256 } else {
1257 bytes = pl08x_getbytes_chan(plchan);
1258 }
1259 }
1260 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1261
e8689e63
LW
1262 /*
1263 * This cookie not complete yet
96a2af41 1264 * Get number of bytes left in the active transactions and queue
e8689e63 1265 */
06e885b7 1266 dma_set_residue(txstate, bytes);
e8689e63 1267
06e885b7
RK
1268 if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
1269 ret = DMA_PAUSED;
e8689e63
LW
1270
1271 /* Whether waiting or running, we're in progress */
06e885b7 1272 return ret;
e8689e63
LW
1273}
1274
1275/* PrimeCell DMA extension */
1276struct burst_table {
760596c6 1277 u32 burstwords;
e8689e63
LW
1278 u32 reg;
1279};
1280
1281static const struct burst_table burst_sizes[] = {
1282 {
1283 .burstwords = 256,
760596c6 1284 .reg = PL080_BSIZE_256,
e8689e63
LW
1285 },
1286 {
1287 .burstwords = 128,
760596c6 1288 .reg = PL080_BSIZE_128,
e8689e63
LW
1289 },
1290 {
1291 .burstwords = 64,
760596c6 1292 .reg = PL080_BSIZE_64,
e8689e63
LW
1293 },
1294 {
1295 .burstwords = 32,
760596c6 1296 .reg = PL080_BSIZE_32,
e8689e63
LW
1297 },
1298 {
1299 .burstwords = 16,
760596c6 1300 .reg = PL080_BSIZE_16,
e8689e63
LW
1301 },
1302 {
1303 .burstwords = 8,
760596c6 1304 .reg = PL080_BSIZE_8,
e8689e63
LW
1305 },
1306 {
1307 .burstwords = 4,
760596c6 1308 .reg = PL080_BSIZE_4,
e8689e63
LW
1309 },
1310 {
760596c6
RKAL
1311 .burstwords = 0,
1312 .reg = PL080_BSIZE_1,
e8689e63
LW
1313 },
1314};
1315
121c8476
RKAL
1316/*
1317 * Given the source and destination available bus masks, select which
1318 * will be routed to each port. We try to have source and destination
1319 * on separate ports, but always respect the allowable settings.
1320 */
1321static u32 pl08x_select_bus(u8 src, u8 dst)
1322{
1323 u32 cctl = 0;
1324
1325 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
1326 cctl |= PL080_CONTROL_DST_AHB2;
1327 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
1328 cctl |= PL080_CONTROL_SRC_AHB2;
1329
1330 return cctl;
1331}
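/*
 * E.g. with a source able to use AHB1 or AHB2 and a destination restricted
 * to AHB2, the destination is routed to AHB2 and the source stays on AHB1,
 * keeping the two sides of the transfer on separate masters.
 */
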
1332
f14c426c
RKAL
1333static u32 pl08x_cctl(u32 cctl)
1334{
1335 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
1336 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
1337 PL080_CONTROL_PROT_MASK);
1338
1339 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1340 return cctl | PL080_CONTROL_PROT_SYS;
1341}
1342
aa88cdaa
RKAL
1343static u32 pl08x_width(enum dma_slave_buswidth width)
1344{
1345 switch (width) {
1346 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1347 return PL080_WIDTH_8BIT;
1348 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1349 return PL080_WIDTH_16BIT;
1350 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1351 return PL080_WIDTH_32BIT;
f32807f1
VK
1352 default:
1353 return ~0;
aa88cdaa 1354 }
aa88cdaa
RKAL
1355}
1356
760596c6
RKAL
1357static u32 pl08x_burst(u32 maxburst)
1358{
1359 int i;
1360
1361 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
1362 if (burst_sizes[i].burstwords <= maxburst)
1363 break;
1364
1365 return burst_sizes[i].reg;
1366}
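/*
 * The table above is scanned from the largest entry down, so e.g. a
 * maxburst of 20 words selects PL080_BSIZE_16, while a maxburst of 0 or 1
 * falls through to single transfers (PL080_BSIZE_1).
 */
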
1367
9862ba17
RK
1368static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
1369 enum dma_slave_buswidth addr_width, u32 maxburst)
1370{
1371 u32 width, burst, cctl = 0;
1372
1373 width = pl08x_width(addr_width);
1374 if (width == ~0)
1375 return ~0;
1376
1377 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
1378 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
1379
1380 /*
1381 * If this channel will only request single transfers, set this
1382 * down to ONE element. Also select one element if no maxburst
1383 * is specified.
1384 */
1385 if (plchan->cd->single)
1386 maxburst = 1;
1387
1388 burst = pl08x_burst(maxburst);
1389 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
1390 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
1391
1392 return pl08x_cctl(cctl);
1393}
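/*
 * E.g. a 4-byte addr_width with maxburst 16 yields a cctl with both widths
 * set to 32 bits and both burst size fields set to PL080_BSIZE_16; the AHB
 * port and increment bits are left for the caller to fill in, and the
 * access is forced to privileged mode (PROT_SYS) by pl08x_cctl().
 */
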
1394
e8689e63
LW
1395/*
1396 * Slave transactions callback to the slave device to allow
1397 * synchronization of slave DMA signals with the DMAC enable
1398 */
1399static void pl08x_issue_pending(struct dma_chan *chan)
1400{
1401 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
e8689e63
LW
1402 unsigned long flags;
1403
083be28a 1404 spin_lock_irqsave(&plchan->vc.lock, flags);
879f127b 1405 if (vchan_issue_pending(&plchan->vc)) {
a5a488db
RK
1406 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
1407 pl08x_phy_alloc_and_start(plchan);
e8689e63 1408 }
083be28a 1409 spin_unlock_irqrestore(&plchan->vc.lock, flags);
e8689e63
LW
1410}
1411
879f127b 1412static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
ac3cd20d 1413{
b201c111 1414 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
ac3cd20d
RKAL
1415
1416 if (txd) {
b7f69d9d 1417 INIT_LIST_HEAD(&txd->dsg_list);
4983a04f
RKAL
1418
1419 /* Always enable error and terminal interrupts */
1420 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
1421 PL080_CONFIG_TC_IRQ_MASK;
ac3cd20d
RKAL
1422 }
1423 return txd;
1424}
1425
e8689e63
LW
1426/*
1427 * Initialize a descriptor to be used by memcpy submit
1428 */
1429static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1430 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1431 size_t len, unsigned long flags)
1432{
1433 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1434 struct pl08x_driver_data *pl08x = plchan->host;
1435 struct pl08x_txd *txd;
b7f69d9d 1436 struct pl08x_sg *dsg;
e8689e63
LW
1437 int ret;
1438
879f127b 1439 txd = pl08x_get_txd(plchan);
e8689e63
LW
1440 if (!txd) {
1441 dev_err(&pl08x->adev->dev,
1442 "%s no memory for descriptor\n", __func__);
1443 return NULL;
1444 }
1445
b7f69d9d
VK
1446 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1447 if (!dsg) {
1448 pl08x_free_txd(pl08x, txd);
1449 dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
1450 __func__);
1451 return NULL;
1452 }
1453 list_add_tail(&dsg->node, &txd->dsg_list);
1454
b7f69d9d
VK
1455 dsg->src_addr = src;
1456 dsg->dst_addr = dest;
1457 dsg->len = len;
e8689e63
LW
1458
1459 /* Set platform data for m2m */
4983a04f 1460 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
dc8d5f8d 1461 txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
c7da9a56 1462 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
4983a04f 1463
e8689e63 1464 /* Both to be incremented or the code will break */
70b5ed6b 1465 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
c7da9a56 1466
c7da9a56 1467 if (pl08x->vd->dualmaster)
121c8476
RKAL
1468 txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
1469 pl08x->mem_buses);
e8689e63 1470
aa4afb75
RK
1471 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1472 if (!ret) {
1473 pl08x_free_txd(pl08x, txd);
e8689e63 1474 return NULL;
aa4afb75 1475 }
e8689e63 1476
879f127b 1477 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
e8689e63
LW
1478}
1479
3b24c20b
AB
1480static struct pl08x_txd *pl08x_init_txd(
1481 struct dma_chan *chan,
1482 enum dma_transfer_direction direction,
1483 dma_addr_t *slave_addr)
e8689e63
LW
1484{
1485 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1486 struct pl08x_driver_data *pl08x = plchan->host;
1487 struct pl08x_txd *txd;
dc8d5f8d 1488 enum dma_slave_buswidth addr_width;
0a235657 1489 int ret, tmp;
409ec8db 1490 u8 src_buses, dst_buses;
dc8d5f8d 1491 u32 maxburst, cctl;
e8689e63 1492
879f127b 1493 txd = pl08x_get_txd(plchan);
e8689e63
LW
1494 if (!txd) {
1495 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1496 return NULL;
1497 }
1498
e8689e63
LW
1499 /*
1500 * Set up addresses, the PrimeCell configured address
1501 * will take precedence since this may configure the
1502 * channel target address dynamically at runtime.
1503 */
db8196df 1504 if (direction == DMA_MEM_TO_DEV) {
dc8d5f8d 1505 cctl = PL080_CONTROL_SRC_INCR;
3b24c20b 1506 *slave_addr = plchan->cfg.dst_addr;
dc8d5f8d
RK
1507 addr_width = plchan->cfg.dst_addr_width;
1508 maxburst = plchan->cfg.dst_maxburst;
409ec8db
RK
1509 src_buses = pl08x->mem_buses;
1510 dst_buses = plchan->cd->periph_buses;
db8196df 1511 } else if (direction == DMA_DEV_TO_MEM) {
dc8d5f8d 1512 cctl = PL080_CONTROL_DST_INCR;
3b24c20b 1513 *slave_addr = plchan->cfg.src_addr;
dc8d5f8d
RK
1514 addr_width = plchan->cfg.src_addr_width;
1515 maxburst = plchan->cfg.src_maxburst;
409ec8db
RK
1516 src_buses = plchan->cd->periph_buses;
1517 dst_buses = pl08x->mem_buses;
e8689e63 1518 } else {
b7f69d9d 1519 pl08x_free_txd(pl08x, txd);
e8689e63
LW
1520 dev_err(&pl08x->adev->dev,
1521 "%s direction unsupported\n", __func__);
1522 return NULL;
1523 }
e8689e63 1524
dc8d5f8d 1525 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
800d683e
RK
1526 if (cctl == ~0) {
1527 pl08x_free_txd(pl08x, txd);
1528 dev_err(&pl08x->adev->dev,
1529 "DMA slave configuration botched?\n");
1530 return NULL;
1531 }
1532
409ec8db
RK
1533 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
1534
95442b22 1535 if (plchan->cfg.device_fc)
db8196df 1536 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
0a235657
VK
1537 PL080_FLOW_PER2MEM_PER;
1538 else
db8196df 1539 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
0a235657
VK
1540 PL080_FLOW_PER2MEM;
1541
1542 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1543
c48d4963
RK
1544 ret = pl08x_request_mux(plchan);
1545 if (ret < 0) {
1546 pl08x_free_txd(pl08x, txd);
1547 dev_dbg(&pl08x->adev->dev,
1548 "unable to mux for transfer on %s due to platform restrictions\n",
1549 plchan->name);
1550 return NULL;
1551 }
1552
1553 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
1554 plchan->signal, plchan->name);
1555
1556 /* Assign the flow control signal to this channel */
1557 if (direction == DMA_MEM_TO_DEV)
1558 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
1559 else
1560 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1561
3b24c20b
AB
1562 return txd;
1563}
1564
1565static int pl08x_tx_add_sg(struct pl08x_txd *txd,
1566 enum dma_transfer_direction direction,
1567 dma_addr_t slave_addr,
1568 dma_addr_t buf_addr,
1569 unsigned int len)
1570{
1571 struct pl08x_sg *dsg;
1572
1573 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1574 if (!dsg)
1575 return -ENOMEM;
1576
1577 list_add_tail(&dsg->node, &txd->dsg_list);
1578
1579 dsg->len = len;
1580 if (direction == DMA_MEM_TO_DEV) {
1581 dsg->src_addr = buf_addr;
1582 dsg->dst_addr = slave_addr;
1583 } else {
1584 dsg->src_addr = slave_addr;
1585 dsg->dst_addr = buf_addr;
1586 }
1587
1588 return 0;
1589}
1590
1591static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1592 struct dma_chan *chan, struct scatterlist *sgl,
1593 unsigned int sg_len, enum dma_transfer_direction direction,
1594 unsigned long flags, void *context)
1595{
1596 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1597 struct pl08x_driver_data *pl08x = plchan->host;
1598 struct pl08x_txd *txd;
1599 struct scatterlist *sg;
1600 int ret, tmp;
1601 dma_addr_t slave_addr;
1602
1603 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1604 __func__, sg_dma_len(sgl), plchan->name);
1605
1606 txd = pl08x_init_txd(chan, direction, &slave_addr);
1607 if (!txd)
1608 return NULL;
1609
b7f69d9d 1610 for_each_sg(sgl, sg, sg_len, tmp) {
3b24c20b
AB
1611 ret = pl08x_tx_add_sg(txd, direction, slave_addr,
1612 sg_dma_address(sg),
1613 sg_dma_len(sg));
1614 if (ret) {
c48d4963 1615 pl08x_release_mux(plchan);
b7f69d9d
VK
1616 pl08x_free_txd(pl08x, txd);
1617 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1618 __func__);
1619 return NULL;
1620 }
3b24c20b 1621 }
b7f69d9d 1622
3b24c20b
AB
1623 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1624 if (!ret) {
1625 pl08x_release_mux(plchan);
1626 pl08x_free_txd(pl08x, txd);
1627 return NULL;
1628 }
1629
1630 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1631}
1632
1633static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
1634 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1635 size_t period_len, enum dma_transfer_direction direction,
31c1e5a1 1636 unsigned long flags)
3b24c20b
AB
1637{
1638 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1639 struct pl08x_driver_data *pl08x = plchan->host;
1640 struct pl08x_txd *txd;
1641 int ret, tmp;
1642 dma_addr_t slave_addr;
1643
1644 dev_dbg(&pl08x->adev->dev,
6fc8ae78 1645 "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
3b24c20b
AB
1646 __func__, period_len, buf_len,
1647 direction == DMA_MEM_TO_DEV ? "to" : "from",
1648 plchan->name);
1649
1650 txd = pl08x_init_txd(chan, direction, &slave_addr);
1651 if (!txd)
1652 return NULL;
1653
1654 txd->cyclic = true;
1655 txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
1656 for (tmp = 0; tmp < buf_len; tmp += period_len) {
1657 ret = pl08x_tx_add_sg(txd, direction, slave_addr,
1658 buf_addr + tmp, period_len);
1659 if (ret) {
1660 pl08x_release_mux(plchan);
1661 pl08x_free_txd(pl08x, txd);
1662 return NULL;
b7f69d9d
VK
1663 }
1664 }
1665
aa4afb75
RK
1666 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1667 if (!ret) {
1668 pl08x_release_mux(plchan);
1669 pl08x_free_txd(pl08x, txd);
e8689e63 1670 return NULL;
aa4afb75 1671 }
e8689e63 1672
879f127b 1673 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
e8689e63
LW
1674}
1675
bcd1b0b9
MR
1676static int pl08x_config(struct dma_chan *chan,
1677 struct dma_slave_config *config)
1678{
1679 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1680 struct pl08x_driver_data *pl08x = plchan->host;
1681
1682 if (!plchan->slave)
1683 return -EINVAL;
1684
1685 /* Reject definitely invalid configurations */
1686 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1687 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1688 return -EINVAL;
1689
1690 if (config->device_fc && pl08x->vd->pl080s) {
1691 dev_err(&pl08x->adev->dev,
1692 "%s: PL080S does not support peripheral flow control\n",
1693 __func__);
1694 return -EINVAL;
1695 }
1696
1697 plchan->cfg = *config;
1698
1699 return 0;
1700}
1701
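/*
 * Illustrative client-side sketch (not part of this driver): a slave
 * configuration that pl08x_config() above would accept. 8-byte bus widths
 * are rejected outright, and device_fc = true is rejected on PL080S. The
 * FIFO address and burst size here are made-up example values.
 */
#if 0	/* example only */
static int example_configure_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 8,	/* typically the peripheral FIFO depth */
		.device_fc = false,	/* let the DMAC do the flow control */
	};

	return dmaengine_slave_config(chan, &cfg);
}
#endif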
1702static int pl08x_terminate_all(struct dma_chan *chan)
1703{
1704 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1705 struct pl08x_driver_data *pl08x = plchan->host;
1706 unsigned long flags;
1707 
1708 	spin_lock_irqsave(&plchan->vc.lock, flags);
1709 	if (!plchan->phychan && !plchan->at) {
1710 		spin_unlock_irqrestore(&plchan->vc.lock, flags);
1711 		return 0;
1712 	}
1713 
1714 plchan->state = PL08X_CHAN_IDLE;
1715
1716 if (plchan->phychan) {
1717 /*
1718 * Mark physical channel as free and free any slave
1719 * signal
1720 */
1721 pl08x_phy_free(plchan);
1722 }
1723 /* Dequeue jobs and free LLIs */
1724 if (plchan->at) {
1725 pl08x_desc_free(&plchan->at->vd);
1726 plchan->at = NULL;
1727 }
1728 /* Dequeue jobs not yet fired as well */
1729 pl08x_free_txd_list(pl08x, plchan);
1730
1731 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1732
1733 return 0;
1734}
1735
1736static int pl08x_pause(struct dma_chan *chan)
1737{
1738 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1739 unsigned long flags;
1740
1741 /*
1742 * Anything succeeds on channels with no physical allocation and
1743 * no queued transfers.
1744 */
1745 	spin_lock_irqsave(&plchan->vc.lock, flags);
1746 	if (!plchan->phychan && !plchan->at) {
1747 		spin_unlock_irqrestore(&plchan->vc.lock, flags);
1748 		return 0;
1749 	}
1750 
1751 	pl08x_pause_phy_chan(plchan->phychan);
1752 	plchan->state = PL08X_CHAN_PAUSED;
1753 
1754 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1755
1756 return 0;
1757}
1758
1759static int pl08x_resume(struct dma_chan *chan)
1760{
1761 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1762 unsigned long flags;
1763
1764 /*
1765 * Anything succeeds on channels with no physical allocation and
1766 * no queued transfers.
1767 */
1768 spin_lock_irqsave(&plchan->vc.lock, flags);
1769 if (!plchan->phychan && !plchan->at) {
1770 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1771 return 0;
1772 	}
1773 
1774 	pl08x_resume_phy_chan(plchan->phychan);
1775 	plchan->state = PL08X_CHAN_RUNNING;
1776 
1777 	spin_unlock_irqrestore(&plchan->vc.lock, flags);
1778 
1779 	return 0;
1780}
1781
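/*
 * Illustrative client-side sketch (not part of this driver): the generic
 * dmaengine calls that land in the terminate/pause/resume hooks above.
 */
#if 0	/* example only */
static void example_stop_channel(struct dma_chan *chan, bool just_pause)
{
	if (just_pause) {
		dmaengine_pause(chan);		/* halts the physical channel */
		/* ... do something, then ... */
		dmaengine_resume(chan);
	} else {
		/* drop in-flight and queued descriptors, free the physical channel */
		dmaengine_terminate_all(chan);
	}
}
#endif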
1782bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1783{
1784 	struct pl08x_dma_chan *plchan;
1785 char *name = chan_id;
1786
1787 /* Reject channels for devices not bound to this driver */
1788 if (chan->device->dev->driver != &pl08x_amba_driver.drv)
1789 return false;
1790
1791 plchan = to_pl08x_chan(chan);
1792
1793 /* Check that the channel is not taken! */
1794 if (!strcmp(plchan->name, name))
1795 return true;
1796
1797 return false;
1798}
1799 EXPORT_SYMBOL_GPL(pl08x_filter_id);
1800
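/*
 * Illustrative client-side sketch (not part of this driver): requesting a
 * channel by the bus_id string from the platform data, using the filter
 * above. The "uart0_tx" name is hypothetical and must match a
 * pl08x_channel_data entry supplied by the board.
 */
#if 0	/* example only */
static struct dma_chan *example_request_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
}
#endif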
1801/*
1802 * Just check that the device is there and active
1803 * TODO: turn this bit on/off depending on the number of physical channels
1804 * actually used, if it is zero... well shut it off. That will save some
1805 * power. Cut the clock at the same time.
1806 */
1807static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1808{
1809 /* The Nomadik variant does not have the config register */
1810 if (pl08x->vd->nomadik)
1811 return;
1812 	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1813}
1814
1815static irqreturn_t pl08x_irq(int irq, void *dev)
1816{
1817 struct pl08x_driver_data *pl08x = dev;
1818 u32 mask = 0, err, tc, i;
1819
1820 /* check & clear - ERR & TC interrupts */
1821 err = readl(pl08x->base + PL080_ERR_STATUS);
1822 if (err) {
1823 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
1824 __func__, err);
1825 writel(err, pl08x->base + PL080_ERR_CLEAR);
1826 	}
1827 	tc = readl(pl08x->base + PL080_TC_STATUS);
1828 	if (tc)
1829 		writel(tc, pl08x->base + PL080_TC_CLEAR);
1830 
1831 	if (!err && !tc)
1832 		return IRQ_NONE;
1833 
1834 	for (i = 0; i < pl08x->vd->channels; i++) {
1835 		if (((1 << i) & err) || ((1 << i) & tc)) {
1836 			/* Locate physical channel */
1837 			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1838 			struct pl08x_dma_chan *plchan = phychan->serving;
1839 			struct pl08x_txd *tx;
1840 
1841 if (!plchan) {
1842 dev_err(&pl08x->adev->dev,
1843 "%s Error TC interrupt on unused channel: 0x%08x\n",
1844 __func__, i);
1845 continue;
1846 }
1847
1848 			spin_lock(&plchan->vc.lock);
1849 			tx = plchan->at;
1850 			if (tx && tx->cyclic) {
1851 				vchan_cyclic_callback(&tx->vd);
1852 			} else if (tx) {
1853 				plchan->at = NULL;
1854 /*
1855 * This descriptor is done, release its mux
1856 * reservation.
1857 */
1858 pl08x_release_mux(plchan);
1859 				tx->done = true;
1860 				vchan_cookie_complete(&tx->vd);
1861 
1862 /*
1863 * And start the next descriptor (if any),
1864 * otherwise free this channel.
1865 */
1866 				if (vchan_next_desc(&plchan->vc))
1867 					pl08x_start_next_txd(plchan);
1868 				else
1869 					pl08x_phy_free(plchan);
1870 			}
1871 			spin_unlock(&plchan->vc.lock);
1872 
1873 mask |= (1 << i);
1874 }
1875 }
1876
1877 return mask ? IRQ_HANDLED : IRQ_NONE;
1878}
1879
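/*
 * Illustrative note (not from the original source): PL080_ERR_STATUS and
 * PL080_TC_STATUS each carry one bit per physical channel, so the handler
 * above walks vd->channels bits, completes the descriptor on the serving
 * virtual channel (or fires the cyclic callback) for every bit that is set,
 * and ORs the handled bits into "mask" to decide between IRQ_HANDLED and
 * IRQ_NONE.
 */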
1880static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1881{
1882 chan->slave = true;
1883 chan->name = chan->cd->bus_id;
1884 chan->cfg.src_addr = chan->cd->addr;
1885 chan->cfg.dst_addr = chan->cd->addr;
1886}
1887
1888/*
1889 * Initialise the DMAC memcpy/slave channels.
1890 * Make a local wrapper to hold required data
1891 */
1892static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1893 	struct dma_device *dmadev, unsigned int channels, bool slave)
1894{
1895 struct pl08x_dma_chan *chan;
1896 int i;
1897
1898 INIT_LIST_HEAD(&dmadev->channels);
1899 
1900 /*
1901 	 * Register as many memcpy channels as we have physical channels,
1902 * we won't always be able to use all but the code will have
1903 * to cope with that situation.
1904 */
1905 for (i = 0; i < channels; i++) {
1906 		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1907 if (!chan) {
1908 dev_err(&pl08x->adev->dev,
1909 "%s no memory for channel\n", __func__);
1910 return -ENOMEM;
1911 }
1912
1913 chan->host = pl08x;
1914 chan->state = PL08X_CHAN_IDLE;
1915 		chan->signal = -1;
1916
1917 if (slave) {
1918 			chan->cd = &pl08x->pd->slave_channels[i];
1919 			pl08x_dma_slave_init(chan);
1920 } else {
1921 chan->cd = &pl08x->pd->memcpy_channel;
1922 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1923 if (!chan->name) {
1924 kfree(chan);
1925 return -ENOMEM;
1926 }
1927 }
1928 		dev_dbg(&pl08x->adev->dev,
1929 "initialize virtual channel \"%s\"\n",
1930 chan->name);
1931
1932 		chan->vc.desc_free = pl08x_desc_free;
1933 		vchan_init(&chan->vc, dmadev);
1934 }
1935 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1936 i, slave ? "slave" : "memcpy");
1937 return i;
1938}
1939
1940static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1941{
1942 struct pl08x_dma_chan *chan = NULL;
1943 struct pl08x_dma_chan *next;
1944
1945 list_for_each_entry_safe(chan,
1946 next, &dmadev->channels, vc.chan.device_node) {
1947 list_del(&chan->vc.chan.device_node);
1948 kfree(chan);
1949 }
1950}
1951
1952#ifdef CONFIG_DEBUG_FS
1953static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
1954{
1955 switch (state) {
1956 case PL08X_CHAN_IDLE:
1957 return "idle";
1958 case PL08X_CHAN_RUNNING:
1959 return "running";
1960 case PL08X_CHAN_PAUSED:
1961 return "paused";
1962 case PL08X_CHAN_WAITING:
1963 return "waiting";
1964 default:
1965 break;
1966 }
1967 return "UNKNOWN STATE";
1968}
1969
1970static int pl08x_debugfs_show(struct seq_file *s, void *data)
1971{
1972 struct pl08x_driver_data *pl08x = s->private;
1973 struct pl08x_dma_chan *chan;
1974 struct pl08x_phy_chan *ch;
1975 unsigned long flags;
1976 int i;
1977
1978 seq_printf(s, "PL08x physical channels:\n");
1979 seq_printf(s, "CHANNEL:\tUSER:\n");
1980 seq_printf(s, "--------\t-----\n");
1981 for (i = 0; i < pl08x->vd->channels; i++) {
1982 struct pl08x_dma_chan *virt_chan;
1983
1984 ch = &pl08x->phy_chans[i];
1985
1986 spin_lock_irqsave(&ch->lock, flags);
1987 virt_chan = ch->serving;
1988
1989 seq_printf(s, "%d\t\t%s%s\n",
1990 ch->id,
1991 virt_chan ? virt_chan->name : "(none)",
1992 ch->locked ? " LOCKED" : "");
1993
1994 spin_unlock_irqrestore(&ch->lock, flags);
1995 }
1996
1997 seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1998 seq_printf(s, "CHANNEL:\tSTATE:\n");
1999 seq_printf(s, "--------\t------\n");
2000 	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
2001 		seq_printf(s, "%s\t\t%s\n", chan->name,
2002 pl08x_state_str(chan->state));
2003 }
2004
2005 seq_printf(s, "\nPL08x virtual slave channels:\n");
2006 seq_printf(s, "CHANNEL:\tSTATE:\n");
2007 seq_printf(s, "--------\t------\n");
2008 	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
2009 		seq_printf(s, "%s\t\t%s\n", chan->name,
2010 pl08x_state_str(chan->state));
2011 }
2012
2013 return 0;
2014}
2015
2016static int pl08x_debugfs_open(struct inode *inode, struct file *file)
2017{
2018 return single_open(file, pl08x_debugfs_show, inode->i_private);
2019}
2020
2021static const struct file_operations pl08x_debugfs_operations = {
2022 .open = pl08x_debugfs_open,
2023 .read = seq_read,
2024 .llseek = seq_lseek,
2025 .release = single_release,
2026};
2027
2028static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
2029{
2030 /* Expose a simple debugfs interface to view all clocks */
2031 (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
2032 S_IFREG | S_IRUGO, NULL, pl08x,
2033 &pl08x_debugfs_operations);
2034}
2035
2036#else
2037static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
2038{
2039}
2040#endif
2041
2042 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2043{
2044 struct pl08x_driver_data *pl08x;
2045 	const struct vendor_data *vd = id->data;
2046 	u32 tsfr_size;
2047 int ret = 0;
2048 int i;
2049
2050 ret = amba_request_regions(adev, NULL);
2051 if (ret)
2052 return ret;
2053
2054 /* Ensure that we can do DMA */
2055 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
2056 if (ret)
2057 goto out_no_pl08x;
2058
2059 	/* Create the driver state holder */
2060 	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
2061 if (!pl08x) {
2062 ret = -ENOMEM;
2063 goto out_no_pl08x;
2064 }
2065
2066 /* Initialize memcpy engine */
2067 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
2068 pl08x->memcpy.dev = &adev->dev;
2069 pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
2070 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
2071 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
2072 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
2073 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
2074 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
2075 pl08x->memcpy.device_config = pl08x_config;
2076 pl08x->memcpy.device_pause = pl08x_pause;
2077 pl08x->memcpy.device_resume = pl08x_resume;
2078 pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
2079 pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2080 pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2081 pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
2082 pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2083
2084 /* Initialize slave engine */
2085 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
2086 	dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
2087 pl08x->slave.dev = &adev->dev;
2088 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
2089 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
2090 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
2091 pl08x->slave.device_tx_status = pl08x_dma_tx_status;
2092 pl08x->slave.device_issue_pending = pl08x_issue_pending;
2093 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
2094 	pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
2095 pl08x->slave.device_config = pl08x_config;
2096 pl08x->slave.device_pause = pl08x_pause;
2097 pl08x->slave.device_resume = pl08x_resume;
2098 pl08x->slave.device_terminate_all = pl08x_terminate_all;
2099 pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2100 pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2101 pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2102 pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2103
2104 /* Get the platform data */
2105 pl08x->pd = dev_get_platdata(&adev->dev);
2106 if (!pl08x->pd) {
2107 dev_err(&adev->dev, "no platform data supplied\n");
2108 		ret = -EINVAL;
2109 goto out_no_platdata;
2110 }
2111
2112 /* Assign useful pointers to the driver state */
2113 pl08x->adev = adev;
2114 pl08x->vd = vd;
2115
2116 /* By default, AHB1 only. If dualmaster, from platform */
2117 pl08x->lli_buses = PL08X_AHB1;
2118 pl08x->mem_buses = PL08X_AHB1;
2119 if (pl08x->vd->dualmaster) {
2120 pl08x->lli_buses = pl08x->pd->lli_buses;
2121 pl08x->mem_buses = pl08x->pd->mem_buses;
2122 }
2123
2124 if (vd->pl080s)
2125 pl08x->lli_words = PL080S_LLI_WORDS;
2126 else
2127 pl08x->lli_words = PL080_LLI_WORDS;
2128 tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
2129
2130 /* A DMA memory pool for LLIs, align on 1-byte boundary */
2131 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
2132 			tsfr_size, PL08X_ALIGN, 0);
2133 if (!pl08x->pool) {
2134 ret = -ENOMEM;
2135 goto out_no_lli_pool;
2136 }
2137
2138 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
2139 if (!pl08x->base) {
2140 ret = -ENOMEM;
2141 goto out_no_ioremap;
2142 }
2143
2144 /* Turn on the PL08x */
2145 pl08x_ensure_on(pl08x);
2146
2147 	/* Attach the interrupt handler */
2148 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2149 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2150
2151 	ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
2152 if (ret) {
2153 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2154 __func__, adev->irq[0]);
2155 goto out_no_irq;
2156 }
2157
2158 /* Initialize physical channels */
2159 	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
2160 GFP_KERNEL);
2161 if (!pl08x->phy_chans) {
2162 dev_err(&adev->dev, "%s failed to allocate "
2163 "physical channel holders\n",
2164 __func__);
2165 		ret = -ENOMEM;
2166 goto out_no_phychans;
2167 }
2168
2169 for (i = 0; i < vd->channels; i++) {
2170 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
2171
2172 ch->id = i;
2173 ch->base = pl08x->base + PL080_Cx_BASE(i);
2174 		ch->reg_config = ch->base + vd->config_offset;
2175 		spin_lock_init(&ch->lock);
2176
2177 /*
2178 * Nomadik variants can have channels that are locked
2179 * down for the secure world only. Lock up these channels
2180 * by perpetually serving a dummy virtual channel.
2181 */
2182 if (vd->nomadik) {
2183 u32 val;
2184
2185 			val = readl(ch->reg_config);
2186 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
2187 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
2188 ch->locked = true;
2189 }
2190 }
2191
2192 dev_dbg(&adev->dev, "physical channel %d is %s\n",
2193 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
2194 }
2195
2196 /* Register as many memcpy channels as there are physical channels */
2197 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
2198 pl08x->vd->channels, false);
2199 if (ret <= 0) {
2200 dev_warn(&pl08x->adev->dev,
2201 "%s failed to enumerate memcpy channels - %d\n",
2202 __func__, ret);
2203 goto out_no_memcpy;
2204 }
2205
2206 /* Register slave channels */
2207 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
2208 			pl08x->pd->num_slave_channels, true);
2209 	if (ret < 0) {
2210 dev_warn(&pl08x->adev->dev,
2211 "%s failed to enumerate slave channels - %d\n",
2212 __func__, ret);
2213 goto out_no_slave;
2214 }
2215
2216 ret = dma_async_device_register(&pl08x->memcpy);
2217 if (ret) {
2218 dev_warn(&pl08x->adev->dev,
2219 "%s failed to register memcpy as an async device - %d\n",
2220 __func__, ret);
2221 goto out_no_memcpy_reg;
2222 }
2223
2224 ret = dma_async_device_register(&pl08x->slave);
2225 if (ret) {
2226 dev_warn(&pl08x->adev->dev,
2227 "%s failed to register slave as an async device - %d\n",
2228 __func__, ret);
2229 goto out_no_slave_reg;
2230 }
2231
2232 amba_set_drvdata(adev, pl08x);
2233 init_pl08x_debugfs(pl08x);
2234 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
2235 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
2236 		 (unsigned long long)adev->res.start, adev->irq[0]);
2237 
2238 return 0;
2239
2240out_no_slave_reg:
2241 dma_async_device_unregister(&pl08x->memcpy);
2242out_no_memcpy_reg:
2243 pl08x_free_virtual_channels(&pl08x->slave);
2244out_no_slave:
2245 pl08x_free_virtual_channels(&pl08x->memcpy);
2246out_no_memcpy:
2247 kfree(pl08x->phy_chans);
2248out_no_phychans:
2249 free_irq(adev->irq[0], pl08x);
2250out_no_irq:
2251 iounmap(pl08x->base);
2252out_no_ioremap:
2253 dma_pool_destroy(pl08x->pool);
2254out_no_lli_pool:
2255out_no_platdata:
2256 kfree(pl08x);
2257out_no_pl08x:
2258 amba_release_regions(adev);
2259 return ret;
2260}
2261
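/*
 * Illustrative note (not from the original source): the probe above registers
 * two dma_device instances backed by the same silicon (one advertising
 * DMA_MEMCPY, the other DMA_SLAVE and DMA_CYCLIC), so memcpy clients and
 * peripheral drivers each see only the capabilities they can use, while both
 * sets of virtual channels compete for the same physical channels.
 */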
2262 /* PL080 has 8 channels and the PL081 has just 2 */
2263 static struct vendor_data vendor_pl080 = {
2264 	.config_offset = PL080_CH_CONFIG,
2265 	.channels = 8,
2266 	.dualmaster = true,
2267 	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2268};
2269 
2270 static struct vendor_data vendor_nomadik = {
2271 	.config_offset = PL080_CH_CONFIG,
2272 	.channels = 8,
2273 	.dualmaster = true,
2274 	.nomadik = true,
2275 	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2276};
2277 
2278 static struct vendor_data vendor_pl080s = {
2279 	.config_offset = PL080S_CH_CONFIG,
2280 	.channels = 8,
2281 	.pl080s = true,
2282 	.max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
2283};
2284 
2285 static struct vendor_data vendor_pl081 = {
2286 	.config_offset = PL080_CH_CONFIG,
2287 	.channels = 2,
2288 	.dualmaster = false,
2289 	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2290};
2291 
2292 static struct amba_id pl08x_ids[] = {
2293 	/* Samsung PL080S variant */
2294 	{
2295 		.id = 0x0a141080,
2296 		.mask = 0xffffffff,
2297 		.data = &vendor_pl080s,
2298 	},
2299 	/* PL080 */
2300 	{
2301 		.id = 0x00041080,
2302 		.mask = 0x000fffff,
2303 		.data = &vendor_pl080,
2304 	},
2305 	/* PL081 */
2306 	{
2307 		.id = 0x00041081,
2308 		.mask = 0x000fffff,
2309 		.data = &vendor_pl081,
2310 	},
2311 	/* Nomadik 8815 PL080 variant */
2312 	{
2313 		.id = 0x00280080,
2314 		.mask = 0x00ffffff,
2315 		.data = &vendor_nomadik,
2316 	},
2317 	{ 0, 0 },
2318};
2319
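/*
 * Illustrative note (not from the original source): the AMBA bus matches an
 * entry when (periphid ^ id) & mask == 0, so the 0x000fffff masks ignore the
 * revision nibble in bits [23:20]. A rev1 PL080 reporting periphid 0x00141080
 * therefore still hits the 0x00041080 entry, while the Samsung PL080S needs
 * the exact 0x0a141080 value above.
 */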
2320MODULE_DEVICE_TABLE(amba, pl08x_ids);
2321
2322static struct amba_driver pl08x_amba_driver = {
2323 .drv.name = DRIVER_NAME,
2324 .id_table = pl08x_ids,
2325 .probe = pl08x_probe,
2326};
2327
2328static int __init pl08x_init(void)
2329{
2330 int retval;
2331 retval = amba_driver_register(&pl08x_amba_driver);
2332 if (retval)
2333 printk(KERN_WARNING DRIVER_NAME
2334 		       " failed to register as an AMBA device (%d)\n",
2335 retval);
2336 return retval;
2337}
2338subsys_initcall(pl08x_init);