/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 * Documentation: S3C6410 User's Manual == PL080S
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
 * It differs in the following aspects:
 * - CH_CONFIG register at different offset,
 * - separate CH_CONTROL2 register for transfer size,
 * - bigger maximum transfer size,
 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
 * - no support for peripheral flow control.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry. Unsupported by PL080S.
 */
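/*
 * Illustrative consumer-side sketch (not part of this driver): a slave
 * configuration following the FIFO guidance above, assuming a made-up
 * peripheral with a 16-word deep, 32-bit wide TX FIFO at fifo_phys.  For the
 * opposite direction (DMA_DEV_TO_MEM) one would set src_addr, src_addr_width
 * and src_maxburst = 8 (half the FIFO depth) instead.
 */
#if 0
static int example_configure_tx(struct dma_chan *chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_phys,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		/* destination burst size == the depth of the peripheral FIFO */
		.dst_maxburst = 16,
	};

	return dmaengine_slave_config(chan, &cfg);
}
#endif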
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/amba/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME	"pl08xdmac"

#define PL80X_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @config_offset: offset of the CH_CONFIG register within a channel's
 * register block (the PL080S keeps it at a different offset)
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 * that need to be checked for permission before use and some registers are
 * missing
 * @pl080s: whether this version is a PL080S, which has separate register and
 * LLI word for transfer size.
 * @max_transfer_size: the maximum single element transfer size for this
 * variant, in units of the transfer width
 */
struct vendor_data {
	u8 config_offset;
	u8 channels;
	bool dualmaster;
	bool nomadik;
	bool pl080s;
	u32 max_transfer_size;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)

RK
139/**
140 * struct pl08x_phy_chan - holder for the physical channels
141 * @id: physical index to this channel
142 * @lock: a lock to use when altering an instance of this struct
b23f204c
RK
143 * @serving: the virtual channel currently being served by this physical
144 * channel
ad0de2ac
RK
145 * @locked: channel unavailable for the system, e.g. dedicated to secure
146 * world
b23f204c
RK
147 */
148struct pl08x_phy_chan {
149 unsigned int id;
150 void __iomem *base;
d86ccea7 151 void __iomem *reg_config;
b23f204c 152 spinlock_t lock;
b23f204c 153 struct pl08x_dma_chan *serving;
ad0de2ac 154 bool locked;
b23f204c
RK
155};
156
/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 * mux released.
 * @cyclic: indicate cyclic transfers
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	u32 *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
	bool done;
	bool cyclic;
};

/**
 * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: slave DMA configuration for this channel, set at runtime via
 * dmaengine_slave_config()
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	const struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lli_words: how many words are used in each LLI item for this variant
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
	u8 lli_words;
};

/*
 * PL08X specific defines
 */

/* The order of words in an LLI. */
#define PL080_LLI_SRC		0
#define PL080_LLI_DST		1
#define PL080_LLI_LLI		2
#define PL080_LLI_CCTL		3
#define PL080S_LLI_CCTL2	4

/* Total words in an LLI. */
#define PL080_LLI_WORDS		4
#define PL080S_LLI_WORDS	8

/*
 * Number of LLIs in each LLI buffer allocated for one transfer
 * (maximum times we call dma_pool_alloc on this pool without freeing)
 */
#define MAX_NUM_TSFR_LLIS	512
#define PL08X_ALIGN		8

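/*
 * Illustrative layout (hypothetical values): on a plain PL080, a transfer
 * needing two LLIs occupies 2 * PL080_LLI_WORDS consecutive u32s in the
 * DMA pool, e.g.
 *
 *	llis_va[0..3] = { src0, dst0, llis_bus + 4 * sizeof(u32), cctl0 }
 *	llis_va[4..7] = { src1, dst1, 0, cctl1 | PL080_CONTROL_TC_IRQ_EN }
 *
 * The PL080_LLI_LLI word of each entry holds the bus address of the next
 * entry (0 terminates the chain), which is what pl08x_fill_llis_for_desc()
 * below writes.  A PL080S entry is the same with a CCTL2 word appended and
 * padded out to PL080S_LLI_WORDS.
 */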
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX.  One important point to note
 * here is that this does not depend on the physical channel.
 */
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
		ret = pd->get_xfer_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
			pd->put_xfer_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}

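/*
 * Illustrative platform-side sketch (not part of this driver): what a board
 * with an external request mux might plug into struct pl08x_platform_data.
 * example_mux_claim()/example_mux_release() are hypothetical board helpers
 * that route a peripheral's request line onto a free DMAC input.
 */
#if 0
static int example_get_xfer_signal(const struct pl08x_channel_data *cd)
{
	/*
	 * Return the DMAC request input now carrying this peripheral,
	 * or a negative error to signal denial of use.
	 */
	return example_mux_claim(cd);
}

static void example_put_xfer_signal(const struct pl08x_channel_data *cd,
				    int signal)
{
	example_mux_release(signal);
}

static struct pl08x_platform_data example_pd = {
	.get_xfer_signal = example_get_xfer_signal,
	.put_xfer_signal = example_put_xfer_signal,
	/* ... slave channel data, memcpy channel data, bus masks ... */
};
#endif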
/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->reg_config);
	return val & PL080_CONFIG_ACTIVE;
}

static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
		struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
{
	if (pl08x->vd->pl080s)
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
			lli[PL080S_LLI_CCTL2], ccfg);
	else
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);

	writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
	writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
	writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
	writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);

	if (pl08x->vd->pl080s)
		writel_relaxed(lli[PL080S_LLI_CCTL2],
				phychan->base + PL080S_CH_CONTROL2);

	writel(ccfg, phychan->reg_config);
}

e8689e63
LW
388/*
389 * Set the initial DMA register values i.e. those for the first LLI
e8b5e11d 390 * The next LLI pointer and the configuration interrupt bit have
c885bee4
RKAL
391 * been set when the LLIs were constructed. Poke them into the hardware
392 * and start the transfer.
e8689e63 393 */
eab82533 394static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
e8689e63 395{
c885bee4 396 struct pl08x_driver_data *pl08x = plchan->host;
e8689e63 397 struct pl08x_phy_chan *phychan = plchan->phychan;
879f127b
RK
398 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
399 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
09b3c323 400 u32 val;
c885bee4 401
879f127b 402 list_del(&txd->vd.node);
eab82533 403
c885bee4 404 plchan->at = txd;
e8689e63 405
c885bee4
RKAL
406 /* Wait for channel inactive */
407 while (pl08x_phy_channel_busy(phychan))
408 cpu_relax();
e8689e63 409
ba6785ff 410 pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
c885bee4
RKAL
411
412 /* Enable the DMA channel */
413 /* Do not access config register until channel shows as disabled */
414 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
19386b32 415 cpu_relax();
e8689e63 416
c885bee4 417 /* Do not access config register until channel shows as inactive */
d86ccea7 418 val = readl(phychan->reg_config);
e8689e63 419 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
d86ccea7 420 val = readl(phychan->reg_config);
e8689e63 421
d86ccea7 422 writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
e8689e63
LW
423}
424
425/*
81796616 426 * Pause the channel by setting the HALT bit.
e8689e63 427 *
81796616
RKAL
428 * For M->P transfers, pause the DMAC first and then stop the peripheral -
429 * the FIFO can only drain if the peripheral is still requesting data.
430 * (note: this can still timeout if the DMAC FIFO never drains of data.)
e8689e63 431 *
81796616
RKAL
432 * For P->M transfers, disable the peripheral first to stop it filling
433 * the DMAC FIFO, and then pause the DMAC.
e8689e63
LW
434 */
435static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
436{
437 u32 val;
81796616 438 int timeout;
e8689e63
LW
439
440 /* Set the HALT bit and wait for the FIFO to drain */
d86ccea7 441 val = readl(ch->reg_config);
e8689e63 442 val |= PL080_CONFIG_HALT;
d86ccea7 443 writel(val, ch->reg_config);
e8689e63
LW
444
445 /* Wait for channel inactive */
81796616
RKAL
446 for (timeout = 1000; timeout; timeout--) {
447 if (!pl08x_phy_channel_busy(ch))
448 break;
449 udelay(1);
450 }
451 if (pl08x_phy_channel_busy(ch))
452 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
e8689e63
LW
453}
454
455static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
456{
457 u32 val;
458
459 /* Clear the HALT bit */
d86ccea7 460 val = readl(ch->reg_config);
e8689e63 461 val &= ~PL080_CONFIG_HALT;
d86ccea7 462 writel(val, ch->reg_config);
e8689e63
LW
463}
464
fb526210
RKAL
465/*
466 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
467 * clears any pending interrupt status. This should not be used for
468 * an on-going transfer, but as a method of shutting down a channel
469 * (eg, when it's no longer used) or terminating a transfer.
470 */
471static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
472 struct pl08x_phy_chan *ch)
e8689e63 473{
d86ccea7 474 u32 val = readl(ch->reg_config);
e8689e63 475
fb526210 476 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
5835aa86 477 PL080_CONFIG_TC_IRQ_MASK);
e8689e63 478
d86ccea7 479 writel(val, ch->reg_config);
fb526210
RKAL
480
481 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
482 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
e8689e63
LW
483}
484
485static inline u32 get_bytes_in_cctl(u32 cctl)
486{
487 /* The source width defines the number of bytes */
488 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
489
f3287a52
AB
490 cctl &= PL080_CONTROL_SWIDTH_MASK;
491
e8689e63
LW
492 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
493 case PL080_WIDTH_8BIT:
494 break;
495 case PL080_WIDTH_16BIT:
496 bytes *= 2;
497 break;
498 case PL080_WIDTH_32BIT:
499 bytes *= 4;
500 break;
501 }
502 return bytes;
503}
504
da1b6c05
TF
505static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
506{
507 /* The source width defines the number of bytes */
508 u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
509
f3287a52
AB
510 cctl &= PL080_CONTROL_SWIDTH_MASK;
511
e8689e63
LW
512 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
513 case PL080_WIDTH_8BIT:
514 break;
515 case PL080_WIDTH_16BIT:
516 bytes *= 2;
517 break;
518 case PL080_WIDTH_32BIT:
519 bytes *= 4;
520 break;
521 }
522 return bytes;
523}
524
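/*
 * Worked example for the two helpers above (hypothetical register value):
 * a CH_CONTROL transfer size of 0x80 with a source width of
 * PL080_WIDTH_32BIT means 0x80 * 4 = 512 bytes remain.  On the PL080S the
 * size is read from CH_CONTROL2/CCTL2 instead, but the width still comes
 * from CH_CONTROL.
 */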
525/* The channel should be paused when calling this */
526static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
527{
ba6785ff
TF
528 struct pl08x_driver_data *pl08x = plchan->host;
529 const u32 *llis_va, *llis_va_limit;
e8689e63 530 struct pl08x_phy_chan *ch;
68a7faa2 531 dma_addr_t llis_bus;
e8689e63 532 struct pl08x_txd *txd;
ba6785ff 533 u32 llis_max_words;
68a7faa2 534 size_t bytes;
68a7faa2 535 u32 clli;
e8689e63 536
e8689e63
LW
537 ch = plchan->phychan;
538 txd = plchan->at;
539
68a7faa2
TF
540 if (!ch || !txd)
541 return 0;
542
e8689e63 543 /*
db9f136a
RKAL
544 * Follow the LLIs to get the number of remaining
545 * bytes in the currently active transaction.
e8689e63 546 */
68a7faa2 547 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
e8689e63 548
68a7faa2 549 /* First get the remaining bytes in the active transfer */
da1b6c05
TF
550 if (pl08x->vd->pl080s)
551 bytes = get_bytes_in_cctl_pl080s(
552 readl(ch->base + PL080_CH_CONTROL),
553 readl(ch->base + PL080S_CH_CONTROL2));
554 else
e8689e63
LW
555 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
556
68a7faa2
TF
557 if (!clli)
558 return bytes;
db9f136a 559
68a7faa2
TF
560 llis_va = txd->llis_va;
561 llis_bus = txd->llis_bus;
e8689e63 562
ba6785ff 563 llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
68a7faa2 564 BUG_ON(clli < llis_bus || clli >= llis_bus +
ba6785ff 565 sizeof(u32) * llis_max_words);
db9f136a 566
68a7faa2
TF
567 /*
568 * Locate the next LLI - as this is an array,
569 * it's simple maths to find.
570 */
ba6785ff 571 llis_va += (clli - llis_bus) / sizeof(u32);
e8689e63 572
ba6785ff
TF
573 llis_va_limit = llis_va + llis_max_words;
574
575 for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
da1b6c05
TF
576 if (pl08x->vd->pl080s)
577 bytes += get_bytes_in_cctl_pl080s(
578 llis_va[PL080_LLI_CCTL],
579 llis_va[PL080S_LLI_CCTL2]);
580 else
581 bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
68a7faa2
TF
582
583 /*
3b24c20b 584 * A LLI pointer going backward terminates the LLI list
68a7faa2 585 */
3b24c20b 586 if (llis_va[PL080_LLI_LLI] <= clli)
68a7faa2 587 break;
e8689e63
LW
588 }
589
e8689e63
LW
590 return bytes;
591}
592
593/*
594 * Allocate a physical channel for a virtual channel
94ae8522
RKAL
595 *
596 * Try to locate a physical channel to be used for this transfer. If all
597 * are taken return NULL and the requester will have to cope by using
598 * some fallback PIO mode or retrying later.
e8689e63
LW
599 */
600static struct pl08x_phy_chan *
601pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
602 struct pl08x_dma_chan *virt_chan)
603{
604 struct pl08x_phy_chan *ch = NULL;
605 unsigned long flags;
606 int i;
607
e8689e63
LW
608 for (i = 0; i < pl08x->vd->channels; i++) {
609 ch = &pl08x->phy_chans[i];
610
611 spin_lock_irqsave(&ch->lock, flags);
612
affa115e 613 if (!ch->locked && !ch->serving) {
e8689e63 614 ch->serving = virt_chan;
e8689e63
LW
615 spin_unlock_irqrestore(&ch->lock, flags);
616 break;
617 }
618
619 spin_unlock_irqrestore(&ch->lock, flags);
620 }
621
622 if (i == pl08x->vd->channels) {
623 /* No physical channel available, cope with it */
624 return NULL;
625 }
626
627 return ch;
628}
629
a5a488db 630/* Mark the physical channel as free. Note, this write is atomic. */
e8689e63
LW
631static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
632 struct pl08x_phy_chan *ch)
633{
a5a488db
RK
634 ch->serving = NULL;
635}
e8689e63 636
a5a488db
RK
637/*
638 * Try to allocate a physical channel. When successful, assign it to
639 * this virtual channel, and initiate the next descriptor. The
640 * virtual channel lock must be held at this point.
641 */
642static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
643{
644 struct pl08x_driver_data *pl08x = plchan->host;
645 struct pl08x_phy_chan *ch;
fb526210 646
a5a488db
RK
647 ch = pl08x_get_phy_channel(pl08x, plchan);
648 if (!ch) {
649 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
650 plchan->state = PL08X_CHAN_WAITING;
651 return;
652 }
e8689e63 653
a5a488db
RK
654 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
655 ch->id, plchan->name);
656
657 plchan->phychan = ch;
658 plchan->state = PL08X_CHAN_RUNNING;
659 pl08x_start_next_txd(plchan);
660}
661
662static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
663 struct pl08x_dma_chan *plchan)
664{
665 struct pl08x_driver_data *pl08x = plchan->host;
666
667 dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
668 ch->id, plchan->name);
669
670 /*
671 * We do this without taking the lock; we're really only concerned
672 * about whether this pointer is NULL or not, and we're guaranteed
673 * that this will only be called when it _already_ is non-NULL.
674 */
675 ch->serving = plchan;
676 plchan->phychan = ch;
677 plchan->state = PL08X_CHAN_RUNNING;
678 pl08x_start_next_txd(plchan);
679}
680
681/*
682 * Free a physical DMA channel, potentially reallocating it to another
683 * virtual channel if we have any pending.
684 */
685static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
686{
687 struct pl08x_driver_data *pl08x = plchan->host;
688 struct pl08x_dma_chan *p, *next;
689
690 retry:
691 next = NULL;
692
693 /* Find a waiting virtual channel for the next transfer. */
01d8dc64 694 list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
a5a488db
RK
695 if (p->state == PL08X_CHAN_WAITING) {
696 next = p;
697 break;
698 }
699
700 if (!next) {
01d8dc64 701 list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
a5a488db
RK
702 if (p->state == PL08X_CHAN_WAITING) {
703 next = p;
704 break;
705 }
706 }
707
708 /* Ensure that the physical channel is stopped */
709 pl08x_terminate_phy_chan(pl08x, plchan->phychan);
710
711 if (next) {
712 bool success;
713
714 /*
715 * Eww. We know this isn't going to deadlock
716 * but lockdep probably doesn't.
717 */
083be28a 718 spin_lock(&next->vc.lock);
a5a488db
RK
719 /* Re-check the state now that we have the lock */
720 success = next->state == PL08X_CHAN_WAITING;
721 if (success)
722 pl08x_phy_reassign_start(plchan->phychan, next);
083be28a 723 spin_unlock(&next->vc.lock);
a5a488db
RK
724
725 /* If the state changed, try to find another channel */
726 if (!success)
727 goto retry;
728 } else {
729 /* No more jobs, so free up the physical channel */
730 pl08x_put_phy_channel(pl08x, plchan->phychan);
731 }
732
733 plchan->phychan = NULL;
734 plchan->state = PL08X_CHAN_IDLE;
e8689e63
LW
735}
736
737/*
738 * LLI handling
739 */
740
741static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
742{
743 switch (coded) {
744 case PL080_WIDTH_8BIT:
745 return 1;
746 case PL080_WIDTH_16BIT:
747 return 2;
748 case PL080_WIDTH_32BIT:
749 return 4;
750 default:
751 break;
752 }
753 BUG();
754 return 0;
755}
756
757static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
cace6585 758 size_t tsize)
e8689e63
LW
759{
760 u32 retbits = cctl;
761
e8b5e11d 762 /* Remove all src, dst and transfer size bits */
e8689e63
LW
763 retbits &= ~PL080_CONTROL_DWIDTH_MASK;
764 retbits &= ~PL080_CONTROL_SWIDTH_MASK;
765 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
766
767 /* Then set the bits according to the parameters */
768 switch (srcwidth) {
769 case 1:
770 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
771 break;
772 case 2:
773 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
774 break;
775 case 4:
776 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
777 break;
778 default:
779 BUG();
780 break;
781 }
782
783 switch (dstwidth) {
784 case 1:
785 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
786 break;
787 case 2:
788 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
789 break;
790 case 4:
791 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
792 break;
793 default:
794 BUG();
795 break;
796 }
797
5110e51d 798 tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
e8689e63
LW
799 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
800 return retbits;
801}
802
542361f8
RKAL
803struct pl08x_lli_build_data {
804 struct pl08x_txd *txd;
542361f8
RKAL
805 struct pl08x_bus_data srcbus;
806 struct pl08x_bus_data dstbus;
807 size_t remainder;
25c94f7f 808 u32 lli_bus;
542361f8
RKAL
809};
810
e8689e63 811/*
0532e6fc
VK
812 * Autoselect a master bus to use for the transfer. Slave will be the chosen as
813 * victim in case src & dest are not similarly aligned. i.e. If after aligning
814 * masters address with width requirements of transfer (by sending few byte by
815 * byte data), slave is still not aligned, then its width will be reduced to
816 * BYTE.
817 * - prefers the destination bus if both available
036f05fd 818 * - prefers bus with fixed address (i.e. peripheral)
e8689e63 819 */
542361f8
RKAL
820static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
821 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
e8689e63
LW
822{
823 if (!(cctl & PL080_CONTROL_DST_INCR)) {
542361f8
RKAL
824 *mbus = &bd->dstbus;
825 *sbus = &bd->srcbus;
036f05fd
VK
826 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
827 *mbus = &bd->srcbus;
828 *sbus = &bd->dstbus;
e8689e63 829 } else {
036f05fd 830 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
542361f8
RKAL
831 *mbus = &bd->dstbus;
832 *sbus = &bd->srcbus;
036f05fd 833 } else {
542361f8
RKAL
834 *mbus = &bd->srcbus;
835 *sbus = &bd->dstbus;
e8689e63
LW
836 }
837 }
838}
839
840/*
94ae8522 841 * Fills in one LLI for a certain transfer descriptor and advance the counter
e8689e63 842 */
ba6785ff
TF
843static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
844 struct pl08x_lli_build_data *bd,
da1b6c05 845 int num_llis, int len, u32 cctl, u32 cctl2)
e8689e63 846{
ba6785ff
TF
847 u32 offset = num_llis * pl08x->lli_words;
848 u32 *llis_va = bd->txd->llis_va + offset;
542361f8 849 dma_addr_t llis_bus = bd->txd->llis_bus;
e8689e63
LW
850
851 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
852
ba6785ff
TF
853 /* Advance the offset to next LLI. */
854 offset += pl08x->lli_words;
855
856 llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
857 llis_va[PL080_LLI_DST] = bd->dstbus.addr;
858 llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
859 llis_va[PL080_LLI_LLI] |= bd->lli_bus;
860 llis_va[PL080_LLI_CCTL] = cctl;
da1b6c05
TF
861 if (pl08x->vd->pl080s)
862 llis_va[PL080S_LLI_CCTL2] = cctl2;
e8689e63
LW
863
864 if (cctl & PL080_CONTROL_SRC_INCR)
542361f8 865 bd->srcbus.addr += len;
e8689e63 866 if (cctl & PL080_CONTROL_DST_INCR)
542361f8 867 bd->dstbus.addr += len;
e8689e63 868
542361f8 869 BUG_ON(bd->remainder < len);
cace6585 870
542361f8 871 bd->remainder -= len;
e8689e63
LW
872}
873
ba6785ff
TF
874static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
875 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
876 int num_llis, size_t *total_bytes)
e8689e63 877{
03af500f 878 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
da1b6c05 879 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
03af500f 880 (*total_bytes) += len;
e8689e63
LW
881}
882
48924e42
TF
883#ifdef VERBOSE_DEBUG
884static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
885 const u32 *llis_va, int num_llis)
886{
887 int i;
888
da1b6c05 889 if (pl08x->vd->pl080s) {
48924e42 890 dev_vdbg(&pl08x->adev->dev,
da1b6c05
TF
891 "%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
892 "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
893 for (i = 0; i < num_llis; i++) {
894 dev_vdbg(&pl08x->adev->dev,
895 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
896 i, llis_va, llis_va[PL080_LLI_SRC],
897 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
898 llis_va[PL080_LLI_CCTL],
899 llis_va[PL080S_LLI_CCTL2]);
900 llis_va += pl08x->lli_words;
901 }
902 } else {
903 dev_vdbg(&pl08x->adev->dev,
904 "%-3s %-9s %-10s %-10s %-10s %s\n",
905 "lli", "", "csrc", "cdst", "clli", "cctl");
906 for (i = 0; i < num_llis; i++) {
907 dev_vdbg(&pl08x->adev->dev,
908 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
909 i, llis_va, llis_va[PL080_LLI_SRC],
910 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
911 llis_va[PL080_LLI_CCTL]);
912 llis_va += pl08x->lli_words;
913 }
48924e42
TF
914 }
915}
916#else
917static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
918 const u32 *llis_va, int num_llis) {}
919#endif
920
e8689e63
LW
921/*
922 * This fills in the table of LLIs for the transfer descriptor
923 * Note that we assume we never have to change the burst sizes
924 * Return 0 for error
925 */
926static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
927 struct pl08x_txd *txd)
928{
e8689e63 929 struct pl08x_bus_data *mbus, *sbus;
542361f8 930 struct pl08x_lli_build_data bd;
e8689e63 931 int num_llis = 0;
03af500f 932 u32 cctl, early_bytes = 0;
b7f69d9d 933 size_t max_bytes_per_lli, total_bytes;
ba6785ff 934 u32 *llis_va, *last_lli;
b7f69d9d 935 struct pl08x_sg *dsg;
e8689e63 936
3e27ee84 937 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
e8689e63
LW
938 if (!txd->llis_va) {
939 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
940 return 0;
941 }
942
542361f8 943 bd.txd = txd;
25c94f7f 944 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
b7f69d9d 945 cctl = txd->cctl;
542361f8 946
e8689e63 947 /* Find maximum width of the source bus */
542361f8 948 bd.srcbus.maxwidth =
e8689e63
LW
949 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
950 PL080_CONTROL_SWIDTH_SHIFT);
951
952 /* Find maximum width of the destination bus */
542361f8 953 bd.dstbus.maxwidth =
e8689e63
LW
954 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
955 PL080_CONTROL_DWIDTH_SHIFT);
956
b7f69d9d
VK
957 list_for_each_entry(dsg, &txd->dsg_list, node) {
958 total_bytes = 0;
959 cctl = txd->cctl;
e8689e63 960
b7f69d9d
VK
961 bd.srcbus.addr = dsg->src_addr;
962 bd.dstbus.addr = dsg->dst_addr;
963 bd.remainder = dsg->len;
964 bd.srcbus.buswidth = bd.srcbus.maxwidth;
965 bd.dstbus.buswidth = bd.dstbus.maxwidth;
e8689e63 966
b7f69d9d 967 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
e8689e63 968
b90ca063
AP
969 dev_vdbg(&pl08x->adev->dev,
970 "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
971 (u64)bd.srcbus.addr,
972 cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
b7f69d9d 973 bd.srcbus.buswidth,
b90ca063
AP
974 (u64)bd.dstbus.addr,
975 cctl & PL080_CONTROL_DST_INCR ? "+" : "",
b7f69d9d
VK
976 bd.dstbus.buswidth,
977 bd.remainder);
978 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
979 mbus == &bd.srcbus ? "src" : "dst",
980 sbus == &bd.srcbus ? "src" : "dst");
fc74eb79 981
b7f69d9d
VK
982 /*
983 * Zero length is only allowed if all these requirements are
984 * met:
985 * - flow controller is peripheral.
986 * - src.addr is aligned to src.width
987 * - dst.addr is aligned to dst.width
988 *
989 * sg_len == 1 should be true, as there can be two cases here:
990 *
991 * - Memory addresses are contiguous and are not scattered.
992 * Here, Only one sg will be passed by user driver, with
993 * memory address and zero length. We pass this to controller
994 * and after the transfer it will receive the last burst
995 * request from peripheral and so transfer finishes.
996 *
997 * - Memory addresses are scattered and are not contiguous.
998 * Here, Obviously as DMA controller doesn't know when a lli's
999 * transfer gets over, it can't load next lli. So in this
1000 * case, there has to be an assumption that only one lli is
1001 * supported. Thus, we can't have scattered addresses.
1002 */
1003 if (!bd.remainder) {
1004 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
1005 PL080_CONFIG_FLOW_CONTROL_SHIFT;
1006 if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
0a235657 1007 (fc <= PL080_FLOW_SRC2DST_SRC))) {
b7f69d9d
VK
1008 dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
1009 __func__);
1010 return 0;
1011 }
0a235657 1012
1c38b289
AP
1013 if (!IS_BUS_ALIGNED(&bd.srcbus) ||
1014 !IS_BUS_ALIGNED(&bd.dstbus)) {
b7f69d9d
VK
1015 dev_err(&pl08x->adev->dev,
1016 "%s src & dst address must be aligned to src"
1017 " & dst width if peripheral is flow controller",
1018 __func__);
1019 return 0;
1020 }
03af500f 1021
b7f69d9d
VK
1022 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
1023 bd.dstbus.buswidth, 0);
ba6785ff 1024 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
da1b6c05 1025 0, cctl, 0);
b7f69d9d
VK
1026 break;
1027 }
e8689e63
LW
1028
1029 /*
b7f69d9d
VK
1030 * Send byte by byte for following cases
1031 * - Less than a bus width available
1032 * - until master bus is aligned
e8689e63 1033 */
b7f69d9d
VK
1034 if (bd.remainder < mbus->buswidth)
1035 early_bytes = bd.remainder;
1c38b289
AP
1036 else if (!IS_BUS_ALIGNED(mbus)) {
1037 early_bytes = mbus->buswidth -
1038 (mbus->addr & (mbus->buswidth - 1));
b7f69d9d
VK
1039 if ((bd.remainder - early_bytes) < mbus->buswidth)
1040 early_bytes = bd.remainder;
1041 }
e8689e63 1042
b7f69d9d
VK
1043 if (early_bytes) {
1044 dev_vdbg(&pl08x->adev->dev,
6fc8ae78 1045 "%s byte width LLIs (remain 0x%08zx)\n",
b7f69d9d 1046 __func__, bd.remainder);
ba6785ff
TF
1047 prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
1048 num_llis++, &total_bytes);
e8689e63
LW
1049 }
1050
b7f69d9d
VK
1051 if (bd.remainder) {
1052 /*
1053 * Master now aligned
1054 * - if slave is not then we must set its width down
1055 */
1c38b289 1056 if (!IS_BUS_ALIGNED(sbus)) {
b7f69d9d
VK
1057 dev_dbg(&pl08x->adev->dev,
1058 "%s set down bus width to one byte\n",
1059 __func__);
fa6a940b 1060
b7f69d9d
VK
1061 sbus->buswidth = 1;
1062 }
e8689e63
LW
1063
1064 /*
b7f69d9d
VK
1065 * Bytes transferred = tsize * src width, not
1066 * MIN(buswidths)
e8689e63 1067 */
b7f69d9d 1068 max_bytes_per_lli = bd.srcbus.buswidth *
5110e51d 1069 pl08x->vd->max_transfer_size;
b7f69d9d
VK
1070 dev_vdbg(&pl08x->adev->dev,
1071 "%s max bytes per lli = %zu\n",
1072 __func__, max_bytes_per_lli);
e8689e63
LW
1073
1074 /*
b7f69d9d
VK
1075 * Make largest possible LLIs until less than one bus
1076 * width left
e8689e63 1077 */
b7f69d9d
VK
1078 while (bd.remainder > (mbus->buswidth - 1)) {
1079 size_t lli_len, tsize, width;
e8689e63 1080
b7f69d9d
VK
1081 /*
1082 * If enough left try to send max possible,
1083 * otherwise try to send the remainder
1084 */
1085 lli_len = min(bd.remainder, max_bytes_per_lli);
16a2e7d3 1086
b7f69d9d
VK
1087 /*
1088 * Check against maximum bus alignment:
1089 * Calculate actual transfer size in relation to
1090 * bus width an get a maximum remainder of the
1091 * highest bus width - 1
1092 */
1093 width = max(mbus->buswidth, sbus->buswidth);
1094 lli_len = (lli_len / width) * width;
1095 tsize = lli_len / bd.srcbus.buswidth;
1096
1097 dev_vdbg(&pl08x->adev->dev,
1098 "%s fill lli with single lli chunk of "
1099 "size 0x%08zx (remainder 0x%08zx)\n",
1100 __func__, lli_len, bd.remainder);
1101
1102 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
16a2e7d3 1103 bd.dstbus.buswidth, tsize);
ba6785ff 1104 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
da1b6c05 1105 lli_len, cctl, tsize);
b7f69d9d
VK
1106 total_bytes += lli_len;
1107 }
e8689e63 1108
b7f69d9d
VK
1109 /*
1110 * Send any odd bytes
1111 */
1112 if (bd.remainder) {
1113 dev_vdbg(&pl08x->adev->dev,
1114 "%s align with boundary, send odd bytes (remain %zu)\n",
1115 __func__, bd.remainder);
ba6785ff
TF
1116 prep_byte_width_lli(pl08x, &bd, &cctl,
1117 bd.remainder, num_llis++, &total_bytes);
b7f69d9d 1118 }
e8689e63 1119 }
16a2e7d3 1120
b7f69d9d
VK
1121 if (total_bytes != dsg->len) {
1122 dev_err(&pl08x->adev->dev,
1123 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
1124 __func__, total_bytes, dsg->len);
1125 return 0;
1126 }
e8689e63 1127
b7f69d9d
VK
1128 if (num_llis >= MAX_NUM_TSFR_LLIS) {
1129 dev_err(&pl08x->adev->dev,
1130 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
ba6785ff 1131 __func__, MAX_NUM_TSFR_LLIS);
b7f69d9d
VK
1132 return 0;
1133 }
e8689e63 1134 }
b58b6b5b
RKAL
1135
1136 llis_va = txd->llis_va;
ba6785ff 1137 last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
e8689e63 1138
3b24c20b
AB
1139 if (txd->cyclic) {
1140 /* Link back to the first LLI. */
1141 last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
1142 } else {
1143 /* The final LLI terminates the LLI. */
1144 last_lli[PL080_LLI_LLI] = 0;
1145 /* The final LLI element shall also fire an interrupt. */
1146 last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
e8689e63 1147 }
e8689e63 1148
48924e42 1149 pl08x_dump_lli(pl08x, llis_va, num_llis);
e8689e63
LW
1150
1151 return num_llis;
1152}
1153
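/*
 * Worked example of the splitting above (hypothetical numbers): a 4099-byte
 * scatterlist entry with both busses 4 bytes wide and a source address 2
 * bytes past a word boundary becomes
 *  - one byte-wide LLI of 2 bytes to align the master bus,
 *  - word-wide LLIs covering the aligned 4096 bytes (a single LLI if
 *    max_bytes_per_lli allows it),
 *  - one final byte-wide LLI for the single odd byte left over,
 * and total_bytes (2 + 4096 + 1) matches dsg->len, as checked above.
 */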
e8689e63
LW
1154static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
1155 struct pl08x_txd *txd)
1156{
b7f69d9d
VK
1157 struct pl08x_sg *dsg, *_dsg;
1158
c1205646
VK
1159 if (txd->llis_va)
1160 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
e8689e63 1161
b7f69d9d
VK
1162 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
1163 list_del(&dsg->node);
1164 kfree(dsg);
1165 }
1166
e8689e63
LW
1167 kfree(txd);
1168}
1169
18536134
RK
1170static void pl08x_desc_free(struct virt_dma_desc *vd)
1171{
1172 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1173 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
18536134 1174
89116bf9 1175 dma_descriptor_unmap(&vd->tx);
18536134
RK
1176 if (!txd->done)
1177 pl08x_release_mux(plchan);
1178
18536134 1179 pl08x_free_txd(plchan->host, txd);
18536134
RK
1180}
1181
e8689e63
LW
1182static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
1183 struct pl08x_dma_chan *plchan)
1184{
ea160561 1185 LIST_HEAD(head);
e8689e63 1186
879f127b 1187 vchan_get_all_descriptors(&plchan->vc, &head);
91998261 1188 vchan_dma_desc_free_list(&plchan->vc, &head);
e8689e63
LW
1189}
1190
1191/*
1192 * The DMA ENGINE API
1193 */
e8689e63
LW
1194static void pl08x_free_chan_resources(struct dma_chan *chan)
1195{
a068682c
RK
1196 /* Ensure all queued descriptors are freed */
1197 vchan_free_chan_resources(to_virt_chan(chan));
e8689e63
LW
1198}
1199
e8689e63
LW
1200static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
1201 struct dma_chan *chan, unsigned long flags)
1202{
1203 struct dma_async_tx_descriptor *retval = NULL;
1204
1205 return retval;
1206}
1207
1208/*
94ae8522
RKAL
1209 * Code accessing dma_async_is_complete() in a tight loop may give problems.
1210 * If slaves are relying on interrupts to signal completion this function
1211 * must not be called with interrupts disabled.
e8689e63 1212 */
3e27ee84
VK
1213static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
1214 dma_cookie_t cookie, struct dma_tx_state *txstate)
e8689e63
LW
1215{
1216 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
06e885b7
RK
1217 struct virt_dma_desc *vd;
1218 unsigned long flags;
e8689e63 1219 enum dma_status ret;
06e885b7 1220 size_t bytes = 0;
e8689e63 1221
96a2af41 1222 ret = dma_cookie_status(chan, cookie, txstate);
0996e895 1223 if (ret == DMA_COMPLETE)
e8689e63 1224 return ret;
e8689e63 1225
06e885b7
RK
1226 /*
1227 * There's no point calculating the residue if there's
1228 * no txstate to store the value.
1229 */
1230 if (!txstate) {
1231 if (plchan->state == PL08X_CHAN_PAUSED)
1232 ret = DMA_PAUSED;
1233 return ret;
1234 }
1235
1236 spin_lock_irqsave(&plchan->vc.lock, flags);
1237 ret = dma_cookie_status(chan, cookie, txstate);
0996e895 1238 if (ret != DMA_COMPLETE) {
06e885b7
RK
1239 vd = vchan_find_desc(&plchan->vc, cookie);
1240 if (vd) {
1241 /* On the issued list, so hasn't been processed yet */
1242 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1243 struct pl08x_sg *dsg;
1244
1245 list_for_each_entry(dsg, &txd->dsg_list, node)
1246 bytes += dsg->len;
1247 } else {
1248 bytes = pl08x_getbytes_chan(plchan);
1249 }
1250 }
1251 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1252
e8689e63
LW
1253 /*
1254 * This cookie not complete yet
96a2af41 1255 * Get number of bytes left in the active transactions and queue
e8689e63 1256 */
06e885b7 1257 dma_set_residue(txstate, bytes);
e8689e63 1258
06e885b7
RK
1259 if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
1260 ret = DMA_PAUSED;
e8689e63
LW
1261
1262 /* Whether waiting or running, we're in progress */
06e885b7 1263 return ret;
e8689e63
LW
1264}
1265
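/*
 * Illustrative consumer-side sketch (not part of this driver): polling the
 * progress of a descriptor whose cookie was returned by dmaengine_submit().
 */
#if 0
static void example_check_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		pr_info("transfer done\n");
	else
		pr_info("status %d, %u bytes left\n", status, state.residue);
}
#endif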
1266/* PrimeCell DMA extension */
1267struct burst_table {
760596c6 1268 u32 burstwords;
e8689e63
LW
1269 u32 reg;
1270};
1271
1272static const struct burst_table burst_sizes[] = {
1273 {
1274 .burstwords = 256,
760596c6 1275 .reg = PL080_BSIZE_256,
e8689e63
LW
1276 },
1277 {
1278 .burstwords = 128,
760596c6 1279 .reg = PL080_BSIZE_128,
e8689e63
LW
1280 },
1281 {
1282 .burstwords = 64,
760596c6 1283 .reg = PL080_BSIZE_64,
e8689e63
LW
1284 },
1285 {
1286 .burstwords = 32,
760596c6 1287 .reg = PL080_BSIZE_32,
e8689e63
LW
1288 },
1289 {
1290 .burstwords = 16,
760596c6 1291 .reg = PL080_BSIZE_16,
e8689e63
LW
1292 },
1293 {
1294 .burstwords = 8,
760596c6 1295 .reg = PL080_BSIZE_8,
e8689e63
LW
1296 },
1297 {
1298 .burstwords = 4,
760596c6 1299 .reg = PL080_BSIZE_4,
e8689e63
LW
1300 },
1301 {
760596c6
RKAL
1302 .burstwords = 0,
1303 .reg = PL080_BSIZE_1,
e8689e63
LW
1304 },
1305};
1306
121c8476
RKAL
1307/*
1308 * Given the source and destination available bus masks, select which
1309 * will be routed to each port. We try to have source and destination
1310 * on separate ports, but always respect the allowable settings.
1311 */
1312static u32 pl08x_select_bus(u8 src, u8 dst)
1313{
1314 u32 cctl = 0;
1315
1316 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
1317 cctl |= PL080_CONTROL_DST_AHB2;
1318 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
1319 cctl |= PL080_CONTROL_SRC_AHB2;
1320
1321 return cctl;
1322}
1323
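/*
 * Example of the selection above: with src masters = PL08X_AHB1 | PL08X_AHB2
 * and dst masters = PL08X_AHB2 only, the destination is placed on AHB2 and
 * the source stays on AHB1, so the two ends do not contend for one port.
 */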
f14c426c
RKAL
1324static u32 pl08x_cctl(u32 cctl)
1325{
1326 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
1327 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
1328 PL080_CONTROL_PROT_MASK);
1329
1330 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1331 return cctl | PL080_CONTROL_PROT_SYS;
1332}
1333
aa88cdaa
RKAL
1334static u32 pl08x_width(enum dma_slave_buswidth width)
1335{
1336 switch (width) {
1337 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1338 return PL080_WIDTH_8BIT;
1339 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1340 return PL080_WIDTH_16BIT;
1341 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1342 return PL080_WIDTH_32BIT;
f32807f1
VK
1343 default:
1344 return ~0;
aa88cdaa 1345 }
aa88cdaa
RKAL
1346}
1347
760596c6
RKAL
1348static u32 pl08x_burst(u32 maxburst)
1349{
1350 int i;
1351
1352 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
1353 if (burst_sizes[i].burstwords <= maxburst)
1354 break;
1355
1356 return burst_sizes[i].reg;
1357}
1358
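/*
 * Worked example: a slave config asking for maxburst = 20 has no exact match
 * in burst_sizes[], so the loop above settles on the next smaller supported
 * burst, 16 words (PL080_BSIZE_16).  maxburst = 0 falls through to single
 * transfers (PL080_BSIZE_1).
 */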
9862ba17
RK
1359static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
1360 enum dma_slave_buswidth addr_width, u32 maxburst)
1361{
1362 u32 width, burst, cctl = 0;
1363
1364 width = pl08x_width(addr_width);
1365 if (width == ~0)
1366 return ~0;
1367
1368 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
1369 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
1370
1371 /*
1372 * If this channel will only request single transfers, set this
1373 * down to ONE element. Also select one element if no maxburst
1374 * is specified.
1375 */
1376 if (plchan->cd->single)
1377 maxburst = 1;
1378
1379 burst = pl08x_burst(maxburst);
1380 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
1381 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
1382
1383 return pl08x_cctl(cctl);
1384}
1385
e8689e63
LW
1386/*
1387 * Slave transactions callback to the slave device to allow
1388 * synchronization of slave DMA signals with the DMAC enable
1389 */
1390static void pl08x_issue_pending(struct dma_chan *chan)
1391{
1392 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
e8689e63
LW
1393 unsigned long flags;
1394
083be28a 1395 spin_lock_irqsave(&plchan->vc.lock, flags);
879f127b 1396 if (vchan_issue_pending(&plchan->vc)) {
a5a488db
RK
1397 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
1398 pl08x_phy_alloc_and_start(plchan);
e8689e63 1399 }
083be28a 1400 spin_unlock_irqrestore(&plchan->vc.lock, flags);
e8689e63
LW
1401}
1402
879f127b 1403static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
ac3cd20d 1404{
b201c111 1405 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
ac3cd20d
RKAL
1406
1407 if (txd) {
b7f69d9d 1408 INIT_LIST_HEAD(&txd->dsg_list);
4983a04f
RKAL
1409
1410 /* Always enable error and terminal interrupts */
1411 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
1412 PL080_CONFIG_TC_IRQ_MASK;
ac3cd20d
RKAL
1413 }
1414 return txd;
1415}
1416
e8689e63
LW
1417/*
1418 * Initialize a descriptor to be used by memcpy submit
1419 */
1420static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1421 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1422 size_t len, unsigned long flags)
1423{
1424 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1425 struct pl08x_driver_data *pl08x = plchan->host;
1426 struct pl08x_txd *txd;
b7f69d9d 1427 struct pl08x_sg *dsg;
e8689e63
LW
1428 int ret;
1429
879f127b 1430 txd = pl08x_get_txd(plchan);
e8689e63
LW
1431 if (!txd) {
1432 dev_err(&pl08x->adev->dev,
1433 "%s no memory for descriptor\n", __func__);
1434 return NULL;
1435 }
1436
b7f69d9d
VK
1437 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1438 if (!dsg) {
1439 pl08x_free_txd(pl08x, txd);
1440 dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
1441 __func__);
1442 return NULL;
1443 }
1444 list_add_tail(&dsg->node, &txd->dsg_list);
1445
b7f69d9d
VK
1446 dsg->src_addr = src;
1447 dsg->dst_addr = dest;
1448 dsg->len = len;
e8689e63
LW
1449
1450 /* Set platform data for m2m */
4983a04f 1451 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
dc8d5f8d 1452 txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
c7da9a56 1453 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
4983a04f 1454
e8689e63 1455 /* Both to be incremented or the code will break */
70b5ed6b 1456 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
c7da9a56 1457
c7da9a56 1458 if (pl08x->vd->dualmaster)
121c8476
RKAL
1459 txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
1460 pl08x->mem_buses);
e8689e63 1461
aa4afb75
RK
1462 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1463 if (!ret) {
1464 pl08x_free_txd(pl08x, txd);
e8689e63 1465 return NULL;
aa4afb75 1466 }
e8689e63 1467
879f127b 1468 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
e8689e63
LW
1469}
1470
3b24c20b
AB
1471static struct pl08x_txd *pl08x_init_txd(
1472 struct dma_chan *chan,
1473 enum dma_transfer_direction direction,
1474 dma_addr_t *slave_addr)
e8689e63
LW
1475{
1476 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1477 struct pl08x_driver_data *pl08x = plchan->host;
1478 struct pl08x_txd *txd;
dc8d5f8d 1479 enum dma_slave_buswidth addr_width;
0a235657 1480 int ret, tmp;
409ec8db 1481 u8 src_buses, dst_buses;
dc8d5f8d 1482 u32 maxburst, cctl;
e8689e63 1483
879f127b 1484 txd = pl08x_get_txd(plchan);
e8689e63
LW
1485 if (!txd) {
1486 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1487 return NULL;
1488 }
1489
e8689e63
LW
1490 /*
1491 * Set up addresses, the PrimeCell configured address
1492 * will take precedence since this may configure the
1493 * channel target address dynamically at runtime.
1494 */
db8196df 1495 if (direction == DMA_MEM_TO_DEV) {
dc8d5f8d 1496 cctl = PL080_CONTROL_SRC_INCR;
3b24c20b 1497 *slave_addr = plchan->cfg.dst_addr;
dc8d5f8d
RK
1498 addr_width = plchan->cfg.dst_addr_width;
1499 maxburst = plchan->cfg.dst_maxburst;
409ec8db
RK
1500 src_buses = pl08x->mem_buses;
1501 dst_buses = plchan->cd->periph_buses;
db8196df 1502 } else if (direction == DMA_DEV_TO_MEM) {
dc8d5f8d 1503 cctl = PL080_CONTROL_DST_INCR;
3b24c20b 1504 *slave_addr = plchan->cfg.src_addr;
dc8d5f8d
RK
1505 addr_width = plchan->cfg.src_addr_width;
1506 maxburst = plchan->cfg.src_maxburst;
409ec8db
RK
1507 src_buses = plchan->cd->periph_buses;
1508 dst_buses = pl08x->mem_buses;
e8689e63 1509 } else {
b7f69d9d 1510 pl08x_free_txd(pl08x, txd);
e8689e63
LW
1511 dev_err(&pl08x->adev->dev,
1512 "%s direction unsupported\n", __func__);
1513 return NULL;
1514 }
e8689e63 1515
dc8d5f8d 1516 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
800d683e
RK
1517 if (cctl == ~0) {
1518 pl08x_free_txd(pl08x, txd);
1519 dev_err(&pl08x->adev->dev,
1520 "DMA slave configuration botched?\n");
1521 return NULL;
1522 }
1523
409ec8db
RK
1524 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
1525
95442b22 1526 if (plchan->cfg.device_fc)
db8196df 1527 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
0a235657
VK
1528 PL080_FLOW_PER2MEM_PER;
1529 else
db8196df 1530 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
0a235657
VK
1531 PL080_FLOW_PER2MEM;
1532
1533 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1534
c48d4963
RK
1535 ret = pl08x_request_mux(plchan);
1536 if (ret < 0) {
1537 pl08x_free_txd(pl08x, txd);
1538 dev_dbg(&pl08x->adev->dev,
1539 "unable to mux for transfer on %s due to platform restrictions\n",
1540 plchan->name);
1541 return NULL;
1542 }
1543
1544 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
1545 plchan->signal, plchan->name);
1546
1547 /* Assign the flow control signal to this channel */
1548 if (direction == DMA_MEM_TO_DEV)
1549 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
1550 else
1551 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1552
3b24c20b
AB
1553 return txd;
1554}
1555
1556static int pl08x_tx_add_sg(struct pl08x_txd *txd,
1557 enum dma_transfer_direction direction,
1558 dma_addr_t slave_addr,
1559 dma_addr_t buf_addr,
1560 unsigned int len)
1561{
1562 struct pl08x_sg *dsg;
1563
1564 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1565 if (!dsg)
1566 return -ENOMEM;
1567
1568 list_add_tail(&dsg->node, &txd->dsg_list);
1569
1570 dsg->len = len;
1571 if (direction == DMA_MEM_TO_DEV) {
1572 dsg->src_addr = buf_addr;
1573 dsg->dst_addr = slave_addr;
1574 } else {
1575 dsg->src_addr = slave_addr;
1576 dsg->dst_addr = buf_addr;
1577 }
1578
1579 return 0;
1580}
1581
1582static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1583 struct dma_chan *chan, struct scatterlist *sgl,
1584 unsigned int sg_len, enum dma_transfer_direction direction,
1585 unsigned long flags, void *context)
1586{
1587 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1588 struct pl08x_driver_data *pl08x = plchan->host;
1589 struct pl08x_txd *txd;
1590 struct scatterlist *sg;
1591 int ret, tmp;
1592 dma_addr_t slave_addr;
1593
1594 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1595 __func__, sg_dma_len(sgl), plchan->name);
1596
1597 txd = pl08x_init_txd(chan, direction, &slave_addr);
1598 if (!txd)
1599 return NULL;
1600
b7f69d9d 1601 for_each_sg(sgl, sg, sg_len, tmp) {
3b24c20b
AB
1602 ret = pl08x_tx_add_sg(txd, direction, slave_addr,
1603 sg_dma_address(sg),
1604 sg_dma_len(sg));
1605 if (ret) {
c48d4963 1606 pl08x_release_mux(plchan);
b7f69d9d
VK
1607 pl08x_free_txd(pl08x, txd);
1608 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1609 __func__);
1610 return NULL;
1611 }
3b24c20b 1612 }
b7f69d9d 1613
3b24c20b
AB
1614 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1615 if (!ret) {
1616 pl08x_release_mux(plchan);
1617 pl08x_free_txd(pl08x, txd);
1618 return NULL;
1619 }
1620
1621 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1622}
1623
1624static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
1625 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1626 size_t period_len, enum dma_transfer_direction direction,
31c1e5a1 1627 unsigned long flags)
3b24c20b
AB
1628{
1629 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1630 struct pl08x_driver_data *pl08x = plchan->host;
1631 struct pl08x_txd *txd;
1632 int ret, tmp;
1633 dma_addr_t slave_addr;
1634
1635 dev_dbg(&pl08x->adev->dev,
6fc8ae78 1636 "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
3b24c20b
AB
1637 __func__, period_len, buf_len,
1638 direction == DMA_MEM_TO_DEV ? "to" : "from",
1639 plchan->name);
1640
1641 txd = pl08x_init_txd(chan, direction, &slave_addr);
1642 if (!txd)
1643 return NULL;
1644
1645 txd->cyclic = true;
1646 txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
1647 for (tmp = 0; tmp < buf_len; tmp += period_len) {
1648 ret = pl08x_tx_add_sg(txd, direction, slave_addr,
1649 buf_addr + tmp, period_len);
1650 if (ret) {
1651 pl08x_release_mux(plchan);
1652 pl08x_free_txd(pl08x, txd);
1653 return NULL;
b7f69d9d
VK
1654 }
1655 }
1656
aa4afb75
RK
1657 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1658 if (!ret) {
1659 pl08x_release_mux(plchan);
1660 pl08x_free_txd(pl08x, txd);
e8689e63 1661 return NULL;
aa4afb75 1662 }
e8689e63 1663
879f127b 1664 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
e8689e63
LW
1665}
1666
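/*
 * Illustrative consumer-side sketch (not part of this driver): a cyclic
 * transfer as an audio driver might set one up.  Channel, buffer and period
 * are hypothetical and error handling is minimal.
 */
#if 0
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
#endif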
bcd1b0b9
MR
1667static int pl08x_config(struct dma_chan *chan,
1668 struct dma_slave_config *config)
1669{
1670 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1671 struct pl08x_driver_data *pl08x = plchan->host;
1672
1673 if (!plchan->slave)
1674 return -EINVAL;
1675
1676 /* Reject definitely invalid configurations */
1677 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1678 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1679 return -EINVAL;
1680
1681 if (config->device_fc && pl08x->vd->pl080s) {
1682 dev_err(&pl08x->adev->dev,
1683 "%s: PL080S does not support peripheral flow control\n",
1684 __func__);
1685 return -EINVAL;
1686 }
1687
1688 plchan->cfg = *config;
1689
1690 return 0;
1691}
1692
1693static int pl08x_terminate_all(struct dma_chan *chan)
e8689e63
LW
1694{
1695 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1696 struct pl08x_driver_data *pl08x = plchan->host;
1697 unsigned long flags;
1698
1699 spin_lock_irqsave(&plchan->vc.lock, flags);
1700 if (!plchan->phychan && !plchan->at) {
1701 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1702 return 0;
1703 }
1704
1705 plchan->state = PL08X_CHAN_IDLE;
1706
1707 if (plchan->phychan) {
1708 /*
1709 * Mark physical channel as free and free any slave
1710 * signal
1711 */
1712 pl08x_phy_free(plchan);
1713 }
1714 /* Dequeue jobs and free LLIs */
1715 if (plchan->at) {
1716 pl08x_desc_free(&plchan->at->vd);
1717 plchan->at = NULL;
1718 }
1719 /* Dequeue jobs not yet fired as well */
1720 pl08x_free_txd_list(pl08x, plchan);
1721
1722 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1723
1724 return 0;
1725}
1726
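/*
 * Illustrative sketch (editorial, not part of the driver): client-side
 * teardown. dmaengine_terminate_all() ends up in the function above,
 * which both stops the physical channel and frees queued descriptors.
 *
 *	dmaengine_terminate_all(chan);
 *	dma_release_channel(chan);
 */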
1727static int pl08x_pause(struct dma_chan *chan)
1728{
1729 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1730 unsigned long flags;
1731
1732 /*
1733 * Anything succeeds on channels with no physical allocation and
1734 * no queued transfers.
1735 */
1736 spin_lock_irqsave(&plchan->vc.lock, flags);
1737 if (!plchan->phychan && !plchan->at) {
1738 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1739 return 0;
1740 }
1741
1742 pl08x_pause_phy_chan(plchan->phychan);
1743 plchan->state = PL08X_CHAN_PAUSED;
1744
1745 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1746
1747 return 0;
1748}
1749
1750static int pl08x_resume(struct dma_chan *chan)
1751{
1752 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1753 unsigned long flags;
1754
1755 /*
1756 * Anything succeeds on channels with no physical allocation and
1757 * no queued transfers.
1758 */
1759 spin_lock_irqsave(&plchan->vc.lock, flags);
1760 if (!plchan->phychan && !plchan->at) {
1761 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1762 return 0;
1763 }
1764
1765 pl08x_resume_phy_chan(plchan->phychan);
1766 plchan->state = PL08X_CHAN_RUNNING;
1767
1768 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1769
1770 return 0;
1771}
1772
1773bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1774{
1775 struct pl08x_dma_chan *plchan;
1776 char *name = chan_id;
1777
1778 /* Reject channels for devices not bound to this driver */
1779 if (chan->device->dev->driver != &pl08x_amba_driver.drv)
1780 return false;
1781
1782 plchan = to_pl08x_chan(chan);
1783
1784 /* Check that the channel is not taken! */
1785 if (!strcmp(plchan->name, name))
1786 return true;
1787
1788 return false;
1789}
1790EXPORT_SYMBOL_GPL(pl08x_filter_id);
1791
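/*
 * Illustrative sketch (editorial, not part of the driver): requesting a
 * channel by its platform-data name through the filter above. "uart0_tx"
 * is a hypothetical bus_id from the board's channel data.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 *	if (!chan)
 *		return -ENODEV;
 */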
1792/*
1793 * Just check that the device is there and active
1794 * TODO: turn this bit on/off depending on the number of physical channels
1795 * actually used, if it is zero... well shut it off. That will save some
1796 * power. Cut the clock at the same time.
1797 */
1798static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1799{
1800 /* The Nomadik variant does not have the config register */
1801 if (pl08x->vd->nomadik)
1802 return;
1803 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1804}
1805
1806static irqreturn_t pl08x_irq(int irq, void *dev)
1807{
1808 struct pl08x_driver_data *pl08x = dev;
1809 u32 mask = 0, err, tc, i;
1810
1811 /* check & clear - ERR & TC interrupts */
1812 err = readl(pl08x->base + PL080_ERR_STATUS);
1813 if (err) {
1814 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
1815 __func__, err);
1816 writel(err, pl08x->base + PL080_ERR_CLEAR);
1817 }
1818 tc = readl(pl08x->base + PL080_TC_STATUS);
1819 if (tc)
1820 writel(tc, pl08x->base + PL080_TC_CLEAR);
1821
1822 if (!err && !tc)
1823 return IRQ_NONE;
1824
1825 for (i = 0; i < pl08x->vd->channels; i++) {
1826 if (((1 << i) & err) || ((1 << i) & tc)) {
1827 /* Locate physical channel */
1828 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1829 struct pl08x_dma_chan *plchan = phychan->serving;
1830 struct pl08x_txd *tx;
1831
1832 if (!plchan) {
1833 dev_err(&pl08x->adev->dev,
1834 "%s Error TC interrupt on unused channel: 0x%08x\n",
1835 __func__, i);
1836 continue;
1837 }
1838
1839 spin_lock(&plchan->vc.lock);
1840 tx = plchan->at;
1841 if (tx && tx->cyclic) {
1842 vchan_cyclic_callback(&tx->vd);
1843 } else if (tx) {
1844 plchan->at = NULL;
1845 /*
1846 * This descriptor is done, release its mux
1847 * reservation.
1848 */
1849 pl08x_release_mux(plchan);
1850 tx->done = true;
1851 vchan_cookie_complete(&tx->vd);
1852
1853 /*
1854 * And start the next descriptor (if any),
1855 * otherwise free this channel.
1856 */
1857 if (vchan_next_desc(&plchan->vc))
1858 pl08x_start_next_txd(plchan);
1859 else
1860 pl08x_phy_free(plchan);
1861 }
1862 spin_unlock(&plchan->vc.lock);
1863
1864 mask |= (1 << i);
1865 }
1866 }
1867
1868 return mask ? IRQ_HANDLED : IRQ_NONE;
1869}
1870
1871static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1872{
1873 chan->slave = true;
1874 chan->name = chan->cd->bus_id;
1875 chan->cfg.src_addr = chan->cd->addr;
1876 chan->cfg.dst_addr = chan->cd->addr;
1877}
1878
1879/*
1880 * Initialise the DMAC memcpy/slave channels.
1881 * Make a local wrapper to hold required data
1882 */
1883static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1884 struct dma_device *dmadev, unsigned int channels, bool slave)
1885{
1886 struct pl08x_dma_chan *chan;
1887 int i;
1888
1889 INIT_LIST_HEAD(&dmadev->channels);
1890
1891 /*
1892 * Register as many memcpy channels as we have physical channels;
1893 * we won't always be able to use them all, but the code will have
1894 * to cope with that situation.
1895 */
1896 for (i = 0; i < channels; i++) {
1897 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1898 if (!chan) {
1899 dev_err(&pl08x->adev->dev,
1900 "%s no memory for channel\n", __func__);
1901 return -ENOMEM;
1902 }
1903
1904 chan->host = pl08x;
1905 chan->state = PL08X_CHAN_IDLE;
1906 chan->signal = -1;
1907
1908 if (slave) {
1909 chan->cd = &pl08x->pd->slave_channels[i];
1910 pl08x_dma_slave_init(chan);
1911 } else {
1912 chan->cd = &pl08x->pd->memcpy_channel;
1913 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1914 if (!chan->name) {
1915 kfree(chan);
1916 return -ENOMEM;
1917 }
1918 }
1919 dev_dbg(&pl08x->adev->dev,
1920 "initialize virtual channel \"%s\"\n",
1921 chan->name);
1922
1923 chan->vc.desc_free = pl08x_desc_free;
1924 vchan_init(&chan->vc, dmadev);
1925 }
1926 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1927 i, slave ? "slave" : "memcpy");
1928 return i;
1929}
1930
1931static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1932{
1933 struct pl08x_dma_chan *chan = NULL;
1934 struct pl08x_dma_chan *next;
1935
1936 list_for_each_entry_safe(chan,
1937 next, &dmadev->channels, vc.chan.device_node) {
1938 list_del(&chan->vc.chan.device_node);
1939 kfree(chan);
1940 }
1941}
1942
1943#ifdef CONFIG_DEBUG_FS
1944static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
1945{
1946 switch (state) {
1947 case PL08X_CHAN_IDLE:
1948 return "idle";
1949 case PL08X_CHAN_RUNNING:
1950 return "running";
1951 case PL08X_CHAN_PAUSED:
1952 return "paused";
1953 case PL08X_CHAN_WAITING:
1954 return "waiting";
1955 default:
1956 break;
1957 }
1958 return "UNKNOWN STATE";
1959}
1960
1961static int pl08x_debugfs_show(struct seq_file *s, void *data)
1962{
1963 struct pl08x_driver_data *pl08x = s->private;
1964 struct pl08x_dma_chan *chan;
1965 struct pl08x_phy_chan *ch;
1966 unsigned long flags;
1967 int i;
1968
1969 seq_printf(s, "PL08x physical channels:\n");
1970 seq_printf(s, "CHANNEL:\tUSER:\n");
1971 seq_printf(s, "--------\t-----\n");
1972 for (i = 0; i < pl08x->vd->channels; i++) {
1973 struct pl08x_dma_chan *virt_chan;
1974
1975 ch = &pl08x->phy_chans[i];
1976
1977 spin_lock_irqsave(&ch->lock, flags);
1978 virt_chan = ch->serving;
1979
1980 seq_printf(s, "%d\t\t%s%s\n",
1981 ch->id,
1982 virt_chan ? virt_chan->name : "(none)",
1983 ch->locked ? " LOCKED" : "");
1984
1985 spin_unlock_irqrestore(&ch->lock, flags);
1986 }
1987
1988 seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1989 seq_printf(s, "CHANNEL:\tSTATE:\n");
1990 seq_printf(s, "--------\t------\n");
1991 list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
1992 seq_printf(s, "%s\t\t%s\n", chan->name,
1993 pl08x_state_str(chan->state));
1994 }
1995
1996 seq_printf(s, "\nPL08x virtual slave channels:\n");
1997 seq_printf(s, "CHANNEL:\tSTATE:\n");
1998 seq_printf(s, "--------\t------\n");
1999 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
2000 seq_printf(s, "%s\t\t%s\n", chan->name,
2001 pl08x_state_str(chan->state));
2002 }
2003
2004 return 0;
2005}
2006
2007static int pl08x_debugfs_open(struct inode *inode, struct file *file)
2008{
2009 return single_open(file, pl08x_debugfs_show, inode->i_private);
2010}
2011
2012static const struct file_operations pl08x_debugfs_operations = {
2013 .open = pl08x_debugfs_open,
2014 .read = seq_read,
2015 .llseek = seq_lseek,
2016 .release = single_release,
2017};
2018
2019static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
2020{
2021 /* Expose a simple debugfs interface to view all channels */
2022 (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
2023 S_IFREG | S_IRUGO, NULL, pl08x,
2024 &pl08x_debugfs_operations);
2025}
2026
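/*
 * Editorial note: with debugfs mounted at its usual location, the table
 * above can then be read from userspace as /sys/kernel/debug/<name>,
 * where <name> is whatever dev_name() returns for the AMBA device; the
 * exact path depends on where debugfs is mounted.
 */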
2027#else
2028static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
2029{
2030}
2031#endif
2032
2033static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2034{
2035 struct pl08x_driver_data *pl08x;
2036 const struct vendor_data *vd = id->data;
2037 u32 tsfr_size;
2038 int ret = 0;
2039 int i;
2040
2041 ret = amba_request_regions(adev, NULL);
2042 if (ret)
2043 return ret;
2044
2045 /* Ensure that we can do DMA */
2046 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
2047 if (ret)
2048 goto out_no_pl08x;
2049
2050 /* Create the driver state holder */
2051 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
2052 if (!pl08x) {
2053 ret = -ENOMEM;
2054 goto out_no_pl08x;
2055 }
2056
2057 /* Initialize memcpy engine */
2058 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
2059 pl08x->memcpy.dev = &adev->dev;
2060 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
2061 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
2062 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
2063 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
2064 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
2065 pl08x->memcpy.device_config = pl08x_config;
2066 pl08x->memcpy.device_pause = pl08x_pause;
2067 pl08x->memcpy.device_resume = pl08x_resume;
2068 pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
2069 pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2070 pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2071 pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
2072 pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2073
2074 /* Initialize slave engine */
2075 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
2076 dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
2077 pl08x->slave.dev = &adev->dev;
2078 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
2079 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
2080 pl08x->slave.device_tx_status = pl08x_dma_tx_status;
2081 pl08x->slave.device_issue_pending = pl08x_issue_pending;
2082 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
2083 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
2084 pl08x->slave.device_config = pl08x_config;
2085 pl08x->slave.device_pause = pl08x_pause;
2086 pl08x->slave.device_resume = pl08x_resume;
2087 pl08x->slave.device_terminate_all = pl08x_terminate_all;
2088 pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
2089 pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
2090 pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2091 pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2092
2093 /* Get the platform data */
2094 pl08x->pd = dev_get_platdata(&adev->dev);
2095 if (!pl08x->pd) {
2096 dev_err(&adev->dev, "no platform data supplied\n");
2097 ret = -EINVAL;
2098 goto out_no_platdata;
2099 }
2100
2101 /* Assign useful pointers to the driver state */
2102 pl08x->adev = adev;
2103 pl08x->vd = vd;
2104
2105 /* By default, AHB1 only. If dualmaster, from platform */
2106 pl08x->lli_buses = PL08X_AHB1;
2107 pl08x->mem_buses = PL08X_AHB1;
2108 if (pl08x->vd->dualmaster) {
2109 pl08x->lli_buses = pl08x->pd->lli_buses;
2110 pl08x->mem_buses = pl08x->pd->mem_buses;
2111 }
2112
2113 if (vd->pl080s)
2114 pl08x->lli_words = PL080S_LLI_WORDS;
2115 else
2116 pl08x->lli_words = PL080_LLI_WORDS;
2117 tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
2118
2119 /* A DMA memory pool for LLIs, align on 1-byte boundary */
2120 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
2121 tsfr_size, PL08X_ALIGN, 0);
2122 if (!pl08x->pool) {
2123 ret = -ENOMEM;
2124 goto out_no_lli_pool;
2125 }
2126
2127 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
2128 if (!pl08x->base) {
2129 ret = -ENOMEM;
2130 goto out_no_ioremap;
2131 }
2132
2133 /* Turn on the PL08x */
2134 pl08x_ensure_on(pl08x);
2135
2136 /* Attach the interrupt handler */
2137 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2138 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2139
2140 ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
2141 if (ret) {
2142 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2143 __func__, adev->irq[0]);
2144 goto out_no_irq;
2145 }
2146
2147 /* Initialize physical channels */
2148 pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
2149 GFP_KERNEL);
2150 if (!pl08x->phy_chans) {
2151 dev_err(&adev->dev, "%s failed to allocate "
2152 "physical channel holders\n",
2153 __func__);
2154 ret = -ENOMEM;
2155 goto out_no_phychans;
2156 }
2157
2158 for (i = 0; i < vd->channels; i++) {
2159 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
2160
2161 ch->id = i;
2162 ch->base = pl08x->base + PL080_Cx_BASE(i);
2163 ch->reg_config = ch->base + vd->config_offset;
2164 spin_lock_init(&ch->lock);
2165
2166 /*
2167 * Nomadik variants can have channels that are locked
2168 * down for the secure world only. Lock up these channels
2169 * by perpetually serving a dummy virtual channel.
2170 */
2171 if (vd->nomadik) {
2172 u32 val;
2173
2174 val = readl(ch->reg_config);
2175 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
2176 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
2177 ch->locked = true;
2178 }
2179 }
2180
2181 dev_dbg(&adev->dev, "physical channel %d is %s\n",
2182 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
2183 }
2184
2185 /* Register as many memcpy channels as there are physical channels */
2186 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
2187 pl08x->vd->channels, false);
2188 if (ret <= 0) {
2189 dev_warn(&pl08x->adev->dev,
2190 "%s failed to enumerate memcpy channels - %d\n",
2191 __func__, ret);
2192 goto out_no_memcpy;
2193 }
2194
2195 /* Register slave channels */
2196 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
2197 pl08x->pd->num_slave_channels, true);
2198 if (ret < 0) {
2199 dev_warn(&pl08x->adev->dev,
2200 "%s failed to enumerate slave channels - %d\n",
2201 __func__, ret);
2202 goto out_no_slave;
2203 }
2204
2205 ret = dma_async_device_register(&pl08x->memcpy);
2206 if (ret) {
2207 dev_warn(&pl08x->adev->dev,
2208 "%s failed to register memcpy as an async device - %d\n",
2209 __func__, ret);
2210 goto out_no_memcpy_reg;
2211 }
2212
2213 ret = dma_async_device_register(&pl08x->slave);
2214 if (ret) {
2215 dev_warn(&pl08x->adev->dev,
2216 "%s failed to register slave as an async device - %d\n",
2217 __func__, ret);
2218 goto out_no_slave_reg;
2219 }
2220
2221 amba_set_drvdata(adev, pl08x);
2222 init_pl08x_debugfs(pl08x);
2223 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
2224 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
2225 (unsigned long long)adev->res.start, adev->irq[0]);
2226
2227 return 0;
2228
2229out_no_slave_reg:
2230 dma_async_device_unregister(&pl08x->memcpy);
2231out_no_memcpy_reg:
2232 pl08x_free_virtual_channels(&pl08x->slave);
2233out_no_slave:
2234 pl08x_free_virtual_channels(&pl08x->memcpy);
2235out_no_memcpy:
2236 kfree(pl08x->phy_chans);
2237out_no_phychans:
2238 free_irq(adev->irq[0], pl08x);
2239out_no_irq:
2240 iounmap(pl08x->base);
2241out_no_ioremap:
2242 dma_pool_destroy(pl08x->pool);
2243out_no_lli_pool:
2244out_no_platdata:
2245 kfree(pl08x);
2246out_no_pl08x:
2247 amba_release_regions(adev);
2248 return ret;
2249}
2250
2251/* PL080 has 8 channels and the PL081 has just 2 */
2252static struct vendor_data vendor_pl080 = {
2253 .config_offset = PL080_CH_CONFIG,
2254 .channels = 8,
2255 .dualmaster = true,
2256 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2257};
2258
2259static struct vendor_data vendor_nomadik = {
2260 .config_offset = PL080_CH_CONFIG,
2261 .channels = 8,
2262 .dualmaster = true,
2263 .nomadik = true,
2264 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2265};
2266
2267static struct vendor_data vendor_pl080s = {
2268 .config_offset = PL080S_CH_CONFIG,
2269 .channels = 8,
2270 .pl080s = true,
2271 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
2272};
2273
2274static struct vendor_data vendor_pl081 = {
2275 .config_offset = PL080_CH_CONFIG,
2276 .channels = 2,
2277 .dualmaster = false,
2278 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2279};
2280
2281static struct amba_id pl08x_ids[] = {
2282 /* Samsung PL080S variant */
2283 {
2284 .id = 0x0a141080,
2285 .mask = 0xffffffff,
2286 .data = &vendor_pl080s,
2287 },
2288 /* PL080 */
2289 {
2290 .id = 0x00041080,
2291 .mask = 0x000fffff,
2292 .data = &vendor_pl080,
2293 },
2294 /* PL081 */
2295 {
2296 .id = 0x00041081,
2297 .mask = 0x000fffff,
2298 .data = &vendor_pl081,
2299 },
2300 /* Nomadik 8815 PL080 variant */
2301 {
2302 .id = 0x00280080,
2303 .mask = 0x00ffffff,
2304 .data = &vendor_nomadik,
2305 },
2306 { 0, 0 },
2307};
2308
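/*
 * Editorial note: AMBA matching tests (periphid & mask) == id, which is
 * why the PL080S entry comes first with a full 0xffffffff mask; its
 * periphid 0x0a141080 masked with the generic 0x000fffff yields
 * 0x00041080 and would otherwise also match the plain PL080 entry.
 */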
2309MODULE_DEVICE_TABLE(amba, pl08x_ids);
2310
2311static struct amba_driver pl08x_amba_driver = {
2312 .drv.name = DRIVER_NAME,
2313 .id_table = pl08x_ids,
2314 .probe = pl08x_probe,
2315};
2316
2317static int __init pl08x_init(void)
2318{
2319 int retval;
2320 retval = amba_driver_register(&pl08x_amba_driver);
2321 if (retval)
2322 printk(KERN_WARNING DRIVER_NAME
2323 ": failed to register as an AMBA device (%d)\n",
2324 retval);
2325 return retval;
2326}
2327subsys_initcall(pl08x_init);