dmaengine: PL08x: move DMA signal muxing into pl08x_dma_chan struct
drivers/dma/amba-pl08x.c
1 /*
2 * Copyright (c) 2006 ARM Ltd.
3 * Copyright (c) 2010 ST-Ericsson SA
4 *
5 * Author: Peter Pearse <peter.pearse@arm.com>
6 * Author: Linus Walleij <linus.walleij@stericsson.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * The full GNU General Public License is in this distribution in the file
23 * called COPYING.
24 *
25 * Documentation: ARM DDI 0196G == PL080
26 * Documentation: ARM DDI 0218E == PL081
27 *
28 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
29 * channel.
30 *
31 * The PL080 has 8 channels available for simultaneous use, and the PL081
32 * has only two channels. So on these DMA controllers the number of channels
33 * and the number of incoming DMA signals are two totally different things.
34 * It is usually not possible to serve all physical signals at once,
35 * so a multiplexing scheme with possible denial of use is necessary.
36 *
37 * The PL080 has dual bus masters, the PL081 has a single master.
38 *
39 * Memory to peripheral transfer may be visualized as
40 * Get data from memory to DMAC
41 * Until no data left
42 * On burst request from peripheral
43 * Destination burst from DMAC to peripheral
44 * Clear burst request
45 * Raise terminal count interrupt
46 *
47 * For peripherals with a FIFO:
48 * Source burst size == half the depth of the peripheral FIFO
49 * Destination burst size == the depth of the peripheral FIFO
50 *
51 * (Bursts are irrelevant for mem to mem transfers - there are no burst
52 * signals, the DMA controller will simply facilitate its AHB master.)
53 *
54 * ASSUMES default (little) endianness for DMA transfers
55 *
56 * The PL08x has two flow control settings:
57 * - DMAC flow control: the transfer size defines the number of transfers
58 * which occur for the current LLI entry, and the DMAC raises TC at the
59 * end of every LLI entry. Observed behaviour shows the DMAC listening
60 * to both the BREQ and SREQ signals (contrary to the documentation),
61 * transferring data if either is active. The LBREQ and LSREQ signals
62 * are ignored.
63 *
64 * - Peripheral flow control: the transfer size is ignored (and should be
65 * zero). The data is transferred from the current LLI entry, until
66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC
67 * will then move to the next LLI entry.
68 *
69 * Global TODO:
70 * - Break out common code from arch/arm/mach-s3c64xx and share
71 */
72 #include <linux/amba/bus.h>
73 #include <linux/amba/pl08x.h>
74 #include <linux/debugfs.h>
75 #include <linux/delay.h>
76 #include <linux/device.h>
77 #include <linux/dmaengine.h>
78 #include <linux/dmapool.h>
79 #include <linux/dma-mapping.h>
80 #include <linux/init.h>
81 #include <linux/interrupt.h>
82 #include <linux/module.h>
83 #include <linux/pm_runtime.h>
84 #include <linux/seq_file.h>
85 #include <linux/slab.h>
86 #include <asm/hardware/pl080.h>
87
88 #include "dmaengine.h"
89
90 #define DRIVER_NAME "pl08xdmac"
91
92 static struct amba_driver pl08x_amba_driver;
93 struct pl08x_driver_data;
94
95 /**
96 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
97 * @channels: the number of channels available in this variant
98 * @dualmaster: whether this version supports dual AHB masters or not.
99 * @nomadik: whether the channels have Nomadik security extension bits
100 * that need to be checked for permission before use and some registers are
101 * missing
102 */
103 struct vendor_data {
104 u8 channels;
105 bool dualmaster;
106 bool nomadik;
107 };
108
109 /*
110 * PL08X private data structures
111 * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
112 * start & end do not - their bus bit info is in cctl. Also note that these
113 * are fixed 32-bit quantities.
114 */
115 struct pl08x_lli {
116 u32 src;
117 u32 dst;
118 u32 lli;
119 u32 cctl;
120 };
121
122 /**
123 * struct pl08x_bus_data - information about the source or destination
124 * busses for a transfer
125 * @addr: current address
126 * @maxwidth: the maximum width of a transfer on this bus
127 * @buswidth: the width of this bus in bytes: 1, 2 or 4
128 */
129 struct pl08x_bus_data {
130 dma_addr_t addr;
131 u8 maxwidth;
132 u8 buswidth;
133 };
134
135 /**
136 * struct pl08x_phy_chan - holder for the physical channels
137 * @id: physical index to this channel
138 * @lock: a lock to use when altering an instance of this struct
139 * @serving: the virtual channel currently being served by this physical
140 * channel
141 * @locked: channel unavailable for the system, e.g. dedicated to secure
142 * world
143 */
144 struct pl08x_phy_chan {
145 unsigned int id;
146 void __iomem *base;
147 spinlock_t lock;
148 struct pl08x_dma_chan *serving;
149 bool locked;
150 };
151
152 /**
153 * struct pl08x_sg - structure containing data per sg
154 * @src_addr: src address of sg
155 * @dst_addr: dst address of sg
156 * @len: transfer len in bytes
157 * @node: node for txd's dsg_list
158 */
159 struct pl08x_sg {
160 dma_addr_t src_addr;
161 dma_addr_t dst_addr;
162 size_t len;
163 struct list_head node;
164 };
165
166 /**
167 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
168 * @tx: async tx descriptor
169 * @node: node for txd list for channels
170 * @dsg_list: list of children sg's
171 * @direction: direction of transfer
172 * @llis_bus: DMA memory address (physical) start for the LLIs
173 * @llis_va: virtual memory address start for the LLIs
174 * @cctl: control reg values for current txd
175 * @ccfg: config reg values for current txd
176 */
177 struct pl08x_txd {
178 struct dma_async_tx_descriptor tx;
179 struct list_head node;
180 struct list_head dsg_list;
181 enum dma_transfer_direction direction;
182 dma_addr_t llis_bus;
183 struct pl08x_lli *llis_va;
184 /* Default cctl value for LLIs */
185 u32 cctl;
186 /*
187 * Settings to be put into the physical channel when we
188 * trigger this txd. Other registers are in llis_va[0].
189 */
190 u32 ccfg;
191 };
192
193 /**
194 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
195 * states
196 * @PL08X_CHAN_IDLE: the channel is idle
197 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
198 * channel and is running a transfer on it
199 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
200 * channel, but the transfer is currently paused
201 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
202 * channel to become available (only pertains to memcpy channels)
203 */
204 enum pl08x_dma_chan_state {
205 PL08X_CHAN_IDLE,
206 PL08X_CHAN_RUNNING,
207 PL08X_CHAN_PAUSED,
208 PL08X_CHAN_WAITING,
209 };
210
211 /**
212 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
213 * @chan: wrapped abstract channel
214 * @phychan: the physical channel utilized by this channel, if there is one
215 * @phychan_hold: if non-zero, hold on to the physical channel even if we
216 * have no pending entries
217 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
218 * @name: name of channel
219 * @cd: channel platform data
220 * @runtime_addr: address for RX/TX according to the runtime config
221 * @pend_list: queued transactions pending on this channel
222 * @at: active transaction on this channel
223 * @lock: a lock for this channel data
224 * @host: a pointer to the host (internal use)
225 * @state: whether the channel is idle, paused, running etc
226 * @slave: whether this channel is a device (slave) or for memcpy
227 * @waiting: a TX descriptor on this channel which is waiting for a physical
228 * channel to become available
229 * @signal: the physical DMA request signal which this channel is using
230 */
231 struct pl08x_dma_chan {
232 struct dma_chan chan;
233 struct pl08x_phy_chan *phychan;
234 int phychan_hold;
235 struct tasklet_struct tasklet;
236 const char *name;
237 const struct pl08x_channel_data *cd;
238 struct dma_slave_config cfg;
239 struct list_head pend_list;
240 struct pl08x_txd *at;
241 spinlock_t lock;
242 struct pl08x_driver_data *host;
243 enum pl08x_dma_chan_state state;
244 bool slave;
245 struct pl08x_txd *waiting;
246 int signal;
247 };
248
249 /**
250 * struct pl08x_driver_data - the local state holder for the PL08x
251 * @slave: slave engine for this instance
252 * @memcpy: memcpy engine for this instance
253 * @base: virtual memory base (remapped) for the PL08x
254 * @adev: the corresponding AMBA (PrimeCell) bus entry
255 * @vd: vendor data for this PL08x variant
256 * @pd: platform data passed in from the platform/machine
257 * @phy_chans: array of data for the physical channels
258 * @pool: a pool for the LLI descriptors
259 * @pool_ctr: counter of LLIs in the pool
260 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
261 * fetches
262 * @mem_buses: set to indicate memory transfers on AHB2.
263 * @lock: a spinlock for this struct
264 */
265 struct pl08x_driver_data {
266 struct dma_device slave;
267 struct dma_device memcpy;
268 void __iomem *base;
269 struct amba_device *adev;
270 const struct vendor_data *vd;
271 struct pl08x_platform_data *pd;
272 struct pl08x_phy_chan *phy_chans;
273 struct dma_pool *pool;
274 int pool_ctr;
275 u8 lli_buses;
276 u8 mem_buses;
277 };
278
279 /*
280 * PL08X specific defines
281 */
282
283 /* Size (bytes) of each LLI buffer allocated for one transfer */
284 #define PL08X_LLI_TSFR_SIZE 0x2000
285
286 /* Maximum number of LLIs available to one descriptor from a single pool allocation */
287 #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
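/* i.e. 0x2000 / sizeof(struct pl08x_lli) (16 bytes) = 512 LLIs per descriptor */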
288 #define PL08X_ALIGN 8
289
290 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
291 {
292 return container_of(chan, struct pl08x_dma_chan, chan);
293 }
294
295 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
296 {
297 return container_of(tx, struct pl08x_txd, tx);
298 }
299
300 /*
301 * Mux handling.
302 *
303 * This gives us the DMA request input to the PL08x primecell which the
304 * peripheral described by the channel data will be routed to, possibly
305 * via a board/SoC specific external MUX. One important point to note
306 * here is that this does not depend on the physical channel.
307 */
308 static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
309 {
310 const struct pl08x_platform_data *pd = plchan->host->pd;
311 int ret;
312
313 if (pd->get_signal) {
314 ret = pd->get_signal(plchan->cd);
315 if (ret < 0)
316 return ret;
317
318 plchan->signal = ret;
319 }
320 return 0;
321 }
322
323 static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
324 {
325 const struct pl08x_platform_data *pd = plchan->host->pd;
326
327 if (plchan->signal >= 0 && pd->put_signal) {
328 pd->put_signal(plchan->cd, plchan->signal);
329 plchan->signal = -1;
330 }
331 }
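
/*
* A minimal sketch (illustrative only, not from any real board file) of how
* a platform might provide the two callbacks used above through its
* struct pl08x_platform_data. The board_mux_*() helpers are hypothetical:
*
*	static int board_get_signal(const struct pl08x_channel_data *cd)
*	{
*		int signal = board_mux_claim_free_input();
*
*		if (signal < 0)
*			return -EBUSY;
*		board_mux_route(cd, signal);
*		return signal;
*	}
*
*	static void board_put_signal(const struct pl08x_channel_data *cd,
*				     int signal)
*	{
*		board_mux_release_input(signal);
*	}
*
* The returned signal number ends up in plchan->signal and is later written
* into the channel's CCFG source/destination request select fields.
*/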
332
333 /*
334 * Physical channel handling
335 */
336
337 /* Whether a certain channel is busy or not */
338 static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
339 {
340 unsigned int val;
341
342 val = readl(ch->base + PL080_CH_CONFIG);
343 return val & PL080_CONFIG_ACTIVE;
344 }
345
346 /*
347 * Set the initial DMA register values i.e. those for the first LLI
348 * The next LLI pointer and the configuration interrupt bit have
349 * been set when the LLIs were constructed. Poke them into the hardware
350 * and start the transfer.
351 */
352 static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
353 struct pl08x_txd *txd)
354 {
355 struct pl08x_driver_data *pl08x = plchan->host;
356 struct pl08x_phy_chan *phychan = plchan->phychan;
357 struct pl08x_lli *lli = &txd->llis_va[0];
358 u32 val;
359
360 plchan->at = txd;
361
362 /* Wait for channel inactive */
363 while (pl08x_phy_channel_busy(phychan))
364 cpu_relax();
365
366 dev_vdbg(&pl08x->adev->dev,
367 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
368 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
369 phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
370 txd->ccfg);
371
372 writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
373 writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
374 writel(lli->lli, phychan->base + PL080_CH_LLI);
375 writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
376 writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
377
378 /* Enable the DMA channel */
379 /* Do not access config register until channel shows as disabled */
380 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
381 cpu_relax();
382
383 /* Do not access config register until channel shows as inactive */
384 val = readl(phychan->base + PL080_CH_CONFIG);
385 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
386 val = readl(phychan->base + PL080_CH_CONFIG);
387
388 writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
389 }
390
391 /*
392 * Pause the channel by setting the HALT bit.
393 *
394 * For M->P transfers, pause the DMAC first and then stop the peripheral -
395 * the FIFO can only drain if the peripheral is still requesting data.
396 * (note: this can still timeout if the DMAC FIFO never drains of data.)
397 *
398 * For P->M transfers, disable the peripheral first to stop it filling
399 * the DMAC FIFO, and then pause the DMAC.
400 */
401 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
402 {
403 u32 val;
404 int timeout;
405
406 /* Set the HALT bit and wait for the FIFO to drain */
407 val = readl(ch->base + PL080_CH_CONFIG);
408 val |= PL080_CONFIG_HALT;
409 writel(val, ch->base + PL080_CH_CONFIG);
410
411 /* Wait for channel inactive */
412 for (timeout = 1000; timeout; timeout--) {
413 if (!pl08x_phy_channel_busy(ch))
414 break;
415 udelay(1);
416 }
417 if (pl08x_phy_channel_busy(ch))
418 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
419 }
420
421 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
422 {
423 u32 val;
424
425 /* Clear the HALT bit */
426 val = readl(ch->base + PL080_CH_CONFIG);
427 val &= ~PL080_CONFIG_HALT;
428 writel(val, ch->base + PL080_CH_CONFIG);
429 }
430
431 /*
432 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
433 * clears any pending interrupt status. This should not be used for
434 * an on-going transfer, but as a method of shutting down a channel
435 * (eg, when it's no longer used) or terminating a transfer.
436 */
437 static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
438 struct pl08x_phy_chan *ch)
439 {
440 u32 val = readl(ch->base + PL080_CH_CONFIG);
441
442 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
443 PL080_CONFIG_TC_IRQ_MASK);
444
445 writel(val, ch->base + PL080_CH_CONFIG);
446
447 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
448 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
449 }
450
451 static inline u32 get_bytes_in_cctl(u32 cctl)
452 {
453 /* The source width defines the number of bytes */
454 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
455
456 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
457 case PL080_WIDTH_8BIT:
458 break;
459 case PL080_WIDTH_16BIT:
460 bytes *= 2;
461 break;
462 case PL080_WIDTH_32BIT:
463 bytes *= 4;
464 break;
465 }
466 return bytes;
467 }
468
469 /* The channel should be paused when calling this */
470 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
471 {
472 struct pl08x_phy_chan *ch;
473 struct pl08x_txd *txd;
474 unsigned long flags;
475 size_t bytes = 0;
476
477 spin_lock_irqsave(&plchan->lock, flags);
478 ch = plchan->phychan;
479 txd = plchan->at;
480
481 /*
482 * Follow the LLIs to get the number of remaining
483 * bytes in the currently active transaction.
484 */
485 if (ch && txd) {
486 u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
487
488 /* First get the remaining bytes in the active transfer */
489 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
490
491 if (clli) {
492 struct pl08x_lli *llis_va = txd->llis_va;
493 dma_addr_t llis_bus = txd->llis_bus;
494 int index;
495
496 BUG_ON(clli < llis_bus || clli >= llis_bus +
497 sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
498
499 /*
500 * Locate the next LLI - as this is an array,
501 * it's simple maths to find.
502 */
503 index = (clli - llis_bus) / sizeof(struct pl08x_lli);
504
505 for (; index < MAX_NUM_TSFR_LLIS; index++) {
506 bytes += get_bytes_in_cctl(llis_va[index].cctl);
507
508 /*
509 * A LLI pointer of 0 terminates the LLI list
510 */
511 if (!llis_va[index].lli)
512 break;
513 }
514 }
515 }
516
517 /* Sum up all queued transactions */
518 if (!list_empty(&plchan->pend_list)) {
519 struct pl08x_txd *txdi;
520 list_for_each_entry(txdi, &plchan->pend_list, node) {
521 struct pl08x_sg *dsg;
522 list_for_each_entry(dsg, &txdi->dsg_list, node)
523 bytes += dsg->len;
524 }
525 }
526
527 spin_unlock_irqrestore(&plchan->lock, flags);
528
529 return bytes;
530 }
531
532 /*
533 * Allocate a physical channel for a virtual channel
534 *
535 * Try to locate a physical channel to be used for this transfer. If all
536 * are taken return NULL and the requester will have to cope by using
537 * some fallback PIO mode or retrying later.
538 */
539 static struct pl08x_phy_chan *
540 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
541 struct pl08x_dma_chan *virt_chan)
542 {
543 struct pl08x_phy_chan *ch = NULL;
544 unsigned long flags;
545 int i;
546
547 for (i = 0; i < pl08x->vd->channels; i++) {
548 ch = &pl08x->phy_chans[i];
549
550 spin_lock_irqsave(&ch->lock, flags);
551
552 if (!ch->locked && !ch->serving) {
553 ch->serving = virt_chan;
554 spin_unlock_irqrestore(&ch->lock, flags);
555 break;
556 }
557
558 spin_unlock_irqrestore(&ch->lock, flags);
559 }
560
561 if (i == pl08x->vd->channels) {
562 /* No physical channel available, cope with it */
563 return NULL;
564 }
565
566 return ch;
567 }
568
569 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
570 struct pl08x_phy_chan *ch)
571 {
572 unsigned long flags;
573
574 spin_lock_irqsave(&ch->lock, flags);
575
576 /* Stop the channel and clear its interrupts */
577 pl08x_terminate_phy_chan(pl08x, ch);
578
579 /* Mark it as free */
580 ch->serving = NULL;
581 spin_unlock_irqrestore(&ch->lock, flags);
582 }
583
584 /*
585 * LLI handling
586 */
587
588 static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
589 {
590 switch (coded) {
591 case PL080_WIDTH_8BIT:
592 return 1;
593 case PL080_WIDTH_16BIT:
594 return 2;
595 case PL080_WIDTH_32BIT:
596 return 4;
597 default:
598 break;
599 }
600 BUG();
601 return 0;
602 }
603
604 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
605 size_t tsize)
606 {
607 u32 retbits = cctl;
608
609 /* Remove all src, dst and transfer size bits */
610 retbits &= ~PL080_CONTROL_DWIDTH_MASK;
611 retbits &= ~PL080_CONTROL_SWIDTH_MASK;
612 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
613
614 /* Then set the bits according to the parameters */
615 switch (srcwidth) {
616 case 1:
617 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
618 break;
619 case 2:
620 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
621 break;
622 case 4:
623 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
624 break;
625 default:
626 BUG();
627 break;
628 }
629
630 switch (dstwidth) {
631 case 1:
632 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
633 break;
634 case 2:
635 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
636 break;
637 case 4:
638 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
639 break;
640 default:
641 BUG();
642 break;
643 }
644
645 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
646 return retbits;
647 }
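
/*
* Usage sketch for pl08x_cctl_bits() (illustrative values only): to encode a
* 32-bit-wide source, a 16-bit-wide destination and a transfer size of 128
* source-width units, leaving all other control bits untouched:
*
*	cctl = pl08x_cctl_bits(cctl, 4, 2, 128);
*/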
648
649 struct pl08x_lli_build_data {
650 struct pl08x_txd *txd;
651 struct pl08x_bus_data srcbus;
652 struct pl08x_bus_data dstbus;
653 size_t remainder;
654 u32 lli_bus;
655 };
656
657 /*
658 * Autoselect a master bus to use for the transfer. The slave bus is the
659 * one sacrificed if the source and destination are not similarly aligned:
660 * if, after aligning the master address to the transfer width (by sending
661 * a few bytes one at a time), the slave is still not aligned, its width
662 * is reduced to BYTE.
663 * - prefers the destination bus if both available
664 * - prefers bus with fixed address (i.e. peripheral)
665 */
666 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
667 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
668 {
669 if (!(cctl & PL080_CONTROL_DST_INCR)) {
670 *mbus = &bd->dstbus;
671 *sbus = &bd->srcbus;
672 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
673 *mbus = &bd->srcbus;
674 *sbus = &bd->dstbus;
675 } else {
676 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
677 *mbus = &bd->dstbus;
678 *sbus = &bd->srcbus;
679 } else {
680 *mbus = &bd->srcbus;
681 *sbus = &bd->dstbus;
682 }
683 }
684 }
685
686 /*
687 * Fill in one LLI for a given transfer descriptor and advance the counters
688 */
689 static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
690 int num_llis, int len, u32 cctl)
691 {
692 struct pl08x_lli *llis_va = bd->txd->llis_va;
693 dma_addr_t llis_bus = bd->txd->llis_bus;
694
695 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
696
697 llis_va[num_llis].cctl = cctl;
698 llis_va[num_llis].src = bd->srcbus.addr;
699 llis_va[num_llis].dst = bd->dstbus.addr;
700 llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
701 sizeof(struct pl08x_lli);
702 llis_va[num_llis].lli |= bd->lli_bus;
703
704 if (cctl & PL080_CONTROL_SRC_INCR)
705 bd->srcbus.addr += len;
706 if (cctl & PL080_CONTROL_DST_INCR)
707 bd->dstbus.addr += len;
708
709 BUG_ON(bd->remainder < len);
710
711 bd->remainder -= len;
712 }
713
714 static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
715 u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
716 {
717 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
718 pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
719 (*total_bytes) += len;
720 }
721
722 /*
723 * This fills in the table of LLIs for the transfer descriptor
724 * Note that we assume we never have to change the burst sizes
725 * Return 0 for error
726 */
727 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
728 struct pl08x_txd *txd)
729 {
730 struct pl08x_bus_data *mbus, *sbus;
731 struct pl08x_lli_build_data bd;
732 int num_llis = 0;
733 u32 cctl, early_bytes = 0;
734 size_t max_bytes_per_lli, total_bytes;
735 struct pl08x_lli *llis_va;
736 struct pl08x_sg *dsg;
737
738 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
739 if (!txd->llis_va) {
740 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
741 return 0;
742 }
743
744 pl08x->pool_ctr++;
745
746 bd.txd = txd;
747 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
748 cctl = txd->cctl;
749
750 /* Find maximum width of the source bus */
751 bd.srcbus.maxwidth =
752 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
753 PL080_CONTROL_SWIDTH_SHIFT);
754
755 /* Find maximum width of the destination bus */
756 bd.dstbus.maxwidth =
757 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
758 PL080_CONTROL_DWIDTH_SHIFT);
759
760 list_for_each_entry(dsg, &txd->dsg_list, node) {
761 total_bytes = 0;
762 cctl = txd->cctl;
763
764 bd.srcbus.addr = dsg->src_addr;
765 bd.dstbus.addr = dsg->dst_addr;
766 bd.remainder = dsg->len;
767 bd.srcbus.buswidth = bd.srcbus.maxwidth;
768 bd.dstbus.buswidth = bd.dstbus.maxwidth;
769
770 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
771
772 dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
773 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
774 bd.srcbus.buswidth,
775 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
776 bd.dstbus.buswidth,
777 bd.remainder);
778 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
779 mbus == &bd.srcbus ? "src" : "dst",
780 sbus == &bd.srcbus ? "src" : "dst");
781
782 /*
783 * Zero length is only allowed if all these requirements are
784 * met:
785 * - flow controller is peripheral.
786 * - src.addr is aligned to src.width
787 * - dst.addr is aligned to dst.width
788 *
789 * sg_len == 1 should be true, as there can be two cases here:
790 *
791 * - Memory addresses are contiguous and are not scattered.
792 * Here only one sg will be passed by the client driver, with the
793 * memory address and zero length. We pass this to the controller,
794 * and after the transfer it will receive the last burst
795 * request from the peripheral and so the transfer finishes.
796 *
797 * - Memory addresses are scattered and are not contiguous.
798 * Here, since the DMA controller doesn't know when an LLI's
799 * transfer is over, it can't load the next LLI. So in this
800 * case we have to assume that only one LLI is
801 * supported. Thus, we can't have scattered addresses.
802 */
803 if (!bd.remainder) {
804 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
805 PL080_CONFIG_FLOW_CONTROL_SHIFT;
806 if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
807 (fc <= PL080_FLOW_SRC2DST_SRC))) {
808 dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
809 __func__);
810 return 0;
811 }
812
813 if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
814 (bd.dstbus.addr % bd.dstbus.buswidth)) {
815 dev_err(&pl08x->adev->dev,
816 "%s src & dst address must be aligned to src"
817 " & dst width if peripheral is flow controller",
818 __func__);
819 return 0;
820 }
821
822 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
823 bd.dstbus.buswidth, 0);
824 pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
825 break;
826 }
827
828 /*
829 * Send byte by byte for following cases
830 * - Less than a bus width available
831 * - until master bus is aligned
832 */
833 if (bd.remainder < mbus->buswidth)
834 early_bytes = bd.remainder;
835 else if ((mbus->addr) % (mbus->buswidth)) {
836 early_bytes = mbus->buswidth - (mbus->addr) %
837 (mbus->buswidth);
838 if ((bd.remainder - early_bytes) < mbus->buswidth)
839 early_bytes = bd.remainder;
840 }
841
842 if (early_bytes) {
843 dev_vdbg(&pl08x->adev->dev,
844 "%s byte width LLIs (remain 0x%08x)\n",
845 __func__, bd.remainder);
846 prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
847 &total_bytes);
848 }
849
850 if (bd.remainder) {
851 /*
852 * Master now aligned
853 * - if slave is not then we must set its width down
854 */
855 if (sbus->addr % sbus->buswidth) {
856 dev_dbg(&pl08x->adev->dev,
857 "%s set down bus width to one byte\n",
858 __func__);
859
860 sbus->buswidth = 1;
861 }
862
863 /*
864 * Bytes transferred = tsize * src width, not
865 * MIN(buswidths)
866 */
867 max_bytes_per_lli = bd.srcbus.buswidth *
868 PL080_CONTROL_TRANSFER_SIZE_MASK;
869 dev_vdbg(&pl08x->adev->dev,
870 "%s max bytes per lli = %zu\n",
871 __func__, max_bytes_per_lli);
872
873 /*
874 * Make largest possible LLIs until less than one bus
875 * width left
876 */
877 while (bd.remainder > (mbus->buswidth - 1)) {
878 size_t lli_len, tsize, width;
879
880 /*
881 * If enough left try to send max possible,
882 * otherwise try to send the remainder
883 */
884 lli_len = min(bd.remainder, max_bytes_per_lli);
885
886 /*
887 * Check against maximum bus alignment:
888 * Calculate actual transfer size in relation to
889 * bus width and get a maximum remainder of the
890 * highest bus width - 1
891 */
892 width = max(mbus->buswidth, sbus->buswidth);
893 lli_len = (lli_len / width) * width;
894 tsize = lli_len / bd.srcbus.buswidth;
895
896 dev_vdbg(&pl08x->adev->dev,
897 "%s fill lli with single lli chunk of "
898 "size 0x%08zx (remainder 0x%08zx)\n",
899 __func__, lli_len, bd.remainder);
900
901 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
902 bd.dstbus.buswidth, tsize);
903 pl08x_fill_lli_for_desc(&bd, num_llis++,
904 lli_len, cctl);
905 total_bytes += lli_len;
906 }
907
908 /*
909 * Send any odd bytes
910 */
911 if (bd.remainder) {
912 dev_vdbg(&pl08x->adev->dev,
913 "%s align with boundary, send odd bytes (remain %zu)\n",
914 __func__, bd.remainder);
915 prep_byte_width_lli(&bd, &cctl, bd.remainder,
916 num_llis++, &total_bytes);
917 }
918 }
919
920 if (total_bytes != dsg->len) {
921 dev_err(&pl08x->adev->dev,
922 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
923 __func__, total_bytes, dsg->len);
924 return 0;
925 }
926
927 if (num_llis >= MAX_NUM_TSFR_LLIS) {
928 dev_err(&pl08x->adev->dev,
929 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
930 __func__, (u32) MAX_NUM_TSFR_LLIS);
931 return 0;
932 }
933 }
934
935 llis_va = txd->llis_va;
936 /* The final LLI terminates the LLI chain. */
937 llis_va[num_llis - 1].lli = 0;
938 /* The final LLI element shall also fire an interrupt. */
939 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
940
941 #ifdef VERBOSE_DEBUG
942 {
943 int i;
944
945 dev_vdbg(&pl08x->adev->dev,
946 "%-3s %-9s %-10s %-10s %-10s %s\n",
947 "lli", "", "csrc", "cdst", "clli", "cctl");
948 for (i = 0; i < num_llis; i++) {
949 dev_vdbg(&pl08x->adev->dev,
950 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
951 i, &llis_va[i], llis_va[i].src,
952 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
953 );
954 }
955 }
956 #endif
957
958 return num_llis;
959 }
960
961 /* You should call this with the struct pl08x lock held */
962 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
963 struct pl08x_txd *txd)
964 {
965 struct pl08x_sg *dsg, *_dsg;
966
967 /* Free the LLI */
968 if (txd->llis_va)
969 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
970
971 pl08x->pool_ctr--;
972
973 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
974 list_del(&dsg->node);
975 kfree(dsg);
976 }
977
978 kfree(txd);
979 }
980
981 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
982 struct pl08x_dma_chan *plchan)
983 {
984 struct pl08x_txd *txdi = NULL;
985 struct pl08x_txd *next;
986
987 if (!list_empty(&plchan->pend_list)) {
988 list_for_each_entry_safe(txdi,
989 next, &plchan->pend_list, node) {
990 list_del(&txdi->node);
991 pl08x_free_txd(pl08x, txdi);
992 }
993 }
994 }
995
996 /*
997 * The DMA ENGINE API
998 */
999 static int pl08x_alloc_chan_resources(struct dma_chan *chan)
1000 {
1001 return 0;
1002 }
1003
1004 static void pl08x_free_chan_resources(struct dma_chan *chan)
1005 {
1006 }
1007
1008 /*
1009 * This should be called with the channel plchan->lock held
1010 */
1011 static int prep_phy_channel(struct pl08x_dma_chan *plchan,
1012 struct pl08x_txd *txd)
1013 {
1014 struct pl08x_driver_data *pl08x = plchan->host;
1015 struct pl08x_phy_chan *ch;
1016 int ret;
1017
1018 /* Check if we already have a channel */
1019 if (plchan->phychan) {
1020 ch = plchan->phychan;
1021 goto got_channel;
1022 }
1023
1024 ch = pl08x_get_phy_channel(pl08x, plchan);
1025 if (!ch) {
1026 /* No physical channel available, cope with it */
1027 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
1028 return -EBUSY;
1029 }
1030
1031 /*
1032 * OK we have a physical channel: for memcpy() this is all we
1033 * need, but for slaves the physical signals may be muxed!
1034 * Can the platform allow us to use this channel?
1035 */
1036 if (plchan->slave) {
1037 ret = pl08x_request_mux(plchan);
1038 if (ret < 0) {
1039 dev_dbg(&pl08x->adev->dev,
1040 "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
1041 ch->id, plchan->name);
1042 /* Release physical channel & return */
1043 pl08x_put_phy_channel(pl08x, ch);
1044 return -EBUSY;
1045 }
1046 }
1047
1048 plchan->phychan = ch;
1049 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
1050 ch->id,
1051 plchan->signal,
1052 plchan->name);
1053
1054 got_channel:
1055 /* Assign the flow control signal to this channel */
1056 if (txd->direction == DMA_MEM_TO_DEV)
1057 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
1058 else if (txd->direction == DMA_DEV_TO_MEM)
1059 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1060
1061 plchan->phychan_hold++;
1062
1063 return 0;
1064 }
1065
1066 static void release_phy_channel(struct pl08x_dma_chan *plchan)
1067 {
1068 struct pl08x_driver_data *pl08x = plchan->host;
1069
1070 pl08x_release_mux(plchan);
1071 pl08x_put_phy_channel(pl08x, plchan->phychan);
1072 plchan->phychan = NULL;
1073 }
1074
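/*
* Assign a cookie and queue the descriptor on the channel's pending list.
* A memcpy descriptor that could not get a physical channel at prep time
* parks the channel in PL08X_CHAN_WAITING; otherwise the phychan hold taken
* in prep_phy_channel() is dropped here.
*/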
1075 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
1076 {
1077 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
1078 struct pl08x_txd *txd = to_pl08x_txd(tx);
1079 unsigned long flags;
1080 dma_cookie_t cookie;
1081
1082 spin_lock_irqsave(&plchan->lock, flags);
1083 cookie = dma_cookie_assign(tx);
1084
1085 /* Put this onto the pending list */
1086 list_add_tail(&txd->node, &plchan->pend_list);
1087
1088 /*
1089 * If there was no physical channel available for this memcpy,
1090 * stack the request up and indicate that the channel is waiting
1091 * for a free physical channel.
1092 */
1093 if (!plchan->slave && !plchan->phychan) {
1094 /* Do this memcpy whenever there is a channel ready */
1095 plchan->state = PL08X_CHAN_WAITING;
1096 plchan->waiting = txd;
1097 } else {
1098 plchan->phychan_hold--;
1099 }
1100
1101 spin_unlock_irqrestore(&plchan->lock, flags);
1102
1103 return cookie;
1104 }
1105
1106 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
1107 struct dma_chan *chan, unsigned long flags)
1108 {
1109 struct dma_async_tx_descriptor *retval = NULL;
1110
1111 return retval;
1112 }
1113
1114 /*
1115 * Code accessing dma_async_is_complete() in a tight loop may cause problems.
1116 * If slaves are relying on interrupts to signal completion this function
1117 * must not be called with interrupts disabled.
1118 */
1119 static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
1120 dma_cookie_t cookie, struct dma_tx_state *txstate)
1121 {
1122 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1123 enum dma_status ret;
1124
1125 ret = dma_cookie_status(chan, cookie, txstate);
1126 if (ret == DMA_SUCCESS)
1127 return ret;
1128
1129 /*
1130 * This cookie not complete yet
1131 * Get number of bytes left in the active transactions and queue
1132 */
1133 dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
1134
1135 if (plchan->state == PL08X_CHAN_PAUSED)
1136 return DMA_PAUSED;
1137
1138 /* Whether waiting or running, we're in progress */
1139 return DMA_IN_PROGRESS;
1140 }
1141
1142 /* PrimeCell DMA extension */
1143 struct burst_table {
1144 u32 burstwords;
1145 u32 reg;
1146 };
1147
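/*
* This table must stay ordered from largest to smallest burst size:
* pl08x_burst() below walks it and uses the first entry that does not exceed
* the requested maxburst, with the final entry mapping anything smaller than
* four words to single transfers.
*/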
1148 static const struct burst_table burst_sizes[] = {
1149 {
1150 .burstwords = 256,
1151 .reg = PL080_BSIZE_256,
1152 },
1153 {
1154 .burstwords = 128,
1155 .reg = PL080_BSIZE_128,
1156 },
1157 {
1158 .burstwords = 64,
1159 .reg = PL080_BSIZE_64,
1160 },
1161 {
1162 .burstwords = 32,
1163 .reg = PL080_BSIZE_32,
1164 },
1165 {
1166 .burstwords = 16,
1167 .reg = PL080_BSIZE_16,
1168 },
1169 {
1170 .burstwords = 8,
1171 .reg = PL080_BSIZE_8,
1172 },
1173 {
1174 .burstwords = 4,
1175 .reg = PL080_BSIZE_4,
1176 },
1177 {
1178 .burstwords = 0,
1179 .reg = PL080_BSIZE_1,
1180 },
1181 };
1182
1183 /*
1184 * Given the source and destination available bus masks, select which
1185 * will be routed to each port. We try to have source and destination
1186 * on separate ports, but always respect the allowable settings.
1187 */
1188 static u32 pl08x_select_bus(u8 src, u8 dst)
1189 {
1190 u32 cctl = 0;
1191
1192 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
1193 cctl |= PL080_CONTROL_DST_AHB2;
1194 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
1195 cctl |= PL080_CONTROL_SRC_AHB2;
1196
1197 return cctl;
1198 }
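
/*
* For example (following the rules above): if both the source and the
* destination may use either master, the source is kept on AHB1 and the
* destination is moved to AHB2, so the two ends do not compete for one bus.
*/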
1199
1200 static u32 pl08x_cctl(u32 cctl)
1201 {
1202 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
1203 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
1204 PL080_CONTROL_PROT_MASK);
1205
1206 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1207 return cctl | PL080_CONTROL_PROT_SYS;
1208 }
1209
1210 static u32 pl08x_width(enum dma_slave_buswidth width)
1211 {
1212 switch (width) {
1213 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1214 return PL080_WIDTH_8BIT;
1215 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1216 return PL080_WIDTH_16BIT;
1217 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1218 return PL080_WIDTH_32BIT;
1219 default:
1220 return ~0;
1221 }
1222 }
1223
1224 static u32 pl08x_burst(u32 maxburst)
1225 {
1226 int i;
1227
1228 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
1229 if (burst_sizes[i].burstwords <= maxburst)
1230 break;
1231
1232 return burst_sizes[i].reg;
1233 }
1234
1235 static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
1236 enum dma_slave_buswidth addr_width, u32 maxburst)
1237 {
1238 u32 width, burst, cctl = 0;
1239
1240 width = pl08x_width(addr_width);
1241 if (width == ~0)
1242 return ~0;
1243
1244 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
1245 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
1246
1247 /*
1248 * If this channel will only request single transfers, set this
1249 * down to ONE element. Also select one element if no maxburst
1250 * is specified.
1251 */
1252 if (plchan->cd->single)
1253 maxburst = 1;
1254
1255 burst = pl08x_burst(maxburst);
1256 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
1257 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
1258
1259 return pl08x_cctl(cctl);
1260 }
1261
1262 static int dma_set_runtime_config(struct dma_chan *chan,
1263 struct dma_slave_config *config)
1264 {
1265 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1266
1267 if (!plchan->slave)
1268 return -EINVAL;
1269
1270 /* Reject definitely invalid configurations */
1271 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1272 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1273 return -EINVAL;
1274
1275 plchan->cfg = *config;
1276
1277 return 0;
1278 }
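
/*
* A minimal client-side sketch (illustrative only) of how these parameters
* arrive here through the generic dmaengine API, assuming a hypothetical
* peripheral FIFO at fifo_phys:
*
*	struct dma_slave_config cfg = {
*		.direction = DMA_MEM_TO_DEV,
*		.dst_addr = fifo_phys,
*		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
*		.dst_maxburst = 4,
*	};
*
*	dmaengine_slave_config(chan, &cfg);
*
* which reaches this driver via pl08x_control(DMA_SLAVE_CONFIG).
*/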
1279
1280 /*
1281 * Slave transactions callback to the slave device to allow
1282 * synchronization of slave DMA signals with the DMAC enable
1283 */
1284 static void pl08x_issue_pending(struct dma_chan *chan)
1285 {
1286 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1287 unsigned long flags;
1288
1289 spin_lock_irqsave(&plchan->lock, flags);
1290 /* Something is already active, or we're waiting for a channel... */
1291 if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
1292 spin_unlock_irqrestore(&plchan->lock, flags);
1293 return;
1294 }
1295
1296 /* Take the first element in the queue and execute it */
1297 if (!list_empty(&plchan->pend_list)) {
1298 struct pl08x_txd *next;
1299
1300 next = list_first_entry(&plchan->pend_list,
1301 struct pl08x_txd,
1302 node);
1303 list_del(&next->node);
1304 plchan->state = PL08X_CHAN_RUNNING;
1305
1306 pl08x_start_txd(plchan, next);
1307 }
1308
1309 spin_unlock_irqrestore(&plchan->lock, flags);
1310 }
1311
1312 static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1313 struct pl08x_txd *txd)
1314 {
1315 struct pl08x_driver_data *pl08x = plchan->host;
1316 unsigned long flags;
1317 int num_llis, ret;
1318
1319 num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
1320 if (!num_llis) {
1321 spin_lock_irqsave(&plchan->lock, flags);
1322 pl08x_free_txd(pl08x, txd);
1323 spin_unlock_irqrestore(&plchan->lock, flags);
1324 return -EINVAL;
1325 }
1326
1327 spin_lock_irqsave(&plchan->lock, flags);
1328
1329 /*
1330 * See if we already have a physical channel allocated,
1331 * else this is the time to try to get one.
1332 */
1333 ret = prep_phy_channel(plchan, txd);
1334 if (ret) {
1335 /*
1336 * No physical channel was available.
1337 *
1338 * memcpy transfers can be sorted out at submission time.
1339 *
1340 * Slave transfers may have been denied due to platform
1341 * channel muxing restrictions. Since there is no guarantee
1342 * that this will ever be resolved, and the signal must be
1343 * acquired AFTER acquiring the physical channel, we will let
1344 * them be NACK:ed with -EBUSY here. The drivers can retry
1345 * the prep() call if they are eager on doing this using DMA.
1346 */
1347 if (plchan->slave) {
1348 pl08x_free_txd_list(pl08x, plchan);
1349 pl08x_free_txd(pl08x, txd);
1350 spin_unlock_irqrestore(&plchan->lock, flags);
1351 return -EBUSY;
1352 }
1353 } else
1354 /*
1355 * Else we're all set, paused and ready to roll, status
1356 * will switch to PL08X_CHAN_RUNNING when we call
1357 * issue_pending(). If there is something running on the
1358 * channel already we don't change its state.
1359 */
1360 if (plchan->state == PL08X_CHAN_IDLE)
1361 plchan->state = PL08X_CHAN_PAUSED;
1362
1363 spin_unlock_irqrestore(&plchan->lock, flags);
1364
1365 return 0;
1366 }
1367
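/*
* Allocate a txd and set up the fields common to all transfer types; the
* caller fills in the direction, cctl, the flow control part of ccfg and
* the dsg_list before the descriptor is submitted.
*/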
1368 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
1369 unsigned long flags)
1370 {
1371 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
1372
1373 if (txd) {
1374 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
1375 txd->tx.flags = flags;
1376 txd->tx.tx_submit = pl08x_tx_submit;
1377 INIT_LIST_HEAD(&txd->node);
1378 INIT_LIST_HEAD(&txd->dsg_list);
1379
1380 /* Always enable error and terminal interrupts */
1381 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
1382 PL080_CONFIG_TC_IRQ_MASK;
1383 }
1384 return txd;
1385 }
1386
1387 /*
1388 * Initialize a descriptor to be used by memcpy submit
1389 */
1390 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1391 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1392 size_t len, unsigned long flags)
1393 {
1394 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1395 struct pl08x_driver_data *pl08x = plchan->host;
1396 struct pl08x_txd *txd;
1397 struct pl08x_sg *dsg;
1398 int ret;
1399
1400 txd = pl08x_get_txd(plchan, flags);
1401 if (!txd) {
1402 dev_err(&pl08x->adev->dev,
1403 "%s no memory for descriptor\n", __func__);
1404 return NULL;
1405 }
1406
1407 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1408 if (!dsg) {
1409 pl08x_free_txd(pl08x, txd);
1410 dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
1411 __func__);
1412 return NULL;
1413 }
1414 list_add_tail(&dsg->node, &txd->dsg_list);
1415
1416 txd->direction = DMA_MEM_TO_MEM;
1417 dsg->src_addr = src;
1418 dsg->dst_addr = dest;
1419 dsg->len = len;
1420
1421 /* Set platform data for m2m */
1422 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1423 txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
1424 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
1425
1426 /* Both to be incremented or the code will break */
1427 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1428
1429 if (pl08x->vd->dualmaster)
1430 txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
1431 pl08x->mem_buses);
1432
1433 ret = pl08x_prep_channel_resources(plchan, txd);
1434 if (ret)
1435 return NULL;
1436
1437 return &txd->tx;
1438 }
1439
1440 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1441 struct dma_chan *chan, struct scatterlist *sgl,
1442 unsigned int sg_len, enum dma_transfer_direction direction,
1443 unsigned long flags, void *context)
1444 {
1445 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1446 struct pl08x_driver_data *pl08x = plchan->host;
1447 struct pl08x_txd *txd;
1448 struct pl08x_sg *dsg;
1449 struct scatterlist *sg;
1450 enum dma_slave_buswidth addr_width;
1451 dma_addr_t slave_addr;
1452 int ret, tmp;
1453 u8 src_buses, dst_buses;
1454 u32 maxburst, cctl;
1455
1456 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1457 __func__, sg_dma_len(sgl), plchan->name);
1458
1459 txd = pl08x_get_txd(plchan, flags);
1460 if (!txd) {
1461 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1462 return NULL;
1463 }
1464
1465 /*
1466 * Set up the addresses: the address configured at run time
1467 * (via dma_set_runtime_config) takes precedence, since it may
1468 * retarget the channel dynamically.
1469 */
1470 txd->direction = direction;
1471
1472 if (direction == DMA_MEM_TO_DEV) {
1473 cctl = PL080_CONTROL_SRC_INCR;
1474 slave_addr = plchan->cfg.dst_addr;
1475 addr_width = plchan->cfg.dst_addr_width;
1476 maxburst = plchan->cfg.dst_maxburst;
1477 src_buses = pl08x->mem_buses;
1478 dst_buses = plchan->cd->periph_buses;
1479 } else if (direction == DMA_DEV_TO_MEM) {
1480 cctl = PL080_CONTROL_DST_INCR;
1481 slave_addr = plchan->cfg.src_addr;
1482 addr_width = plchan->cfg.src_addr_width;
1483 maxburst = plchan->cfg.src_maxburst;
1484 src_buses = plchan->cd->periph_buses;
1485 dst_buses = pl08x->mem_buses;
1486 } else {
1487 pl08x_free_txd(pl08x, txd);
1488 dev_err(&pl08x->adev->dev,
1489 "%s direction unsupported\n", __func__);
1490 return NULL;
1491 }
1492
1493 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
1494 if (cctl == ~0) {
1495 pl08x_free_txd(pl08x, txd);
1496 dev_err(&pl08x->adev->dev,
1497 "DMA slave configuration botched?\n");
1498 return NULL;
1499 }
1500
1501 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
1502
1503 if (plchan->cfg.device_fc)
1504 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
1505 PL080_FLOW_PER2MEM_PER;
1506 else
1507 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
1508 PL080_FLOW_PER2MEM;
1509
1510 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1511
1512 for_each_sg(sgl, sg, sg_len, tmp) {
1513 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1514 if (!dsg) {
1515 pl08x_free_txd(pl08x, txd);
1516 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1517 __func__);
1518 return NULL;
1519 }
1520 list_add_tail(&dsg->node, &txd->dsg_list);
1521
1522 dsg->len = sg_dma_len(sg);
1523 if (direction == DMA_MEM_TO_DEV) {
1524 dsg->src_addr = sg_dma_address(sg);
1525 dsg->dst_addr = slave_addr;
1526 } else {
1527 dsg->src_addr = slave_addr;
1528 dsg->dst_addr = sg_dma_address(sg);
1529 }
1530 }
1531
1532 ret = pl08x_prep_channel_resources(plchan, txd);
1533 if (ret)
1534 return NULL;
1535
1536 return &txd->tx;
1537 }
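
/*
* Client-side view (illustrative sketch, not part of this driver): after the
* dma_slave_config step shown earlier, a peripheral driver typically maps
* its scatterlist and does something like:
*
*	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
*				       DMA_PREP_INTERRUPT);
*	if (desc) {
*		desc->callback = my_dma_done;
*		dmaengine_submit(desc);
*		dma_async_issue_pending(chan);
*	}
*
* (my_dma_done is a hypothetical completion callback.) This lands in
* pl08x_prep_slave_sg() above via device_prep_slave_sg.
*/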
1538
1539 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1540 unsigned long arg)
1541 {
1542 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1543 struct pl08x_driver_data *pl08x = plchan->host;
1544 unsigned long flags;
1545 int ret = 0;
1546
1547 /* Controls applicable to inactive channels */
1548 if (cmd == DMA_SLAVE_CONFIG) {
1549 return dma_set_runtime_config(chan,
1550 (struct dma_slave_config *)arg);
1551 }
1552
1553 /*
1554 * Anything succeeds on channels with no physical allocation and
1555 * no queued transfers.
1556 */
1557 spin_lock_irqsave(&plchan->lock, flags);
1558 if (!plchan->phychan && !plchan->at) {
1559 spin_unlock_irqrestore(&plchan->lock, flags);
1560 return 0;
1561 }
1562
1563 switch (cmd) {
1564 case DMA_TERMINATE_ALL:
1565 plchan->state = PL08X_CHAN_IDLE;
1566
1567 if (plchan->phychan) {
1568 pl08x_terminate_phy_chan(pl08x, plchan->phychan);
1569
1570 /*
1571 * Mark physical channel as free and free any slave
1572 * signal
1573 */
1574 release_phy_channel(plchan);
1575 plchan->phychan_hold = 0;
1576 }
1577 /* Dequeue jobs and free LLIs */
1578 if (plchan->at) {
1579 pl08x_free_txd(pl08x, plchan->at);
1580 plchan->at = NULL;
1581 }
1582 /* Dequeue jobs not yet fired as well */
1583 pl08x_free_txd_list(pl08x, plchan);
1584 break;
1585 case DMA_PAUSE:
1586 pl08x_pause_phy_chan(plchan->phychan);
1587 plchan->state = PL08X_CHAN_PAUSED;
1588 break;
1589 case DMA_RESUME:
1590 pl08x_resume_phy_chan(plchan->phychan);
1591 plchan->state = PL08X_CHAN_RUNNING;
1592 break;
1593 default:
1594 /* Unknown command */
1595 ret = -ENXIO;
1596 break;
1597 }
1598
1599 spin_unlock_irqrestore(&plchan->lock, flags);
1600
1601 return ret;
1602 }
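
/*
* Clients reach the operations above through the generic dmaengine wrappers,
* e.g. dmaengine_terminate_all() issues DMA_TERMINATE_ALL; DMA_PAUSE and
* DMA_RESUME are likewise passed straight through device_control().
*/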
1603
1604 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1605 {
1606 struct pl08x_dma_chan *plchan;
1607 char *name = chan_id;
1608
1609 /* Reject channels for devices not bound to this driver */
1610 if (chan->device->dev->driver != &pl08x_amba_driver.drv)
1611 return false;
1612
1613 plchan = to_pl08x_chan(chan);
1614
1615 /* Match the requested channel by name */
1616 if (!strcmp(plchan->name, name))
1617 return true;
1618
1619 return false;
1620 }
1621
1622 /*
1623 * Just check that the device is there and active
1624 * TODO: turn this bit on/off depending on the number of physical channels
1625 * actually used, if it is zero... well shut it off. That will save some
1626 * power. Cut the clock at the same time.
1627 */
1628 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1629 {
1630 /* The Nomadik variant does not have the config register */
1631 if (pl08x->vd->nomadik)
1632 return;
1633 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1634 }
1635
1636 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1637 {
1638 struct device *dev = txd->tx.chan->device->dev;
1639 struct pl08x_sg *dsg;
1640
1641 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1642 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1643 list_for_each_entry(dsg, &txd->dsg_list, node)
1644 dma_unmap_single(dev, dsg->src_addr, dsg->len,
1645 DMA_TO_DEVICE);
1646 else {
1647 list_for_each_entry(dsg, &txd->dsg_list, node)
1648 dma_unmap_page(dev, dsg->src_addr, dsg->len,
1649 DMA_TO_DEVICE);
1650 }
1651 }
1652 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1653 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1654 list_for_each_entry(dsg, &txd->dsg_list, node)
1655 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
1656 DMA_FROM_DEVICE);
1657 else
1658 list_for_each_entry(dsg, &txd->dsg_list, node)
1659 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
1660 DMA_FROM_DEVICE);
1661 }
1662 }
1663
1664 static void pl08x_tasklet(unsigned long data)
1665 {
1666 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1667 struct pl08x_driver_data *pl08x = plchan->host;
1668 struct pl08x_txd *txd;
1669 unsigned long flags;
1670
1671 spin_lock_irqsave(&plchan->lock, flags);
1672
1673 txd = plchan->at;
1674 plchan->at = NULL;
1675
1676 if (txd) {
1677 /* Update last completed */
1678 dma_cookie_complete(&txd->tx);
1679 }
1680
1681 /* If a new descriptor is queued, set it up; plchan->at is NULL here */
1682 if (!list_empty(&plchan->pend_list)) {
1683 struct pl08x_txd *next;
1684
1685 next = list_first_entry(&plchan->pend_list,
1686 struct pl08x_txd,
1687 node);
1688 list_del(&next->node);
1689
1690 pl08x_start_txd(plchan, next);
1691 } else if (plchan->phychan_hold) {
1692 /*
1693 * This channel is still in use - we have a new txd being
1694 * prepared and will soon be queued. Don't give up the
1695 * physical channel.
1696 */
1697 } else {
1698 struct pl08x_dma_chan *waiting = NULL;
1699
1700 /*
1701 * No more jobs, so free up the physical channel
1702 * Free any allocated signal on slave transfers too
1703 */
1704 release_phy_channel(plchan);
1705 plchan->state = PL08X_CHAN_IDLE;
1706
1707 /*
1708 * And NOW, before anyone else can grab that freed-up
1709 * physical channel, see if there is some memcpy pending
1710 * that seriously needs to start because of being stacked
1711 * up while we were choking the physical channels with data.
1712 */
1713 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1714 chan.device_node) {
1715 if (waiting->state == PL08X_CHAN_WAITING &&
1716 waiting->waiting != NULL) {
1717 int ret;
1718
1719 /* This should REALLY not fail now */
1720 ret = prep_phy_channel(waiting,
1721 waiting->waiting);
1722 BUG_ON(ret);
1723 waiting->phychan_hold--;
1724 waiting->state = PL08X_CHAN_RUNNING;
1725 waiting->waiting = NULL;
1726 pl08x_issue_pending(&waiting->chan);
1727 break;
1728 }
1729 }
1730 }
1731
1732 spin_unlock_irqrestore(&plchan->lock, flags);
1733
1734 if (txd) {
1735 dma_async_tx_callback callback = txd->tx.callback;
1736 void *callback_param = txd->tx.callback_param;
1737
1738 /* Don't try to unmap buffers on slave channels */
1739 if (!plchan->slave)
1740 pl08x_unmap_buffers(txd);
1741
1742 /* Free the descriptor */
1743 spin_lock_irqsave(&plchan->lock, flags);
1744 pl08x_free_txd(pl08x, txd);
1745 spin_unlock_irqrestore(&plchan->lock, flags);
1746
1747 /* Callback to signal completion */
1748 if (callback)
1749 callback(callback_param);
1750 }
1751 }
1752
1753 static irqreturn_t pl08x_irq(int irq, void *dev)
1754 {
1755 struct pl08x_driver_data *pl08x = dev;
1756 u32 mask = 0, err, tc, i;
1757
1758 /* check & clear - ERR & TC interrupts */
1759 err = readl(pl08x->base + PL080_ERR_STATUS);
1760 if (err) {
1761 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
1762 __func__, err);
1763 writel(err, pl08x->base + PL080_ERR_CLEAR);
1764 }
1765 tc = readl(pl08x->base + PL080_TC_STATUS);
1766 if (tc)
1767 writel(tc, pl08x->base + PL080_TC_CLEAR);
1768
1769 if (!err && !tc)
1770 return IRQ_NONE;
1771
1772 for (i = 0; i < pl08x->vd->channels; i++) {
1773 if (((1 << i) & err) || ((1 << i) & tc)) {
1774 /* Locate physical channel */
1775 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1776 struct pl08x_dma_chan *plchan = phychan->serving;
1777
1778 if (!plchan) {
1779 dev_err(&pl08x->adev->dev,
1780 "%s Error TC interrupt on unused channel: 0x%08x\n",
1781 __func__, i);
1782 continue;
1783 }
1784
1785 /* Schedule tasklet on this channel */
1786 tasklet_schedule(&plchan->tasklet);
1787 mask |= (1 << i);
1788 }
1789 }
1790
1791 return mask ? IRQ_HANDLED : IRQ_NONE;
1792 }
1793
1794 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1795 {
1796 chan->slave = true;
1797 chan->name = chan->cd->bus_id;
1798 chan->cfg.src_addr = chan->cd->addr;
1799 chan->cfg.dst_addr = chan->cd->addr;
1800 }
1801
1802 /*
1803 * Initialise the DMAC memcpy/slave channels.
1804 * Make a local wrapper to hold required data
1805 */
1806 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1807 struct dma_device *dmadev, unsigned int channels, bool slave)
1808 {
1809 struct pl08x_dma_chan *chan;
1810 int i;
1811
1812 INIT_LIST_HEAD(&dmadev->channels);
1813
1814 /*
1815 * Register as many memcpy channels as we have physical channels;
1816 * we won't always be able to use them all, but the code will have
1817 * to cope with that situation.
1818 */
1819 for (i = 0; i < channels; i++) {
1820 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1821 if (!chan) {
1822 dev_err(&pl08x->adev->dev,
1823 "%s no memory for channel\n", __func__);
1824 return -ENOMEM;
1825 }
1826
1827 chan->host = pl08x;
1828 chan->state = PL08X_CHAN_IDLE;
1829 chan->signal = -1;
1830
1831 if (slave) {
1832 chan->cd = &pl08x->pd->slave_channels[i];
1833 pl08x_dma_slave_init(chan);
1834 } else {
1835 chan->cd = &pl08x->pd->memcpy_channel;
1836 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1837 if (!chan->name) {
1838 kfree(chan);
1839 return -ENOMEM;
1840 }
1841 }
1842 dev_dbg(&pl08x->adev->dev,
1843 "initialize virtual channel \"%s\"\n",
1844 chan->name);
1845
1846 chan->chan.device = dmadev;
1847 dma_cookie_init(&chan->chan);
1848
1849 spin_lock_init(&chan->lock);
1850 INIT_LIST_HEAD(&chan->pend_list);
1851 tasklet_init(&chan->tasklet, pl08x_tasklet,
1852 (unsigned long) chan);
1853
1854 list_add_tail(&chan->chan.device_node, &dmadev->channels);
1855 }
1856 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1857 i, slave ? "slave" : "memcpy");
1858 return i;
1859 }
1860
1861 static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1862 {
1863 struct pl08x_dma_chan *chan = NULL;
1864 struct pl08x_dma_chan *next;
1865
1866 list_for_each_entry_safe(chan,
1867 next, &dmadev->channels, chan.device_node) {
1868 list_del(&chan->chan.device_node);
1869 kfree(chan);
1870 }
1871 }
1872
1873 #ifdef CONFIG_DEBUG_FS
1874 static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
1875 {
1876 switch (state) {
1877 case PL08X_CHAN_IDLE:
1878 return "idle";
1879 case PL08X_CHAN_RUNNING:
1880 return "running";
1881 case PL08X_CHAN_PAUSED:
1882 return "paused";
1883 case PL08X_CHAN_WAITING:
1884 return "waiting";
1885 default:
1886 break;
1887 }
1888 return "UNKNOWN STATE";
1889 }
1890
1891 static int pl08x_debugfs_show(struct seq_file *s, void *data)
1892 {
1893 struct pl08x_driver_data *pl08x = s->private;
1894 struct pl08x_dma_chan *chan;
1895 struct pl08x_phy_chan *ch;
1896 unsigned long flags;
1897 int i;
1898
1899 seq_printf(s, "PL08x physical channels:\n");
1900 seq_printf(s, "CHANNEL:\tUSER:\n");
1901 seq_printf(s, "--------\t-----\n");
1902 for (i = 0; i < pl08x->vd->channels; i++) {
1903 struct pl08x_dma_chan *virt_chan;
1904
1905 ch = &pl08x->phy_chans[i];
1906
1907 spin_lock_irqsave(&ch->lock, flags);
1908 virt_chan = ch->serving;
1909
1910 seq_printf(s, "%d\t\t%s%s\n",
1911 ch->id,
1912 virt_chan ? virt_chan->name : "(none)",
1913 ch->locked ? " LOCKED" : "");
1914
1915 spin_unlock_irqrestore(&ch->lock, flags);
1916 }
1917
1918 seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1919 seq_printf(s, "CHANNEL:\tSTATE:\n");
1920 seq_printf(s, "--------\t------\n");
1921 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1922 seq_printf(s, "%s\t\t%s\n", chan->name,
1923 pl08x_state_str(chan->state));
1924 }
1925
1926 seq_printf(s, "\nPL08x virtual slave channels:\n");
1927 seq_printf(s, "CHANNEL:\tSTATE:\n");
1928 seq_printf(s, "--------\t------\n");
1929 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1930 seq_printf(s, "%s\t\t%s\n", chan->name,
1931 pl08x_state_str(chan->state));
1932 }
1933
1934 return 0;
1935 }
1936
1937 static int pl08x_debugfs_open(struct inode *inode, struct file *file)
1938 {
1939 return single_open(file, pl08x_debugfs_show, inode->i_private);
1940 }
1941
1942 static const struct file_operations pl08x_debugfs_operations = {
1943 .open = pl08x_debugfs_open,
1944 .read = seq_read,
1945 .llseek = seq_lseek,
1946 .release = single_release,
1947 };
1948
1949 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1950 {
1951 	/* Expose a simple debugfs interface to view the channel state */
1952 (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
1953 S_IFREG | S_IRUGO, NULL, pl08x,
1954 &pl08x_debugfs_operations);
1955 }
1956
1957 #else
1958 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1959 {
1960 }
1961 #endif
1962
1963 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1964 {
1965 struct pl08x_driver_data *pl08x;
1966 const struct vendor_data *vd = id->data;
1967 int ret = 0;
1968 int i;
1969
1970 ret = amba_request_regions(adev, NULL);
1971 if (ret)
1972 return ret;
1973
1974 /* Create the driver state holder */
1975 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
1976 if (!pl08x) {
1977 ret = -ENOMEM;
1978 goto out_no_pl08x;
1979 }
1980
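	/*
	 * Two dmaengine devices are registered on top of the same
	 * controller: one carrying the memcpy capability and one carrying
	 * the slave capability; only the latter gets a slave_sg callback.
	 */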
1981 /* Initialize memcpy engine */
1982 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1983 pl08x->memcpy.dev = &adev->dev;
1984 pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1985 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
1986 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
1987 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1988 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
1989 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
1990 pl08x->memcpy.device_control = pl08x_control;
1991
1992 /* Initialize slave engine */
1993 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
1994 pl08x->slave.dev = &adev->dev;
1995 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1996 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
1997 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1998 pl08x->slave.device_tx_status = pl08x_dma_tx_status;
1999 pl08x->slave.device_issue_pending = pl08x_issue_pending;
2000 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
2001 pl08x->slave.device_control = pl08x_control;
2002
2003 /* Get the platform data */
2004 pl08x->pd = dev_get_platdata(&adev->dev);
2005 if (!pl08x->pd) {
2006 dev_err(&adev->dev, "no platform data supplied\n");
		ret = -EINVAL;
2007 		goto out_no_platdata;
2008 }
2009
2010 /* Assign useful pointers to the driver state */
2011 pl08x->adev = adev;
2012 pl08x->vd = vd;
2013
2014 /* By default, AHB1 only. If dualmaster, from platform */
2015 pl08x->lli_buses = PL08X_AHB1;
2016 pl08x->mem_buses = PL08X_AHB1;
2017 if (pl08x->vd->dualmaster) {
2018 pl08x->lli_buses = pl08x->pd->lli_buses;
2019 pl08x->mem_buses = pl08x->pd->mem_buses;
2020 }
2021
2022 	/* A DMA memory pool for LLIs, aligned to PL08X_ALIGN bytes */
2023 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
2024 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
2025 if (!pl08x->pool) {
2026 ret = -ENOMEM;
2027 goto out_no_lli_pool;
2028 }
2029
2030 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
2031 if (!pl08x->base) {
2032 ret = -ENOMEM;
2033 goto out_no_ioremap;
2034 }
2035
2036 /* Turn on the PL08x */
2037 pl08x_ensure_on(pl08x);
2038
2039 	/* Clear any pending interrupts and attach the interrupt handler */
2040 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2041 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2042
2043 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
2044 DRIVER_NAME, pl08x);
2045 if (ret) {
2046 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2047 __func__, adev->irq[0]);
2048 goto out_no_irq;
2049 }
2050
2051 /* Initialize physical channels */
2052 pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
2053 GFP_KERNEL);
2054 if (!pl08x->phy_chans) {
2055 dev_err(&adev->dev, "%s failed to allocate "
2056 "physical channel holders\n",
2057 __func__);
		ret = -ENOMEM;
2058 		goto out_no_phychans;
2059 }
2060
2061 for (i = 0; i < vd->channels; i++) {
2062 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
2063
2064 ch->id = i;
2065 ch->base = pl08x->base + PL080_Cx_BASE(i);
2066 spin_lock_init(&ch->lock);
2067
2068 /*
2069 * Nomadik variants can have channels that are locked
2070 * down for the secure world only. Lock up these channels
2071 * by perpetually serving a dummy virtual channel.
2072 */
2073 if (vd->nomadik) {
2074 u32 val;
2075
2076 val = readl(ch->base + PL080_CH_CONFIG);
2077 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
2078 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
2079 ch->locked = true;
2080 }
2081 }
2082
2083 dev_dbg(&adev->dev, "physical channel %d is %s\n",
2084 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
2085 }
2086
2087 /* Register as many memcpy channels as there are physical channels */
2088 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
2089 pl08x->vd->channels, false);
2090 if (ret <= 0) {
2091 dev_warn(&pl08x->adev->dev,
2092 "%s failed to enumerate memcpy channels - %d\n",
2093 __func__, ret);
2094 goto out_no_memcpy;
2095 }
2096 pl08x->memcpy.chancnt = ret;
2097
2098 /* Register slave channels */
2099 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
2100 pl08x->pd->num_slave_channels, true);
2101 if (ret <= 0) {
2102 dev_warn(&pl08x->adev->dev,
2103 "%s failed to enumerate slave channels - %d\n",
2104 __func__, ret);
2105 goto out_no_slave;
2106 }
2107 pl08x->slave.chancnt = ret;
2108
2109 ret = dma_async_device_register(&pl08x->memcpy);
2110 if (ret) {
2111 dev_warn(&pl08x->adev->dev,
2112 "%s failed to register memcpy as an async device - %d\n",
2113 __func__, ret);
2114 goto out_no_memcpy_reg;
2115 }
2116
2117 ret = dma_async_device_register(&pl08x->slave);
2118 if (ret) {
2119 dev_warn(&pl08x->adev->dev,
2120 "%s failed to register slave as an async device - %d\n",
2121 __func__, ret);
2122 goto out_no_slave_reg;
2123 }
2124
2125 amba_set_drvdata(adev, pl08x);
2126 init_pl08x_debugfs(pl08x);
2127 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
2128 amba_part(adev), amba_rev(adev),
2129 (unsigned long long)adev->res.start, adev->irq[0]);
2130
2131 return 0;
2132
2133 out_no_slave_reg:
2134 dma_async_device_unregister(&pl08x->memcpy);
2135 out_no_memcpy_reg:
2136 pl08x_free_virtual_channels(&pl08x->slave);
2137 out_no_slave:
2138 pl08x_free_virtual_channels(&pl08x->memcpy);
2139 out_no_memcpy:
2140 kfree(pl08x->phy_chans);
2141 out_no_phychans:
2142 free_irq(adev->irq[0], pl08x);
2143 out_no_irq:
2144 iounmap(pl08x->base);
2145 out_no_ioremap:
2146 dma_pool_destroy(pl08x->pool);
2147 out_no_lli_pool:
2148 out_no_platdata:
2149 kfree(pl08x);
2150 out_no_pl08x:
2151 amba_release_regions(adev);
2152 return ret;
2153 }
2154
2155 /* PL080 has 8 channels and the PL081 has just 2 */
2156 static struct vendor_data vendor_pl080 = {
2157 .channels = 8,
2158 .dualmaster = true,
2159 };
2160
2161 static struct vendor_data vendor_nomadik = {
2162 .channels = 8,
2163 .dualmaster = true,
2164 .nomadik = true,
2165 };
2166
2167 static struct vendor_data vendor_pl081 = {
2168 .channels = 2,
2169 .dualmaster = false,
2170 };
2171
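/*
 * Match on the AMBA peripheral ID: the generic PL080 and PL081, plus the
 * Nomadik 8815 PL080 derivative which carries its own peripheral ID.
 */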
2172 static struct amba_id pl08x_ids[] = {
2173 /* PL080 */
2174 {
2175 .id = 0x00041080,
2176 .mask = 0x000fffff,
2177 .data = &vendor_pl080,
2178 },
2179 /* PL081 */
2180 {
2181 .id = 0x00041081,
2182 .mask = 0x000fffff,
2183 .data = &vendor_pl081,
2184 },
2185 /* Nomadik 8815 PL080 variant */
2186 {
2187 .id = 0x00280080,
2188 .mask = 0x00ffffff,
2189 .data = &vendor_nomadik,
2190 },
2191 { 0, 0 },
2192 };
2193
2194 MODULE_DEVICE_TABLE(amba, pl08x_ids);
2195
2196 static struct amba_driver pl08x_amba_driver = {
2197 .drv.name = DRIVER_NAME,
2198 .id_table = pl08x_ids,
2199 .probe = pl08x_probe,
2200 };
2201
2202 static int __init pl08x_init(void)
2203 {
2204 int retval;
2205 retval = amba_driver_register(&pl08x_amba_driver);
2206 if (retval)
2207 		printk(KERN_WARNING DRIVER_NAME
2208 			": failed to register as an AMBA device (%d)\n",
2209 retval);
2210 return retval;
2211 }
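/*
 * Registered at subsys_initcall time rather than module_init so that, when
 * built in, the DMA engine comes up before the drivers that want to use it;
 * that is the usual reason for this pattern, though the intent is assumed
 * here.
 */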
2212 subsys_initcall(pl08x_init);