/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#include "dmaengine.h"

#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32

enum pl330_srccachectrl {
	SCCTRL0,	/* Noncacheable and nonbufferable */
	SCCTRL1,	/* Bufferable only */
	SCCTRL2,	/* Cacheable, but do not allocate */
	SCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	SINVALID1,
	SINVALID2,
	SCCTRL6,	/* Cacheable write-through, allocate on reads only */
	SCCTRL7,	/* Cacheable write-back, allocate on reads only */
};

enum pl330_dstcachectrl {
	DCCTRL0,	/* Noncacheable and nonbufferable */
	DCCTRL1,	/* Bufferable only */
	DCCTRL2,	/* Cacheable, but do not allocate */
	DCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	DINVALID1,	/* AWCACHE = 0x1000 */
	DINVALID2,
	DCCTRL6,	/* Cacheable write-through, allocate on writes only */
	DCCTRL7,	/* Cacheable write-back, allocate on writes only */
};

/* Register and Bit field Definitions */
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define INTSTATUS		0x28

#define FTC(n)			(_FTC + (n)*0x4)

#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define CPC(n)			(_CPC + (n)*0x8)

#define SA(n)			(_SA + (n)*0x20)

#define DA(n)			(_DA + (n)*0x20)

#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define LC0(n)			(_LC0 + (n)*0x20)

#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2
#define PCELL_ID		0xff0

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define DESIGNER		0x41
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PCELL_ID_VAL		0xb105f00d

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)

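/*
 * Note: the PL330_STATE_* values above are driver-internal, one-hot flags
 * (so several of them can be OR-ed into a mask such as PL330_STABLE_STATES
 * or the UNTIL() argument); they are not the raw DS/CS status encodings,
 * which _state() translates into these flags.
 */
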
#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAFLUSHP		2
#define SZ_DMALPEND		2

#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))

/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256

/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif

/* The number of default descriptors */
#define NR_DEFAULT_DESC	16

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
	u32	pcell_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/* Handle to the DMAC provided to the PL330 core */
struct pl330_info {
	/* Owning device */
	struct device *dev;
	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Client can freely use it. */
	void	*client_data;
	/* PL330 core data, Client must not touch it. */
	void	*pl330_data;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;
	/*
	 * If the DMAC has some reset mechanism, then the
	 * client may want to provide pointer to the method.
	 */
	void (*dmac_reset)(struct pl330_info *pi);
};

/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
	/*
	 * Pointer to next xfer in the list.
	 * The last xfer in the req must point to NULL.
	 */
	struct pl330_xfer *next;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
	enum pl330_reqtype rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Unique token for this xfer, set by the client. */
	void *token;
	/* Callback to be called after xfer. */
	void (*xfer_cb)(void *token, enum pl330_op_err err);
	/* If NULL, req will be done at last set parameters. */
	struct pl330_reqcfg *cfg;
	/* Pointer to first xfer in the request. */
	struct pl330_xfer *x;
};

/*
 * To know the status of the channel and DMAC, the client
 * provides a pointer to this structure. The PL330 core
 * fills it with current information.
 */
struct pl330_chanstatus {
	/*
	 * If the DMAC engine halted due to some error,
	 * the client should remove-add DMAC.
	 */
	bool dmac_halted;
	/*
	 * If channel is halted due to some error,
	 * the client should ABORT/FLUSH and START the channel.
	 */
	bool faulting;
	/* Location of last load */
	u32 src_addr;
	/* Location of last store */
	u32 dst_addr;
	/*
	 * Pointer to the currently active req, NULL if channel is
	 * inactive, even though the requests may be present.
	 */
	struct pl330_req *top_req;
	/* Pointer to req waiting second in the queue if any. */
	struct pl330_req *wait_req;
};

enum pl330_chan_op {
	/* Start the channel */
	PL330_OP_START,
	/* Abort the active xfer */
	PL330_OP_ABORT,
	/* Stop xfer and flush queue */
	PL330_OP_FLUSH,
};

struct _xfer_spec {
	u32 ccr;
	struct pl330_req *r;
	struct pl330_xfer *x;
};

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	/* Number of bytes taken to setup MC for the req */
	u32 mc_len;
	struct pl330_req *r;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

/* A DMAC */
struct pl330_dmac {
	spinlock_t		lock;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;
	/* Pointer to platform specific stuff */
	struct pl330_info	*pinfo;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of burst */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
	if (r && r->xfer_cb)
		r->xfer_cb(r->token, err);
}

static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
		? true : false;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
		? false : true;
}

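/*
 * Note: queue occupancy is tracked entirely in the microcode buffers
 * themselves. IS_FREE() checks whether a slot's first opcode is DMAEND,
 * which mark_free() writes back when the slot is recycled, so no separate
 * bookkeeping field is needed.
 */
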
static inline bool is_manager(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* MANAGER is indexed at the end */
	if (thrd->id == pl330->pinfo->pcfg.num_chan)
		return true;
	else
		return false;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

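/*
 * PERIPH_ID/PCELL_ID are spread across four byte-wide registers at
 * consecutive word offsets; get_id() assembles them into a single u32.
 */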
static inline u32 get_id(struct pl330_info *pi, u32 off)
{
	void __iomem *regs = pi->base;
	u32 id = 0;

	id |= (readb(regs + off + 0x0) << 0);
	id |= (readb(regs + off + 0x4) << 8);
	id |= (readb(regs + off + 0x8) << 16);
	id |= (readb(regs + off + 0xc) << 24);

	return id;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

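/*
 * Microcode emitters: each _emit_xxx() helper below writes one PL330
 * instruction into buf[] and returns the number of bytes it occupies.
 * When called with dry_run set, nothing is written and only the size is
 * returned, so callers can measure a program before generating it.
 */
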
667 static inline u32
_emit_ADDH(unsigned dry_run
, u8 buf
[],
668 enum pl330_dst da
, u16 val
)
673 buf
[0] = CMD_DMAADDH
;
675 *((u16
*)&buf
[1]) = val
;
677 PL330_DBGCMD_DUMP(SZ_DMAADDH
, "\tDMAADDH %s %u\n",
678 da
== 1 ? "DA" : "SA", val
);
683 static inline u32
_emit_END(unsigned dry_run
, u8 buf
[])
690 PL330_DBGCMD_DUMP(SZ_DMAEND
, "\tDMAEND\n");
695 static inline u32
_emit_FLUSHP(unsigned dry_run
, u8 buf
[], u8 peri
)
700 buf
[0] = CMD_DMAFLUSHP
;
706 PL330_DBGCMD_DUMP(SZ_DMAFLUSHP
, "\tDMAFLUSHP %u\n", peri
>> 3);
711 static inline u32
_emit_LD(unsigned dry_run
, u8 buf
[], enum pl330_cond cond
)
719 buf
[0] |= (0 << 1) | (1 << 0);
720 else if (cond
== BURST
)
721 buf
[0] |= (1 << 1) | (1 << 0);
723 PL330_DBGCMD_DUMP(SZ_DMALD
, "\tDMALD%c\n",
724 cond
== SINGLE
? 'S' : (cond
== BURST
? 'B' : 'A'));
729 static inline u32
_emit_LDP(unsigned dry_run
, u8 buf
[],
730 enum pl330_cond cond
, u8 peri
)
744 PL330_DBGCMD_DUMP(SZ_DMALDP
, "\tDMALDP%c %u\n",
745 cond
== SINGLE
? 'S' : 'B', peri
>> 3);
750 static inline u32
_emit_LP(unsigned dry_run
, u8 buf
[],
751 unsigned loop
, u8 cnt
)
761 cnt
--; /* DMAC increments by 1 internally */
764 PL330_DBGCMD_DUMP(SZ_DMALP
, "\tDMALP_%c %u\n", loop
? '1' : '0', cnt
);
770 enum pl330_cond cond
;
776 static inline u32
_emit_LPEND(unsigned dry_run
, u8 buf
[],
777 const struct _arg_LPEND
*arg
)
779 enum pl330_cond cond
= arg
->cond
;
780 bool forever
= arg
->forever
;
781 unsigned loop
= arg
->loop
;
782 u8 bjump
= arg
->bjump
;
787 buf
[0] = CMD_DMALPEND
;
796 buf
[0] |= (0 << 1) | (1 << 0);
797 else if (cond
== BURST
)
798 buf
[0] |= (1 << 1) | (1 << 0);
802 PL330_DBGCMD_DUMP(SZ_DMALPEND
, "\tDMALP%s%c_%c bjmpto_%x\n",
803 forever
? "FE" : "END",
804 cond
== SINGLE
? 'S' : (cond
== BURST
? 'B' : 'A'),
811 static inline u32
_emit_KILL(unsigned dry_run
, u8 buf
[])
816 buf
[0] = CMD_DMAKILL
;
821 static inline u32
_emit_MOV(unsigned dry_run
, u8 buf
[],
822 enum dmamov_dst dst
, u32 val
)
829 *((u32
*)&buf
[2]) = val
;
831 PL330_DBGCMD_DUMP(SZ_DMAMOV
, "\tDMAMOV %s 0x%x\n",
832 dst
== SAR
? "SAR" : (dst
== DAR
? "DAR" : "CCR"), val
);
837 static inline u32
_emit_NOP(unsigned dry_run
, u8 buf
[])
844 PL330_DBGCMD_DUMP(SZ_DMANOP
, "\tDMANOP\n");
849 static inline u32
_emit_RMB(unsigned dry_run
, u8 buf
[])
856 PL330_DBGCMD_DUMP(SZ_DMARMB
, "\tDMARMB\n");
861 static inline u32
_emit_SEV(unsigned dry_run
, u8 buf
[], u8 ev
)
872 PL330_DBGCMD_DUMP(SZ_DMASEV
, "\tDMASEV %u\n", ev
>> 3);
877 static inline u32
_emit_ST(unsigned dry_run
, u8 buf
[], enum pl330_cond cond
)
885 buf
[0] |= (0 << 1) | (1 << 0);
886 else if (cond
== BURST
)
887 buf
[0] |= (1 << 1) | (1 << 0);
889 PL330_DBGCMD_DUMP(SZ_DMAST
, "\tDMAST%c\n",
890 cond
== SINGLE
? 'S' : (cond
== BURST
? 'B' : 'A'));
895 static inline u32
_emit_STP(unsigned dry_run
, u8 buf
[],
896 enum pl330_cond cond
, u8 peri
)
910 PL330_DBGCMD_DUMP(SZ_DMASTP
, "\tDMASTP%c %u\n",
911 cond
== SINGLE
? 'S' : 'B', peri
>> 3);
916 static inline u32
_emit_STZ(unsigned dry_run
, u8 buf
[])
923 PL330_DBGCMD_DUMP(SZ_DMASTZ
, "\tDMASTZ\n");
928 static inline u32
_emit_WFE(unsigned dry_run
, u8 buf
[], u8 ev
,
943 PL330_DBGCMD_DUMP(SZ_DMAWFE
, "\tDMAWFE %u%s\n",
944 ev
>> 3, invalidate
? ", I" : "");
949 static inline u32
_emit_WFP(unsigned dry_run
, u8 buf
[],
950 enum pl330_cond cond
, u8 peri
)
958 buf
[0] |= (0 << 1) | (0 << 0);
959 else if (cond
== BURST
)
960 buf
[0] |= (1 << 1) | (0 << 0);
962 buf
[0] |= (0 << 1) | (1 << 0);
968 PL330_DBGCMD_DUMP(SZ_DMAWFP
, "\tDMAWFP%c %u\n",
969 cond
== SINGLE
? 'S' : (cond
== BURST
? 'B' : 'P'), peri
>> 3);
974 static inline u32
_emit_WMB(unsigned dry_run
, u8 buf
[])
981 PL330_DBGCMD_DUMP(SZ_DMAWMB
, "\tDMAWMB\n");
992 static inline u32
_emit_GO(unsigned dry_run
, u8 buf
[],
993 const struct _arg_GO
*arg
)
996 u32 addr
= arg
->addr
;
997 unsigned ns
= arg
->ns
;
1003 buf
[0] |= (ns
<< 1);
1005 buf
[1] = chan
& 0x7;
1007 *((u32
*)&buf
[2]) = addr
;
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

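/*
 * Issue a single instruction through the debug interface: DBGINST0/1
 * carry the opcode bytes and the target channel number, and writing 0
 * to DBGCMD kicks off execution.
 */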
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}

/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
static void mark_free(struct pl330_thread *thrd, int idx)
{
	struct _pl330_req *req = &thrd->req[idx];

	_emit_END(0, req->mc_cpu);
	req->mc_len = 0;

	thrd->req_running = -1;
}

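/*
 * Translate the raw DS/CS(n) status field of a thread into the driver's
 * PL330_STATE_* flags. Some encodings are only meaningful for channel
 * threads and map to PL330_STATE_INVALID for the manager.
 */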
static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

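/* Stop a thread: kill it via the debug interface and mask its event IRQ. */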
static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (!IS_FREE(&thrd->req[idx]))
		req = &thrd->req[idx];
	else {
		idx = thrd->lstenq;
		if (!IS_FREE(&thrd->req[idx]))
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

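/*
 * _start() first drives the thread out of any transient state and then,
 * once it is STOPPED, hands over to _trigger() to issue DMAGO. The
 * fall-throughs in the switch below are intentional.
 */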
static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_FAULTING:
		_stop(thrd);

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

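/*
 * The _ldst_*() helpers below emit the load/store body of a transfer
 * 'cyc' times; which variant is used depends on the request type
 * (memory-to-memory, memory-to-device or device-to-memory).
 */
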
1249 static inline int _ldst_memtomem(unsigned dry_run
, u8 buf
[],
1250 const struct _xfer_spec
*pxs
, int cyc
)
1253 struct pl330_config
*pcfg
= pxs
->r
->cfg
->pcfg
;
1255 /* check lock-up free version */
1256 if (get_revision(pcfg
->periph_id
) >= PERIPH_REV_R1P0
) {
1258 off
+= _emit_LD(dry_run
, &buf
[off
], ALWAYS
);
1259 off
+= _emit_ST(dry_run
, &buf
[off
], ALWAYS
);
1263 off
+= _emit_LD(dry_run
, &buf
[off
], ALWAYS
);
1264 off
+= _emit_RMB(dry_run
, &buf
[off
]);
1265 off
+= _emit_ST(dry_run
, &buf
[off
], ALWAYS
);
1266 off
+= _emit_WMB(dry_run
, &buf
[off
]);
1273 static inline int _ldst_devtomem(unsigned dry_run
, u8 buf
[],
1274 const struct _xfer_spec
*pxs
, int cyc
)
1279 off
+= _emit_WFP(dry_run
, &buf
[off
], SINGLE
, pxs
->r
->peri
);
1280 off
+= _emit_LDP(dry_run
, &buf
[off
], SINGLE
, pxs
->r
->peri
);
1281 off
+= _emit_ST(dry_run
, &buf
[off
], ALWAYS
);
1282 off
+= _emit_FLUSHP(dry_run
, &buf
[off
], pxs
->r
->peri
);
1288 static inline int _ldst_memtodev(unsigned dry_run
, u8 buf
[],
1289 const struct _xfer_spec
*pxs
, int cyc
)
1294 off
+= _emit_WFP(dry_run
, &buf
[off
], SINGLE
, pxs
->r
->peri
);
1295 off
+= _emit_LD(dry_run
, &buf
[off
], ALWAYS
);
1296 off
+= _emit_STP(dry_run
, &buf
[off
], SINGLE
, pxs
->r
->peri
);
1297 off
+= _emit_FLUSHP(dry_run
, &buf
[off
], pxs
->r
->peri
);
1303 static int _bursts(unsigned dry_run
, u8 buf
[],
1304 const struct _xfer_spec
*pxs
, int cyc
)
1308 switch (pxs
->r
->rqtype
) {
1310 off
+= _ldst_memtodev(dry_run
, &buf
[off
], pxs
, cyc
);
1313 off
+= _ldst_devtomem(dry_run
, &buf
[off
], pxs
, cyc
);
1316 off
+= _ldst_memtomem(dry_run
, &buf
[off
], pxs
, cyc
);
1319 off
+= 0x40000000; /* Scare off the Client */
1326 /* Returns bytes consumed and updates bursts */
1327 static inline int _loop(unsigned dry_run
, u8 buf
[],
1328 unsigned long *bursts
, const struct _xfer_spec
*pxs
)
1330 int cyc
, cycmax
, szlp
, szlpend
, szbrst
, off
;
1331 unsigned lcnt0
, lcnt1
, ljmp0
, ljmp1
;
1332 struct _arg_LPEND lpend
;
1334 /* Max iterations possible in DMALP is 256 */
1335 if (*bursts
>= 256*256) {
1338 cyc
= *bursts
/ lcnt1
/ lcnt0
;
1339 } else if (*bursts
> 256) {
1341 lcnt0
= *bursts
/ lcnt1
;
1349 szlp
= _emit_LP(1, buf
, 0, 0);
1350 szbrst
= _bursts(1, buf
, pxs
, 1);
1352 lpend
.cond
= ALWAYS
;
1353 lpend
.forever
= false;
1356 szlpend
= _emit_LPEND(1, buf
, &lpend
);
1364 * Max bursts that we can unroll due to limit on the
1365 * size of backward jump that can be encoded in DMALPEND
1366 * which is 8-bits and hence 255
1368 cycmax
= (255 - (szlp
+ szlpend
)) / szbrst
;
1370 cyc
= (cycmax
< cyc
) ? cycmax
: cyc
;
1375 off
+= _emit_LP(dry_run
, &buf
[off
], 0, lcnt0
);
1379 off
+= _emit_LP(dry_run
, &buf
[off
], 1, lcnt1
);
1382 off
+= _bursts(dry_run
, &buf
[off
], pxs
, cyc
);
1384 lpend
.cond
= ALWAYS
;
1385 lpend
.forever
= false;
1387 lpend
.bjump
= off
- ljmp1
;
1388 off
+= _emit_LPEND(dry_run
, &buf
[off
], &lpend
);
1391 lpend
.cond
= ALWAYS
;
1392 lpend
.forever
= false;
1394 lpend
.bjump
= off
- ljmp0
;
1395 off
+= _emit_LPEND(dry_run
, &buf
[off
], &lpend
);
1398 *bursts
= lcnt1
* cyc
;
1405 static inline int _setup_loops(unsigned dry_run
, u8 buf
[],
1406 const struct _xfer_spec
*pxs
)
1408 struct pl330_xfer
*x
= pxs
->x
;
1410 unsigned long c
, bursts
= BYTE_TO_BURST(x
->bytes
, ccr
);
1415 off
+= _loop(dry_run
, &buf
[off
], &c
, pxs
);
1422 static inline int _setup_xfer(unsigned dry_run
, u8 buf
[],
1423 const struct _xfer_spec
*pxs
)
1425 struct pl330_xfer
*x
= pxs
->x
;
1428 /* DMAMOV SAR, x->src_addr */
1429 off
+= _emit_MOV(dry_run
, &buf
[off
], SAR
, x
->src_addr
);
1430 /* DMAMOV DAR, x->dst_addr */
1431 off
+= _emit_MOV(dry_run
, &buf
[off
], DAR
, x
->dst_addr
);
1434 off
+= _setup_loops(dry_run
, &buf
[off
], pxs
);
1440 * A req is a sequence of one or more xfer units.
1441 * Returns the number of bytes taken to setup the MC for the req.
1443 static int _setup_req(unsigned dry_run
, struct pl330_thread
*thrd
,
1444 unsigned index
, struct _xfer_spec
*pxs
)
1446 struct _pl330_req
*req
= &thrd
->req
[index
];
1447 struct pl330_xfer
*x
;
1448 u8
*buf
= req
->mc_cpu
;
1451 PL330_DBGMC_START(req
->mc_bus
);
1453 /* DMAMOV CCR, ccr */
1454 off
+= _emit_MOV(dry_run
, &buf
[off
], CCR
, pxs
->ccr
);
1458 /* Error if xfer length is not aligned at burst size */
1459 if (x
->bytes
% (BRST_SIZE(pxs
->ccr
) * BRST_LEN(pxs
->ccr
)))
1463 off
+= _setup_xfer(dry_run
, &buf
[off
], pxs
);
1468 /* DMASEV peripheral/event */
1469 off
+= _emit_SEV(dry_run
, &buf
[off
], thrd
->ev
);
1471 off
+= _emit_END(dry_run
, &buf
[off
]);
static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}

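/*
 * The DINVALID/SINVALID cache-control encodings are reserved by the
 * hardware; _is_valid() rejects a CCR that uses them before the request
 * is accepted.
 */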
static inline bool _is_valid(u32 ccr)
{
	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;

	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;

	if (dcctl == DINVALID1 || dcctl == DINVALID2
			|| scctl == SINVALID1 || scctl == SINVALID2)
		return false;
	else
		return true;
}

1524 * Submit a list of xfers after which the client wants notification.
1525 * Client is not notified after each xfer unit, just once after all
1526 * xfer units are done or some error occurs.
1528 static int pl330_submit_req(void *ch_id
, struct pl330_req
*r
)
1530 struct pl330_thread
*thrd
= ch_id
;
1531 struct pl330_dmac
*pl330
;
1532 struct pl330_info
*pi
;
1533 struct _xfer_spec xs
;
1534 unsigned long flags
;
1540 /* No Req or Unacquired Channel or DMAC */
1541 if (!r
|| !thrd
|| thrd
->free
)
1548 if (pl330
->state
== DYING
1549 || pl330
->dmac_tbd
.reset_chan
& (1 << thrd
->id
)) {
1550 dev_info(thrd
->dmac
->pinfo
->dev
, "%s:%d\n",
1551 __func__
, __LINE__
);
1555 /* If request for non-existing peripheral */
1556 if (r
->rqtype
!= MEMTOMEM
&& r
->peri
>= pi
->pcfg
.num_peri
) {
1557 dev_info(thrd
->dmac
->pinfo
->dev
,
1558 "%s:%d Invalid peripheral(%u)!\n",
1559 __func__
, __LINE__
, r
->peri
);
1563 spin_lock_irqsave(&pl330
->lock
, flags
);
1565 if (_queue_full(thrd
)) {
1570 /* Prefer Secure Channel */
1571 if (!_manager_ns(thrd
))
1572 r
->cfg
->nonsecure
= 0;
1574 r
->cfg
->nonsecure
= 1;
1576 /* Use last settings, if not provided */
1578 ccr
= _prepare_ccr(r
->cfg
);
1580 ccr
= readl(regs
+ CC(thrd
->id
));
1582 /* If this req doesn't have valid xfer settings */
1583 if (!_is_valid(ccr
)) {
1585 dev_info(thrd
->dmac
->pinfo
->dev
, "%s:%d Invalid CCR(%x)!\n",
1586 __func__
, __LINE__
, ccr
);
1590 idx
= IS_FREE(&thrd
->req
[0]) ? 0 : 1;
1595 /* First dry run to check if req is acceptable */
1596 ret
= _setup_req(1, thrd
, idx
, &xs
);
1600 if (ret
> pi
->mcbufsz
/ 2) {
1601 dev_info(thrd
->dmac
->pinfo
->dev
,
1602 "%s:%d Trying increasing mcbufsz\n",
1603 __func__
, __LINE__
);
1608 /* Hook the request */
1610 thrd
->req
[idx
].mc_len
= _setup_req(0, thrd
, idx
, &xs
);
1611 thrd
->req
[idx
].r
= r
;
1616 spin_unlock_irqrestore(&pl330
->lock
, flags
);
1621 static void pl330_dotask(unsigned long data
)
1623 struct pl330_dmac
*pl330
= (struct pl330_dmac
*) data
;
1624 struct pl330_info
*pi
= pl330
->pinfo
;
1625 unsigned long flags
;
1628 spin_lock_irqsave(&pl330
->lock
, flags
);
1630 /* The DMAC itself gone nuts */
1631 if (pl330
->dmac_tbd
.reset_dmac
) {
1632 pl330
->state
= DYING
;
1633 /* Reset the manager too */
1634 pl330
->dmac_tbd
.reset_mngr
= true;
1635 /* Clear the reset flag */
1636 pl330
->dmac_tbd
.reset_dmac
= false;
1639 if (pl330
->dmac_tbd
.reset_mngr
) {
1640 _stop(pl330
->manager
);
1641 /* Reset all channels */
1642 pl330
->dmac_tbd
.reset_chan
= (1 << pi
->pcfg
.num_chan
) - 1;
1643 /* Clear the reset flag */
1644 pl330
->dmac_tbd
.reset_mngr
= false;
1647 for (i
= 0; i
< pi
->pcfg
.num_chan
; i
++) {
1649 if (pl330
->dmac_tbd
.reset_chan
& (1 << i
)) {
1650 struct pl330_thread
*thrd
= &pl330
->channels
[i
];
1651 void __iomem
*regs
= pi
->base
;
1652 enum pl330_op_err err
;
1656 if (readl(regs
+ FSC
) & (1 << thrd
->id
))
1657 err
= PL330_ERR_FAIL
;
1659 err
= PL330_ERR_ABORT
;
1661 spin_unlock_irqrestore(&pl330
->lock
, flags
);
1663 _callback(thrd
->req
[1 - thrd
->lstenq
].r
, err
);
1664 _callback(thrd
->req
[thrd
->lstenq
].r
, err
);
1666 spin_lock_irqsave(&pl330
->lock
, flags
);
1668 thrd
->req
[0].r
= NULL
;
1669 thrd
->req
[1].r
= NULL
;
1673 /* Clear the reset flag */
1674 pl330
->dmac_tbd
.reset_chan
&= ~(1 << i
);
1678 spin_unlock_irqrestore(&pl330
->lock
, flags
);
1683 /* Returns 1 if state was updated, 0 otherwise */
1684 static int pl330_update(const struct pl330_info
*pi
)
1686 struct _pl330_req
*rqdone
;
1687 struct pl330_dmac
*pl330
;
1688 unsigned long flags
;
1691 int id
, ev
, ret
= 0;
1693 if (!pi
|| !pi
->pl330_data
)
1697 pl330
= pi
->pl330_data
;
1699 spin_lock_irqsave(&pl330
->lock
, flags
);
1701 val
= readl(regs
+ FSM
) & 0x1;
1703 pl330
->dmac_tbd
.reset_mngr
= true;
1705 pl330
->dmac_tbd
.reset_mngr
= false;
1707 val
= readl(regs
+ FSC
) & ((1 << pi
->pcfg
.num_chan
) - 1);
1708 pl330
->dmac_tbd
.reset_chan
|= val
;
1711 while (i
< pi
->pcfg
.num_chan
) {
1712 if (val
& (1 << i
)) {
1714 "Reset Channel-%d\t CS-%x FTC-%x\n",
1715 i
, readl(regs
+ CS(i
)),
1716 readl(regs
+ FTC(i
)));
1717 _stop(&pl330
->channels
[i
]);
1723 /* Check which event happened i.e, thread notified */
1724 val
= readl(regs
+ ES
);
1725 if (pi
->pcfg
.num_events
< 32
1726 && val
& ~((1 << pi
->pcfg
.num_events
) - 1)) {
1727 pl330
->dmac_tbd
.reset_dmac
= true;
1728 dev_err(pi
->dev
, "%s:%d Unexpected!\n", __func__
, __LINE__
);
1733 for (ev
= 0; ev
< pi
->pcfg
.num_events
; ev
++) {
1734 if (val
& (1 << ev
)) { /* Event occurred */
1735 struct pl330_thread
*thrd
;
1736 u32 inten
= readl(regs
+ INTEN
);
1739 /* Clear the event */
1740 if (inten
& (1 << ev
))
1741 writel(1 << ev
, regs
+ INTCLR
);
1745 id
= pl330
->events
[ev
];
1747 thrd
= &pl330
->channels
[id
];
1749 active
= thrd
->req_running
;
1750 if (active
== -1) /* Aborted */
1753 rqdone
= &thrd
->req
[active
];
1754 mark_free(thrd
, active
);
1756 /* Get going again ASAP */
1759 /* For now, just make a list of callbacks to be done */
1760 list_add_tail(&rqdone
->rqd
, &pl330
->req_done
);
1764 /* Now that we are in no hurry, do the callbacks */
1765 while (!list_empty(&pl330
->req_done
)) {
1766 struct pl330_req
*r
;
1768 rqdone
= container_of(pl330
->req_done
.next
,
1769 struct _pl330_req
, rqd
);
1771 list_del_init(&rqdone
->rqd
);
1773 /* Detach the req */
1777 spin_unlock_irqrestore(&pl330
->lock
, flags
);
1778 _callback(r
, PL330_ERR_NONE
);
1779 spin_lock_irqsave(&pl330
->lock
, flags
);
1783 spin_unlock_irqrestore(&pl330
->lock
, flags
);
1785 if (pl330
->dmac_tbd
.reset_dmac
1786 || pl330
->dmac_tbd
.reset_mngr
1787 || pl330
->dmac_tbd
.reset_chan
) {
1789 tasklet_schedule(&pl330
->tasks
);
1795 static int pl330_chan_ctrl(void *ch_id
, enum pl330_chan_op op
)
1797 struct pl330_thread
*thrd
= ch_id
;
1798 struct pl330_dmac
*pl330
;
1799 unsigned long flags
;
1800 int ret
= 0, active
;
1802 if (!thrd
|| thrd
->free
|| thrd
->dmac
->state
== DYING
)
1806 active
= thrd
->req_running
;
1808 spin_lock_irqsave(&pl330
->lock
, flags
);
1811 case PL330_OP_FLUSH
:
1812 /* Make sure the channel is stopped */
1815 thrd
->req
[0].r
= NULL
;
1816 thrd
->req
[1].r
= NULL
;
1821 case PL330_OP_ABORT
:
1822 /* Make sure the channel is stopped */
1825 /* ABORT is only for the active req */
1829 thrd
->req
[active
].r
= NULL
;
1830 mark_free(thrd
, active
);
1832 /* Start the next */
1833 case PL330_OP_START
:
1834 if ((active
== -1) && !_start(thrd
))
1842 spin_unlock_irqrestore(&pl330
->lock
, flags
);
1846 /* Reserve an event */
1847 static inline int _alloc_event(struct pl330_thread
*thrd
)
1849 struct pl330_dmac
*pl330
= thrd
->dmac
;
1850 struct pl330_info
*pi
= pl330
->pinfo
;
1853 for (ev
= 0; ev
< pi
->pcfg
.num_events
; ev
++)
1854 if (pl330
->events
[ev
] == -1) {
1855 pl330
->events
[ev
] = thrd
->id
;
1862 static bool _chan_ns(const struct pl330_info
*pi
, int i
)
1864 return pi
->pcfg
.irq_ns
& (1 << i
);
1867 /* Upon success, returns IdentityToken for the
1868 * allocated channel, NULL otherwise.
1870 static void *pl330_request_channel(const struct pl330_info
*pi
)
1872 struct pl330_thread
*thrd
= NULL
;
1873 struct pl330_dmac
*pl330
;
1874 unsigned long flags
;
1877 if (!pi
|| !pi
->pl330_data
)
1880 pl330
= pi
->pl330_data
;
1882 if (pl330
->state
== DYING
)
1885 chans
= pi
->pcfg
.num_chan
;
1887 spin_lock_irqsave(&pl330
->lock
, flags
);
1889 for (i
= 0; i
< chans
; i
++) {
1890 thrd
= &pl330
->channels
[i
];
1891 if ((thrd
->free
) && (!_manager_ns(thrd
) ||
1893 thrd
->ev
= _alloc_event(thrd
);
1894 if (thrd
->ev
>= 0) {
1897 thrd
->req
[0].r
= NULL
;
1899 thrd
->req
[1].r
= NULL
;
1907 spin_unlock_irqrestore(&pl330
->lock
, flags
);
1912 /* Release an event */
1913 static inline void _free_event(struct pl330_thread
*thrd
, int ev
)
1915 struct pl330_dmac
*pl330
= thrd
->dmac
;
1916 struct pl330_info
*pi
= pl330
->pinfo
;
1918 /* If the event is valid and was held by the thread */
1919 if (ev
>= 0 && ev
< pi
->pcfg
.num_events
1920 && pl330
->events
[ev
] == thrd
->id
)
1921 pl330
->events
[ev
] = -1;
1924 static void pl330_release_channel(void *ch_id
)
1926 struct pl330_thread
*thrd
= ch_id
;
1927 struct pl330_dmac
*pl330
;
1928 unsigned long flags
;
1930 if (!thrd
|| thrd
->free
)
1935 _callback(thrd
->req
[1 - thrd
->lstenq
].r
, PL330_ERR_ABORT
);
1936 _callback(thrd
->req
[thrd
->lstenq
].r
, PL330_ERR_ABORT
);
1940 spin_lock_irqsave(&pl330
->lock
, flags
);
1941 _free_event(thrd
, thrd
->ev
);
1943 spin_unlock_irqrestore(&pl330
->lock
, flags
);
1946 /* Initialize the structure for PL330 configuration, that can be used
1947 * by the client driver the make best use of the DMAC
1949 static void read_dmac_config(struct pl330_info
*pi
)
1951 void __iomem
*regs
= pi
->base
;
1954 val
= readl(regs
+ CRD
) >> CRD_DATA_WIDTH_SHIFT
;
1955 val
&= CRD_DATA_WIDTH_MASK
;
1956 pi
->pcfg
.data_bus_width
= 8 * (1 << val
);
1958 val
= readl(regs
+ CRD
) >> CRD_DATA_BUFF_SHIFT
;
1959 val
&= CRD_DATA_BUFF_MASK
;
1960 pi
->pcfg
.data_buf_dep
= val
+ 1;
1962 val
= readl(regs
+ CR0
) >> CR0_NUM_CHANS_SHIFT
;
1963 val
&= CR0_NUM_CHANS_MASK
;
1965 pi
->pcfg
.num_chan
= val
;
1967 val
= readl(regs
+ CR0
);
1968 if (val
& CR0_PERIPH_REQ_SET
) {
1969 val
= (val
>> CR0_NUM_PERIPH_SHIFT
) & CR0_NUM_PERIPH_MASK
;
1971 pi
->pcfg
.num_peri
= val
;
1972 pi
->pcfg
.peri_ns
= readl(regs
+ CR4
);
1974 pi
->pcfg
.num_peri
= 0;
1977 val
= readl(regs
+ CR0
);
1978 if (val
& CR0_BOOT_MAN_NS
)
1979 pi
->pcfg
.mode
|= DMAC_MODE_NS
;
1981 pi
->pcfg
.mode
&= ~DMAC_MODE_NS
;
1983 val
= readl(regs
+ CR0
) >> CR0_NUM_EVENTS_SHIFT
;
1984 val
&= CR0_NUM_EVENTS_MASK
;
1986 pi
->pcfg
.num_events
= val
;
1988 pi
->pcfg
.irq_ns
= readl(regs
+ CR3
);
1990 pi
->pcfg
.periph_id
= get_id(pi
, PERIPH_ID
);
1991 pi
->pcfg
.pcell_id
= get_id(pi
, PCELL_ID
);
1994 static inline void _reset_thread(struct pl330_thread
*thrd
)
1996 struct pl330_dmac
*pl330
= thrd
->dmac
;
1997 struct pl330_info
*pi
= pl330
->pinfo
;
1999 thrd
->req
[0].mc_cpu
= pl330
->mcode_cpu
2000 + (thrd
->id
* pi
->mcbufsz
);
2001 thrd
->req
[0].mc_bus
= pl330
->mcode_bus
2002 + (thrd
->id
* pi
->mcbufsz
);
2003 thrd
->req
[0].r
= NULL
;
2006 thrd
->req
[1].mc_cpu
= thrd
->req
[0].mc_cpu
2008 thrd
->req
[1].mc_bus
= thrd
->req
[0].mc_bus
2010 thrd
->req
[1].r
= NULL
;
2014 static int dmac_alloc_threads(struct pl330_dmac
*pl330
)
2016 struct pl330_info
*pi
= pl330
->pinfo
;
2017 int chans
= pi
->pcfg
.num_chan
;
2018 struct pl330_thread
*thrd
;
2021 /* Allocate 1 Manager and 'chans' Channel threads */
2022 pl330
->channels
= kzalloc((1 + chans
) * sizeof(*thrd
),
2024 if (!pl330
->channels
)
2027 /* Init Channel threads */
2028 for (i
= 0; i
< chans
; i
++) {
2029 thrd
= &pl330
->channels
[i
];
2032 _reset_thread(thrd
);
2036 /* MANAGER is indexed at the end */
2037 thrd
= &pl330
->channels
[chans
];
2041 pl330
->manager
= thrd
;
2046 static int dmac_alloc_resources(struct pl330_dmac
*pl330
)
2048 struct pl330_info
*pi
= pl330
->pinfo
;
2049 int chans
= pi
->pcfg
.num_chan
;
2053 * Alloc MicroCode buffer for 'chans' Channel threads.
2054 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
2056 pl330
->mcode_cpu
= dma_alloc_coherent(pi
->dev
,
2057 chans
* pi
->mcbufsz
,
2058 &pl330
->mcode_bus
, GFP_KERNEL
);
2059 if (!pl330
->mcode_cpu
) {
2060 dev_err(pi
->dev
, "%s:%d Can't allocate memory!\n",
2061 __func__
, __LINE__
);
2065 ret
= dmac_alloc_threads(pl330
);
		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
2069 dma_free_coherent(pi
->dev
,
2070 chans
* pi
->mcbufsz
,
2071 pl330
->mcode_cpu
, pl330
->mcode_bus
);
2078 static int pl330_add(struct pl330_info
*pi
)
2080 struct pl330_dmac
*pl330
;
2084 if (!pi
|| !pi
->dev
)
2087 /* If already added */
2092 * If the SoC can perform reset on the DMAC, then do it
2093 * before reading its configuration.
2100 /* Check if we can handle this DMAC */
2101 if ((get_id(pi
, PERIPH_ID
) & 0xfffff) != PERIPH_ID_VAL
2102 || get_id(pi
, PCELL_ID
) != PCELL_ID_VAL
) {
2103 dev_err(pi
->dev
, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
2104 get_id(pi
, PERIPH_ID
), get_id(pi
, PCELL_ID
));
2108 /* Read the configuration of the DMAC */
2109 read_dmac_config(pi
);
2111 if (pi
->pcfg
.num_events
== 0) {
2112 dev_err(pi
->dev
, "%s:%d Can't work without events!\n",
2113 __func__
, __LINE__
);
2117 pl330
= kzalloc(sizeof(*pl330
), GFP_KERNEL
);
2119 dev_err(pi
->dev
, "%s:%d Can't allocate memory!\n",
2120 __func__
, __LINE__
);
2124 /* Assign the info structure and private data */
2126 pi
->pl330_data
= pl330
;
2128 spin_lock_init(&pl330
->lock
);
2130 INIT_LIST_HEAD(&pl330
->req_done
);
2132 /* Use default MC buffer size if not provided */
2134 pi
->mcbufsz
= MCODE_BUFF_PER_REQ
* 2;
2136 /* Mark all events as free */
2137 for (i
= 0; i
< pi
->pcfg
.num_events
; i
++)
2138 pl330
->events
[i
] = -1;
2140 /* Allocate resources needed by the DMAC */
2141 ret
= dmac_alloc_resources(pl330
);
2143 dev_err(pi
->dev
, "Unable to create channels for DMAC\n");
2148 tasklet_init(&pl330
->tasks
, pl330_dotask
, (unsigned long) pl330
);
2150 pl330
->state
= INIT
;
2155 static int dmac_free_threads(struct pl330_dmac
*pl330
)
2157 struct pl330_info
*pi
= pl330
->pinfo
;
2158 int chans
= pi
->pcfg
.num_chan
;
2159 struct pl330_thread
*thrd
;
2162 /* Release Channel threads */
2163 for (i
= 0; i
< chans
; i
++) {
2164 thrd
= &pl330
->channels
[i
];
2165 pl330_release_channel((void *)thrd
);
2169 kfree(pl330
->channels
);
2174 static void dmac_free_resources(struct pl330_dmac
*pl330
)
2176 struct pl330_info
*pi
= pl330
->pinfo
;
2177 int chans
= pi
->pcfg
.num_chan
;
2179 dmac_free_threads(pl330
);
2181 dma_free_coherent(pi
->dev
, chans
* pi
->mcbufsz
,
2182 pl330
->mcode_cpu
, pl330
->mcode_bus
);
2185 static void pl330_del(struct pl330_info
*pi
)
2187 struct pl330_dmac
*pl330
;
2189 if (!pi
|| !pi
->pl330_data
)
2192 pl330
= pi
->pl330_data
;
2194 pl330
->state
= UNINIT
;
2196 tasklet_kill(&pl330
->tasks
);
2198 /* Free DMAC resources */
2199 dmac_free_resources(pl330
);
2202 pi
->pl330_data
= NULL
;
2205 /* forward declaration */
2206 static struct amba_driver pl330_driver
;
2208 static inline struct dma_pl330_chan
*
2209 to_pchan(struct dma_chan
*ch
)
2214 return container_of(ch
, struct dma_pl330_chan
, chan
);
2217 static inline struct dma_pl330_desc
*
2218 to_desc(struct dma_async_tx_descriptor
*tx
)
2220 return container_of(tx
, struct dma_pl330_desc
, txd
);
2223 static inline void free_desc_list(struct list_head
*list
)
2225 struct dma_pl330_dmac
*pdmac
;
2226 struct dma_pl330_desc
*desc
;
2227 struct dma_pl330_chan
*pch
= NULL
;
2228 unsigned long flags
;
2230 /* Finish off the work list */
2231 list_for_each_entry(desc
, list
, node
) {
2232 dma_async_tx_callback callback
;
2235 /* All desc in a list belong to same channel */
2237 callback
= desc
->txd
.callback
;
2238 param
= desc
->txd
.callback_param
;
2246 /* pch will be unset if list was empty */
2252 spin_lock_irqsave(&pdmac
->pool_lock
, flags
);
2253 list_splice_tail_init(list
, &pdmac
->desc_pool
);
2254 spin_unlock_irqrestore(&pdmac
->pool_lock
, flags
);
2257 static inline void handle_cyclic_desc_list(struct list_head
*list
)
2259 struct dma_pl330_desc
*desc
;
2260 struct dma_pl330_chan
*pch
= NULL
;
2261 unsigned long flags
;
2263 list_for_each_entry(desc
, list
, node
) {
2264 dma_async_tx_callback callback
;
2266 /* Change status to reload it */
2267 desc
->status
= PREP
;
2269 callback
= desc
->txd
.callback
;
2271 callback(desc
->txd
.callback_param
);
2274 /* pch will be unset if list was empty */
2278 spin_lock_irqsave(&pch
->lock
, flags
);
2279 list_splice_tail_init(list
, &pch
->work_list
);
2280 spin_unlock_irqrestore(&pch
->lock
, flags
);
2283 static inline void fill_queue(struct dma_pl330_chan
*pch
)
2285 struct dma_pl330_desc
*desc
;
2288 list_for_each_entry(desc
, &pch
->work_list
, node
) {
2290 /* If already submitted */
2291 if (desc
->status
== BUSY
)
2294 ret
= pl330_submit_req(pch
->pl330_chid
,
2297 desc
->status
= BUSY
;
2299 } else if (ret
== -EAGAIN
) {
2300 /* QFull or DMAC Dying */
2303 /* Unacceptable request */
2304 desc
->status
= DONE
;
2305 dev_err(pch
->dmac
->pif
.dev
, "%s:%d Bad Desc(%d)\n",
2306 __func__
, __LINE__
, desc
->txd
.cookie
);
2307 tasklet_schedule(&pch
->task
);
2312 static void pl330_tasklet(unsigned long data
)
2314 struct dma_pl330_chan
*pch
= (struct dma_pl330_chan
*)data
;
2315 struct dma_pl330_desc
*desc
, *_dt
;
2316 unsigned long flags
;
2319 spin_lock_irqsave(&pch
->lock
, flags
);
2321 /* Pick up ripe tomatoes */
2322 list_for_each_entry_safe(desc
, _dt
, &pch
->work_list
, node
)
2323 if (desc
->status
== DONE
) {
2325 dma_cookie_complete(&desc
->txd
);
2326 list_move_tail(&desc
->node
, &list
);
2329 /* Try to submit a req imm. next to the last completed cookie */
2332 /* Make sure the PL330 Channel thread is active */
2333 pl330_chan_ctrl(pch
->pl330_chid
, PL330_OP_START
);
2335 spin_unlock_irqrestore(&pch
->lock
, flags
);
2338 handle_cyclic_desc_list(&list
);
2340 free_desc_list(&list
);
2343 static void dma_pl330_rqcb(void *token
, enum pl330_op_err err
)
2345 struct dma_pl330_desc
*desc
= token
;
2346 struct dma_pl330_chan
*pch
= desc
->pchan
;
2347 unsigned long flags
;
2349 /* If desc aborted */
2353 spin_lock_irqsave(&pch
->lock
, flags
);
2355 desc
->status
= DONE
;
2357 spin_unlock_irqrestore(&pch
->lock
, flags
);
2359 tasklet_schedule(&pch
->task
);
2362 bool pl330_filter(struct dma_chan
*chan
, void *param
)
2366 if (chan
->device
->dev
->driver
!= &pl330_driver
.drv
)
2370 if (chan
->device
->dev
->of_node
) {
2371 const __be32
*prop_value
;
2373 struct device_node
*node
;
2375 prop_value
= ((struct property
*)param
)->value
;
2376 phandle
= be32_to_cpup(prop_value
++);
2377 node
= of_find_node_by_phandle(phandle
);
2378 return ((chan
->private == node
) &&
2379 (chan
->chan_id
== be32_to_cpup(prop_value
)));
2383 peri_id
= chan
->private;
2384 return *peri_id
== (unsigned)param
;
2386 EXPORT_SYMBOL(pl330_filter
);
2388 static int pl330_alloc_chan_resources(struct dma_chan
*chan
)
2390 struct dma_pl330_chan
*pch
= to_pchan(chan
);
2391 struct dma_pl330_dmac
*pdmac
= pch
->dmac
;
2392 unsigned long flags
;
2394 spin_lock_irqsave(&pch
->lock
, flags
);
2396 dma_cookie_init(chan
);
2397 pch
->cyclic
= false;
2399 pch
->pl330_chid
= pl330_request_channel(&pdmac
->pif
);
2400 if (!pch
->pl330_chid
) {
2401 spin_unlock_irqrestore(&pch
->lock
, flags
);
2405 tasklet_init(&pch
->task
, pl330_tasklet
, (unsigned long) pch
);
2407 spin_unlock_irqrestore(&pch
->lock
, flags
);
2412 static int pl330_control(struct dma_chan
*chan
, enum dma_ctrl_cmd cmd
, unsigned long arg
)
2414 struct dma_pl330_chan
*pch
= to_pchan(chan
);
2415 struct dma_pl330_desc
*desc
, *_dt
;
2416 unsigned long flags
;
2417 struct dma_pl330_dmac
*pdmac
= pch
->dmac
;
2418 struct dma_slave_config
*slave_config
;
2422 case DMA_TERMINATE_ALL
:
2423 spin_lock_irqsave(&pch
->lock
, flags
);
2425 /* FLUSH the PL330 Channel thread */
2426 pl330_chan_ctrl(pch
->pl330_chid
, PL330_OP_FLUSH
);
2428 /* Mark all desc done */
2429 list_for_each_entry_safe(desc
, _dt
, &pch
->work_list
, node
) {
2430 desc
->status
= DONE
;
2431 list_move_tail(&desc
->node
, &list
);
2434 list_splice_tail_init(&list
, &pdmac
->desc_pool
);
2435 spin_unlock_irqrestore(&pch
->lock
, flags
);
2437 case DMA_SLAVE_CONFIG
:
2438 slave_config
= (struct dma_slave_config
*)arg
;
2440 if (slave_config
->direction
== DMA_MEM_TO_DEV
) {
2441 if (slave_config
->dst_addr
)
2442 pch
->fifo_addr
= slave_config
->dst_addr
;
2443 if (slave_config
->dst_addr_width
)
2444 pch
->burst_sz
= __ffs(slave_config
->dst_addr_width
);
2445 if (slave_config
->dst_maxburst
)
2446 pch
->burst_len
= slave_config
->dst_maxburst
;
2447 } else if (slave_config
->direction
== DMA_DEV_TO_MEM
) {
2448 if (slave_config
->src_addr
)
2449 pch
->fifo_addr
= slave_config
->src_addr
;
2450 if (slave_config
->src_addr_width
)
2451 pch
->burst_sz
= __ffs(slave_config
->src_addr_width
);
2452 if (slave_config
->src_maxburst
)
2453 pch
->burst_len
= slave_config
->src_maxburst
;
		dev_err(pch->dmac->pif.dev, "Unsupported command\n");
2464 static void pl330_free_chan_resources(struct dma_chan
*chan
)
2466 struct dma_pl330_chan
*pch
= to_pchan(chan
);
2467 unsigned long flags
;
2469 spin_lock_irqsave(&pch
->lock
, flags
);
2471 tasklet_kill(&pch
->task
);
2473 pl330_release_channel(pch
->pl330_chid
);
2474 pch
->pl330_chid
= NULL
;
2477 list_splice_tail_init(&pch
->work_list
, &pch
->dmac
->desc_pool
);
2479 spin_unlock_irqrestore(&pch
->lock
, flags
);
2482 static enum dma_status
2483 pl330_tx_status(struct dma_chan
*chan
, dma_cookie_t cookie
,
2484 struct dma_tx_state
*txstate
)
2486 return dma_cookie_status(chan
, cookie
, txstate
);
2489 static void pl330_issue_pending(struct dma_chan
*chan
)
2491 pl330_tasklet((unsigned long) to_pchan(chan
));
2495 * We returned the last one of the circular list of descriptor(s)
2496 * from prep_xxx, so the argument to submit corresponds to the last
2497 * descriptor of the list.
2499 static dma_cookie_t
pl330_tx_submit(struct dma_async_tx_descriptor
*tx
)
2501 struct dma_pl330_desc
*desc
, *last
= to_desc(tx
);
2502 struct dma_pl330_chan
*pch
= to_pchan(tx
->chan
);
2503 dma_cookie_t cookie
;
2504 unsigned long flags
;
2506 spin_lock_irqsave(&pch
->lock
, flags
);
2508 /* Assign cookies to all nodes */
2509 while (!list_empty(&last
->node
)) {
2510 desc
= list_entry(last
->node
.next
, struct dma_pl330_desc
, node
);
2512 dma_cookie_assign(&desc
->txd
);
2514 list_move_tail(&desc
->node
, &pch
->work_list
);
2517 cookie
= dma_cookie_assign(&last
->txd
);
2518 list_add_tail(&last
->node
, &pch
->work_list
);
2519 spin_unlock_irqrestore(&pch
->lock
, flags
);
2524 static inline void _init_desc(struct dma_pl330_desc
*desc
)
2527 desc
->req
.x
= &desc
->px
;
2528 desc
->req
.token
= desc
;
2529 desc
->rqcfg
.swap
= SWAP_NO
;
2530 desc
->rqcfg
.privileged
= 0;
2531 desc
->rqcfg
.insnaccess
= 0;
2532 desc
->rqcfg
.scctl
= SCCTRL0
;
2533 desc
->rqcfg
.dcctl
= DCCTRL0
;
2534 desc
->req
.cfg
= &desc
->rqcfg
;
2535 desc
->req
.xfer_cb
= dma_pl330_rqcb
;
2536 desc
->txd
.tx_submit
= pl330_tx_submit
;
2538 INIT_LIST_HEAD(&desc
->node
);
2541 /* Returns the number of descriptors added to the DMAC pool */
2542 int add_desc(struct dma_pl330_dmac
*pdmac
, gfp_t flg
, int count
)
2544 struct dma_pl330_desc
*desc
;
2545 unsigned long flags
;
2551 desc
= kmalloc(count
* sizeof(*desc
), flg
);
2555 spin_lock_irqsave(&pdmac
->pool_lock
, flags
);
2557 for (i
= 0; i
< count
; i
++) {
2558 _init_desc(&desc
[i
]);
2559 list_add_tail(&desc
[i
].node
, &pdmac
->desc_pool
);
2562 spin_unlock_irqrestore(&pdmac
->pool_lock
, flags
);
2567 static struct dma_pl330_desc
*
2568 pluck_desc(struct dma_pl330_dmac
*pdmac
)
2570 struct dma_pl330_desc
*desc
= NULL
;
2571 unsigned long flags
;
2576 spin_lock_irqsave(&pdmac
->pool_lock
, flags
);
2578 if (!list_empty(&pdmac
->desc_pool
)) {
2579 desc
= list_entry(pdmac
->desc_pool
.next
,
2580 struct dma_pl330_desc
, node
);
2582 list_del_init(&desc
->node
);
2584 desc
->status
= PREP
;
2585 desc
->txd
.callback
= NULL
;
2588 spin_unlock_irqrestore(&pdmac
->pool_lock
, flags
);
2593 static struct dma_pl330_desc
*pl330_get_desc(struct dma_pl330_chan
*pch
)
2595 struct dma_pl330_dmac
*pdmac
= pch
->dmac
;
2596 u8
*peri_id
= pch
->chan
.private;
2597 struct dma_pl330_desc
*desc
;
2599 /* Pluck one desc from the pool of DMAC */
2600 desc
= pluck_desc(pdmac
);
2602 /* If the DMAC pool is empty, alloc new */
2604 if (!add_desc(pdmac
, GFP_ATOMIC
, 1))
2608 desc
= pluck_desc(pdmac
);
2610 dev_err(pch
->dmac
->pif
.dev
,
2611 "%s:%d ALERT!\n", __func__
, __LINE__
);
2616 /* Initialize the descriptor */
2618 desc
->txd
.cookie
= 0;
2619 async_tx_ack(&desc
->txd
);
2621 desc
->req
.peri
= peri_id
? pch
->chan
.chan_id
: 0;
2622 desc
->rqcfg
.pcfg
= &pch
->dmac
->pif
.pcfg
;
2624 dma_async_tx_descriptor_init(&desc
->txd
, &pch
->chan
);
2629 static inline void fill_px(struct pl330_xfer
*px
,
2630 dma_addr_t dst
, dma_addr_t src
, size_t len
)
2638 static struct dma_pl330_desc
*
2639 __pl330_prep_dma_memcpy(struct dma_pl330_chan
*pch
, dma_addr_t dst
,
2640 dma_addr_t src
, size_t len
)
2642 struct dma_pl330_desc
*desc
= pl330_get_desc(pch
);
2645 dev_err(pch
->dmac
->pif
.dev
, "%s:%d Unable to fetch desc\n",
2646 __func__
, __LINE__
);
2651 * Ideally we should lookout for reqs bigger than
2652 * those that can be programmed with 256 bytes of
2653 * MC buffer, but considering a req size is seldom
2654 * going to be word-unaligned and more than 200MB,
2656 * Also, should the limit is reached we'd rather
2657 * have the platform increase MC buffer size than
2658 * complicating this API driver.
2660 fill_px(&desc
->px
, dst
, src
, len
);
2665 /* Call after fixing burst size */
2666 static inline int get_burst_len(struct dma_pl330_desc
*desc
, size_t len
)
2668 struct dma_pl330_chan
*pch
= desc
->pchan
;
2669 struct pl330_info
*pi
= &pch
->dmac
->pif
;
2672 burst_len
= pi
->pcfg
.data_bus_width
/ 8;
2673 burst_len
*= pi
->pcfg
.data_buf_dep
;
2674 burst_len
>>= desc
->rqcfg
.brst_size
;
2676 /* src/dst_burst_len can't be more than 16 */
2680 while (burst_len
> 1) {
2681 if (!(len
% (burst_len
<< desc
->rqcfg
.brst_size
)))
2689 static struct dma_async_tx_descriptor
*pl330_prep_dma_cyclic(
2690 struct dma_chan
*chan
, dma_addr_t dma_addr
, size_t len
,
2691 size_t period_len
, enum dma_transfer_direction direction
,
2694 struct dma_pl330_desc
*desc
;
2695 struct dma_pl330_chan
*pch
= to_pchan(chan
);
2699 desc
= pl330_get_desc(pch
);
2701 dev_err(pch
->dmac
->pif
.dev
, "%s:%d Unable to fetch desc\n",
2702 __func__
, __LINE__
);
2706 switch (direction
) {
2707 case DMA_MEM_TO_DEV
:
2708 desc
->rqcfg
.src_inc
= 1;
2709 desc
->rqcfg
.dst_inc
= 0;
2710 desc
->req
.rqtype
= MEMTODEV
;
2712 dst
= pch
->fifo_addr
;
2714 case DMA_DEV_TO_MEM
:
2715 desc
->rqcfg
.src_inc
= 0;
2716 desc
->rqcfg
.dst_inc
= 1;
2717 desc
->req
.rqtype
= DEVTOMEM
;
2718 src
= pch
->fifo_addr
;
2722 dev_err(pch
->dmac
->pif
.dev
, "%s:%d Invalid dma direction\n",
2723 __func__
, __LINE__
);
2727 desc
->rqcfg
.brst_size
= pch
->burst_sz
;
2728 desc
->rqcfg
.brst_len
= 1;
2732 fill_px(&desc
->px
, dst
, src
, period_len
);
2737 static struct dma_async_tx_descriptor
*
2738 pl330_prep_dma_memcpy(struct dma_chan
*chan
, dma_addr_t dst
,
2739 dma_addr_t src
, size_t len
, unsigned long flags
)
2741 struct dma_pl330_desc
*desc
;
2742 struct dma_pl330_chan
*pch
= to_pchan(chan
);
2743 struct pl330_info
*pi
;
2746 if (unlikely(!pch
|| !len
))
2749 pi
= &pch
->dmac
->pif
;
2751 desc
= __pl330_prep_dma_memcpy(pch
, dst
, src
, len
);
2755 desc
->rqcfg
.src_inc
= 1;
2756 desc
->rqcfg
.dst_inc
= 1;
2757 desc
->req
.rqtype
= MEMTOMEM
;
2759 /* Select max possible burst size */
2760 burst
= pi
->pcfg
.data_bus_width
/ 8;
2768 desc
->rqcfg
.brst_size
= 0;
2769 while (burst
!= (1 << desc
->rqcfg
.brst_size
))
2770 desc
->rqcfg
.brst_size
++;
2772 desc
->rqcfg
.brst_len
= get_burst_len(desc
, len
);
2774 desc
->txd
.flags
= flags
;
2779 static struct dma_async_tx_descriptor
*
2780 pl330_prep_slave_sg(struct dma_chan
*chan
, struct scatterlist
*sgl
,
2781 unsigned int sg_len
, enum dma_transfer_direction direction
,
2782 unsigned long flg
, void *context
)
2784 struct dma_pl330_desc
*first
, *desc
= NULL
;
2785 struct dma_pl330_chan
*pch
= to_pchan(chan
);
2786 struct scatterlist
*sg
;
2787 unsigned long flags
;
2791 if (unlikely(!pch
|| !sgl
|| !sg_len
))
2794 addr
= pch
->fifo_addr
;
2798 for_each_sg(sgl
, sg
, sg_len
, i
) {
2800 desc
= pl330_get_desc(pch
);
2802 struct dma_pl330_dmac
*pdmac
= pch
->dmac
;
2804 dev_err(pch
->dmac
->pif
.dev
,
2805 "%s:%d Unable to fetch desc\n",
2806 __func__
, __LINE__
);
2810 spin_lock_irqsave(&pdmac
->pool_lock
, flags
);
2812 while (!list_empty(&first
->node
)) {
2813 desc
= list_entry(first
->node
.next
,
2814 struct dma_pl330_desc
, node
);
2815 list_move_tail(&desc
->node
, &pdmac
->desc_pool
);
2818 list_move_tail(&first
->node
, &pdmac
->desc_pool
);
2820 spin_unlock_irqrestore(&pdmac
->pool_lock
, flags
);
2828 list_add_tail(&desc
->node
, &first
->node
);
2830 if (direction
== DMA_MEM_TO_DEV
) {
2831 desc
->rqcfg
.src_inc
= 1;
2832 desc
->rqcfg
.dst_inc
= 0;
2833 desc
->req
.rqtype
= MEMTODEV
;
2835 addr
, sg_dma_address(sg
), sg_dma_len(sg
));
2837 desc
->rqcfg
.src_inc
= 0;
2838 desc
->rqcfg
.dst_inc
= 1;
2839 desc
->req
.rqtype
= DEVTOMEM
;
2841 sg_dma_address(sg
), addr
, sg_dma_len(sg
));
2844 desc
->rqcfg
.brst_size
= pch
->burst_sz
;
2845 desc
->rqcfg
.brst_len
= 1;
2848 /* Return the last desc in the chain */
2849 desc
->txd
.flags
= flg
;
2853 static irqreturn_t
pl330_irq_handler(int irq
, void *data
)
2855 if (pl330_update(data
))
2861 static int __devinit
2862 pl330_probe(struct amba_device
*adev
, const struct amba_id
*id
)
2864 struct dma_pl330_platdata
*pdat
;
2865 struct dma_pl330_dmac
*pdmac
;
2866 struct dma_pl330_chan
*pch
;
2867 struct pl330_info
*pi
;
2868 struct dma_device
*pd
;
2869 struct resource
*res
;
2873 pdat
= adev
->dev
.platform_data
;
2875 /* Allocate a new DMAC and its Channels */
2876 pdmac
= kzalloc(sizeof(*pdmac
), GFP_KERNEL
);
2878 dev_err(&adev
->dev
, "unable to allocate mem\n");
2883 pi
->dev
= &adev
->dev
;
2884 pi
->pl330_data
= NULL
;
2885 pi
->mcbufsz
= pdat
? pdat
->mcbuf_sz
: 0;
2888 request_mem_region(res
->start
, resource_size(res
), "dma-pl330");
2890 pi
->base
= ioremap(res
->start
, resource_size(res
));
2896 pdmac
->clk
= clk_get(&adev
->dev
, "dma");
2897 if (IS_ERR(pdmac
->clk
)) {
2898 dev_err(&adev
->dev
, "Cannot get operation clock.\n");
2903 amba_set_drvdata(adev
, pdmac
);
2905 #ifndef CONFIG_PM_RUNTIME
2906 /* enable dma clk */
2907 clk_enable(pdmac
->clk
);
2911 ret
= request_irq(irq
, pl330_irq_handler
, 0,
2912 dev_name(&adev
->dev
), pi
);
2916 ret
= pl330_add(pi
);
2920 INIT_LIST_HEAD(&pdmac
->desc_pool
);
2921 spin_lock_init(&pdmac
->pool_lock
);
2923 /* Create a descriptor pool of default size */
2924 if (!add_desc(pdmac
, GFP_KERNEL
, NR_DEFAULT_DESC
))
2925 dev_warn(&adev
->dev
, "unable to allocate desc\n");
2928 INIT_LIST_HEAD(&pd
->channels
);
2930 /* Initialize channel parameters */
2932 num_chan
= max_t(int, pdat
->nr_valid_peri
, pi
->pcfg
.num_chan
);
2934 num_chan
= max_t(int, pi
->pcfg
.num_peri
, pi
->pcfg
.num_chan
);
2936 pdmac
->peripherals
= kzalloc(num_chan
* sizeof(*pch
), GFP_KERNEL
);
2938 for (i
= 0; i
< num_chan
; i
++) {
2939 pch
= &pdmac
->peripherals
[i
];
2940 if (!adev
->dev
.of_node
)
2941 pch
->chan
.private = pdat
? &pdat
->peri_id
[i
] : NULL
;
2943 pch
->chan
.private = adev
->dev
.of_node
;
2945 INIT_LIST_HEAD(&pch
->work_list
);
2946 spin_lock_init(&pch
->lock
);
2947 pch
->pl330_chid
= NULL
;
2948 pch
->chan
.device
= pd
;
2951 /* Add the channel to the DMAC list */
2952 list_add_tail(&pch
->chan
.device_node
, &pd
->channels
);
2955 pd
->dev
= &adev
->dev
;
2957 pd
->cap_mask
= pdat
->cap_mask
;
2959 dma_cap_set(DMA_MEMCPY
, pd
->cap_mask
);
2960 if (pi
->pcfg
.num_peri
) {
2961 dma_cap_set(DMA_SLAVE
, pd
->cap_mask
);
2962 dma_cap_set(DMA_CYCLIC
, pd
->cap_mask
);
2966 pd
->device_alloc_chan_resources
= pl330_alloc_chan_resources
;
2967 pd
->device_free_chan_resources
= pl330_free_chan_resources
;
2968 pd
->device_prep_dma_memcpy
= pl330_prep_dma_memcpy
;
2969 pd
->device_prep_dma_cyclic
= pl330_prep_dma_cyclic
;
2970 pd
->device_tx_status
= pl330_tx_status
;
2971 pd
->device_prep_slave_sg
= pl330_prep_slave_sg
;
2972 pd
->device_control
= pl330_control
;
2973 pd
->device_issue_pending
= pl330_issue_pending
;
2975 ret
= dma_async_device_register(pd
);
2977 dev_err(&adev
->dev
, "unable to register DMAC\n");
2981 dev_info(&adev
->dev
,
2982 "Loaded driver for PL330 DMAC-%d\n", adev
->periphid
);
2983 dev_info(&adev
->dev
,
2984 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
2985 pi
->pcfg
.data_buf_dep
,
2986 pi
->pcfg
.data_bus_width
/ 8, pi
->pcfg
.num_chan
,
2987 pi
->pcfg
.num_peri
, pi
->pcfg
.num_events
);
2996 #ifndef CONFIG_PM_RUNTIME
2997 clk_disable(pdmac
->clk
);
2999 clk_put(pdmac
->clk
);
3003 release_mem_region(res
->start
, resource_size(res
));
3009 static int __devexit
pl330_remove(struct amba_device
*adev
)
3011 struct dma_pl330_dmac
*pdmac
= amba_get_drvdata(adev
);
3012 struct dma_pl330_chan
*pch
, *_p
;
3013 struct pl330_info
*pi
;
3014 struct resource
*res
;
3020 amba_set_drvdata(adev
, NULL
);
3023 list_for_each_entry_safe(pch
, _p
, &pdmac
->ddma
.channels
,
3026 /* Remove the channel */
3027 list_del(&pch
->chan
.device_node
);
3029 /* Flush the channel */
3030 pl330_control(&pch
->chan
, DMA_TERMINATE_ALL
, 0);
3031 pl330_free_chan_resources(&pch
->chan
);
3044 release_mem_region(res
->start
, resource_size(res
));
3046 #ifndef CONFIG_PM_RUNTIME
3047 clk_disable(pdmac
->clk
);
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");