/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of_dma.h>
#include <linux/err.h>

#include "dmaengine.h"

#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32

enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
			     | PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH	0x54
#define CMD_DMAEND	0x00
#define CMD_DMAFLUSHP	0x35
#define CMD_DMAGO	0xa0
#define CMD_DMALD	0x04
#define CMD_DMALDP	0x25
#define CMD_DMALP	0x20
#define CMD_DMALPEND	0x28
#define CMD_DMAKILL	0x01
#define CMD_DMAMOV	0xbc
#define CMD_DMANOP	0x18
#define CMD_DMARMB	0x12
#define CMD_DMASEV	0x34
#define CMD_DMAST	0x08
#define CMD_DMASTP	0x29
#define CMD_DMASTZ	0x0c
#define CMD_DMAWFE	0x36
#define CMD_DMAWFP	0x30
#define CMD_DMAWMB	0x13

#define SZ_DMAFLUSHP	2
#define SZ_DMALPEND	2

#define BRST_LEN(ccr)	((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)	(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
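
/*
 * Illustration of the burst arithmetic above (values are made up, not
 * read from any real CCR): a CCR whose source burst-size field is 2
 * (i.e. 1 << 2 = 4-byte beats) and whose source burst-length field is
 * 7 (i.e. 7 + 1 = 8 beats) gives BRST_SIZE(ccr) = 4 and
 * BRST_LEN(ccr) = 8, so BYTE_TO_BURST(1024, ccr) = 1024 / 4 / 8 = 32
 * bursts, and BURST_TO_BYTE(32, ccr) = 1024 bytes back again.
 */
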
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256
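
/*
 * A sketch of where the figures above come from (not taken from the
 * TRM): two nested DMALP loops iterate up to 256 * 256 = 65536 times,
 * so even at 1 byte/burst a single small loop block moves 64KB, and
 * one 256-byte microcode buffer holds several such blocks besides the
 * DMAMOV/DMASEV/DMAEND book-keeping -- hence multi-MB per request.
 */
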
/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif

/* The number of default descriptors */
#define NR_DEFAULT_DESC	16

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/* Handle to the DMAC provided to the PL330 core */
struct pl330_info {
	/* Owning device */
	struct device	*dev;
	/* Size of MicroCode buffers for each channel. */
	unsigned	mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* PL330 core data, Client must not touch it. */
	void		*pl330_data;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;
};

/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* If NULL, req will be done at last set parameters. */
	struct pl330_reqcfg *cfg;
	/* Pointer to first xfer in the request. */
	struct pl330_xfer *x;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

enum pl330_chan_op {
	/* Start the channel */
	PL330_OP_START,
	/* Abort the active xfer */
	PL330_OP_ABORT,
	/* Stop xfer and flush queue */
	PL330_OP_FLUSH,
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct pl330_req *r;
};

struct _xfer_spec {
	u32 ccr;
	struct pl330_req *r;
	struct pl330_xfer *x;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

/* A DMAC */
struct pl330_dmac {
	spinlock_t		lock;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;
	/* Pointer to platform specific stuff */
	struct pl330_info	*pinfo;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * for the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 * available.
	 */
	struct pl330_thread *thread;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of burst */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
		? true : false;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
		? false : true;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* MANAGER is indexed at the end */
	if (thrd->id == pl330->pinfo->pcfg.num_chan)
		return true;
	else
		return false;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}

static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);

	buf[1] = chan & 0x7;

	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}

/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
static void mark_free(struct pl330_thread *thrd, int idx)
{
	struct _pl330_req *req = &thrd->req[idx];

	_emit_END(0, req->mc_cpu);

	thrd->req_running = -1;
}

static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (!IS_FREE(&thrd->req[idx]))
		req = &thrd->req[idx];
	else {
		idx = thrd->lstenq;
		if (!IS_FREE(&thrd->req[idx]))
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_FAULTING:
		_stop(thrd);

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->r->cfg->pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}

static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}

static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->r->rqtype) {
	case DMA_MEM_TO_DEV:
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_DEV_TO_MEM:
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}

/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
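
/*
 * Worked example for _loop() (hypothetical numbers): for *bursts =
 * 70000 we have 70000 >= 256 * 256, so lcnt0 = lcnt1 = 256 and
 * cyc = 70000 / 256 / 256 = 1. One pass of the nested loops covers
 * 256 * 256 * 1 = 65536 bursts, which _loop() reports back via
 * *bursts; the caller (_setup_loops) then iterates until the
 * remaining 4464 bursts are consumed as well.
 */
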
static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}

static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = pxs->r->x;
	/* Error if xfer length is not aligned at burst size */
	if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
		return -EINVAL;

	pxs->x = x;
	off += _setup_xfer(dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}

static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
static inline bool _is_valid(u32 ccr)
{
	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;

	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;

	if (dcctl == INVALID1 || dcctl == INVALID2
			|| scctl == INVALID1 || scctl == INVALID2)
		return false;
	else
		return true;
}

/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r)
{
	struct pl330_dmac *pl330;
	struct pl330_info *pi;
	struct _xfer_spec xs;
	unsigned long flags;
	void __iomem *regs;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	/* No Req or Unacquired Channel or DMAC */
	if (!r || !thrd || thrd->free)
		return -EINVAL;

	pl330 = thrd->dmac;
	pi = pl330->pinfo;
	regs = pi->base;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (r->rqtype != DMA_MEM_TO_MEM && r->peri >= pi->pcfg.num_peri) {
		dev_info(thrd->dmac->pinfo->dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, r->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Use last settings, if not provided */
	if (r->cfg) {
		/* Prefer Secure Channel */
		if (!_manager_ns(thrd))
			r->cfg->nonsecure = 0;
		else
			r->cfg->nonsecure = 1;

		ccr = _prepare_ccr(r->cfg);
	} else {
		ccr = readl(regs + CC(thrd->id));
	}

	/* If this req doesn't have valid xfer settings */
	if (!_is_valid(ccr)) {
		ret = -EINVAL;
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
			__func__, __LINE__, ccr);
		goto xfer_exit;
	}

	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;

	xs.ccr = ccr;
	xs.r = r;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pi->mcbufsz / 2) {
		dev_info(thrd->dmac->pinfo->dev,
			"%s:%d Try increasing mcbufsz\n",
				__func__, __LINE__);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].r = r;
	_setup_req(0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}

static void dma_pl330_rqcb(struct pl330_req *req, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = container_of(req, struct dma_pl330_desc, req);
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].r, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].r, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;

			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
	struct pl330_req *rqdone, *tmp;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pi->dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			rqdone = thrd->req[active].r;
			thrd->req[active].r = NULL;

			mark_free(thrd, active);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
		list_del(&rqdone->rqd);

		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(rqdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}

static int pl330_chan_ctrl(struct pl330_thread *thrd, enum pl330_chan_op op)
{
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)
		return -EINVAL;

	pl330 = thrd->dmac;
	active = thrd->req_running;

	spin_lock_irqsave(&pl330->lock, flags);

	switch (op) {
	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */
		_stop(thrd);

		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		mark_free(thrd, 0);
		mark_free(thrd, 1);
		break;

	case PL330_OP_ABORT:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* ABORT is only for the active req */
		if (active == -1)
			break;

		thrd->req[active].r = NULL;
		mark_free(thrd, active);

		/* Start the next */
	case PL330_OP_START:
		if ((active == -1) && !_start(thrd))
			ret = -EIO;
		break;

	default:
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
	return ret;
}

/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;
	int ev;

	for (ev = 0; ev < pi->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_info *pi, int i)
{
	return pi->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(const struct pl330_info *pi)
{
	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int chans, i;

	if (!pi || !pi->pl330_data)
		return NULL;

	pl330 = pi->pl330_data;

	if (pl330->state == DYING)
		return NULL;

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pi, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				mark_free(thrd, 0);
				thrd->req[1].r = NULL;
				mark_free(thrd, 1);
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pi->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_info *pi)
{
	void __iomem *regs = pi->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pi->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;
	else
		pi->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pi->pcfg.num_events = val;

	pi->pcfg.irq_ns = readl(regs + CR3);
}

static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].r = NULL;
	mark_free(thrd, 0);

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pi->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pi->mcbufsz / 2;
	thrd->req[1].r = NULL;
	mark_free(thrd, 1);
}

static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}

static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
				chans * pi->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pi->dev,
				chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
		return ret;
	}

	return 0;
}

static int pl330_add(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;
	int i, ret;

	if (!pi || !pi->dev)
		return -EINVAL;

	/* If already added */
	if (pi->pl330_data)
		return -EINVAL;

	/* Check if we can handle this DMAC */
	if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* Assign the info structure and private data */
	pl330->pinfo = pi;
	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pi->mcbufsz)
		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
		kfree(pl330);
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}

static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel(thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}

static void dmac_free_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;

	dmac_free_threads(pl330);

	dma_free_coherent(pi->dev, chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
}

static void pl330_del(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;

	if (!pi || !pi->pl330_data)
		return;

	pl330 = pi->pl330_data;

	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_resources(pl330);

	kfree(pl330);
	pi->pl330_data = NULL;
}

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->thread, &desc->req);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->thread, PL330_OP_START);

	while (!list_empty(&pch->completed_list)) {
		dma_async_tx_callback callback;
		void *callback_param;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (callback) {
			spin_unlock_irqrestore(&pch->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);
}

bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

	peri_id = chan->private;
	return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);
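
/*
 * Sketch of non-DT client usage of the filter (identifiers such as
 * 'peri_id' are hypothetical; the value must match the platform data):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
 */
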
static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
	unsigned int chan_id;

	if (!pdmac)
		return NULL;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pdmac->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
}
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->thread = pl330_request_channel(&pdmac->pif);
	if (!pch->thread) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->thread, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry(desc, &pch->submitted_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->work_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->completed_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
		list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
		list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
		return -ENXIO;
	}

	return 0;
}
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	tasklet_kill(&pch->task);

	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->thread);
	pch->thread = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long)pch);
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
		if (pch->cyclic) {
			desc->txd.callback = last->txd.callback;
			desc->txd.callback_param = last->txd.callback_param;
		}

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->req.x = &desc->px;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.scctl = CCTRL0;
	desc->rqcfg.dcctl = CCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kcalloc(count, sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should lookout for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicating this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
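
/*
 * Example (hypothetical DMAC config): with a 64-bit data bus and a
 * 16-line MFIFO, burst_len starts as 8 * 16 = 128, is shifted down by
 * brst_size = 2 to 32 and clipped to the architectural maximum of 16;
 * for len = 96 the loop then stops at burst_len = 12, the largest
 * value with 96 % (12 << 2) == 0.
 */
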
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned int i;
	dma_addr_t dst;
	dma_addr_t src;

	if (len % period_len != 0)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
		__func__, __LINE__);
		return NULL;
	}

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			src = dma_addr;
			dst = pch->fifo_addr;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_addr;
			dst = dma_addr;
			break;
		default:
			break;
		}

		desc->req.rqtype = direction;
		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		fill_px(&desc->px, dst, src, period_len);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;
	}

	if (!desc)
		return NULL;

	pch->cyclic = true;
	desc->txd.flags = flags;

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = DMA_MEM_TO_MEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
		struct dma_pl330_desc *first)
{
	unsigned long flags;
	struct dma_pl330_desc *desc;

	if (!first)
		return;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	while (!list_empty(&first->node)) {
		desc = list_entry(first->node.next,
				struct dma_pl330_desc, node);
		list_move_tail(&desc->node, &pdmac->desc_pool);
	}

	list_move_tail(&first->node, &pdmac->desc_pool);

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pdmac, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		desc->req.rqtype = direction;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

#define PL330_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)

static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
	struct dma_slave_caps *caps)
{
	caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = false;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	return 0;
}

static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = dev_get_platdata(&adev->dev);

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	pi->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pi->base))
		return PTR_ERR(pi->base);

	amba_set_drvdata(adev, pdmac);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pi);
			if (ret)
				return ret;
		} else {
			break;
		}
	}

	pi->pcfg.periph_id = adev->periphid;
	ret = pl330_add(pi);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	if (pdat)
		num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
	else
		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);

	pdmac->num_peripherals = num_chan;

	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->thread = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
			dma_cap_set(DMA_PRIVATE, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;
	pd->device_slave_caps = pl330_dma_device_slave_caps;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
					 of_dma_pl330_xlate, pdmac);
		if (ret) {
			dev_err(&adev->dev,
			"unable to register DMA to the generic DT DMA helpers\n");
		}
	}
	adev->dev.dma_parms = &pdmac->dma_parms;

	/*
	 * This is the limit for transfers with a buswidth of 1, larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;
probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}
probe_err2:
	pl330_del(pi);

	return ret;
}

static int pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;

	if (!pdmac)
		return 0;

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	dma_async_device_unregister(&pdmac->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");