/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of_dma.h>
#include <linux/err.h>

#include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};
/* Register and Bit field Definitions */
#define DS		0x0
#define DS_ST_STOP	0x0
#define DS_ST_EXEC	0x1
#define DS_ST_CMISS	0x2
#define DS_ST_UPDTPC	0x3
#define DS_ST_WFE	0x4
#define DS_ST_ATBRR	0x5
#define DS_ST_QBUSY	0x6
#define DS_ST_WFP	0x7
#define DS_ST_KILL	0x8
#define DS_ST_CMPLT	0x9
#define DS_ST_FLTCMP	0xe
#define DS_ST_FAULT	0xf

#define INTEN		0x20
#define ES		0x24
#define INTSTATUS	0x28
#define INTCLR		0x2c
#define FSM		0x30
#define FSC		0x34
#define _FTC		0x40
#define FTC(n)		(_FTC + (n)*0x4)

#define _CS		0x100
#define CS(n)		(_CS + (n)*0x8)
#define CS_CNS		(1 << 21)

#define _CPC		0x104
#define CPC(n)		(_CPC + (n)*0x8)

#define _SA		0x400
#define SA(n)		(_SA + (n)*0x20)

#define _DA		0x404
#define DA(n)		(_DA + (n)*0x20)

#define _CC		0x408
#define CC(n)		(_CC + (n)*0x20)

#define CC_SRCINC	(1 << 0)
#define CC_DSTINC	(1 << 14)
#define CC_SRCPRI	(1 << 8)
#define CC_DSTPRI	(1 << 22)
#define CC_SRCNS	(1 << 9)
#define CC_DSTNS	(1 << 23)
#define CC_SRCIA	(1 << 10)
#define CC_DSTIA	(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT	28
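
/*
 * For reference, the CCR bit layout implied by the shifts above
 * (low to high): src_inc[0], src_brst_size[1:3], src_brst_len[4:7],
 * src_prot[8:10], src_cache_ctrl[11:13], dst_inc[14], dst_brst_size[15:17],
 * dst_brst_len[18:21], dst_prot[22:24], dst_cache_ctrl[25:27], swap[28:30].
 */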
#define _LC0		0x40c
#define LC0(n)		(_LC0 + (n)*0x20)

#define _LC1		0x410
#define LC1(n)		(_LC1 + (n)*0x20)

#define DBGSTATUS	0xd00
#define DBG_BUSY	(1 << 0)

#define DBGCMD		0xd04
#define DBGINST0	0xd08
#define DBGINST1	0xd0c

#define CR0		0xe00
#define CR1		0xe04
#define CR2		0xe08
#define CR3		0xe0c
#define CR4		0xe10
#define CRD		0xe14

#define PERIPH_ID	0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART		0x330
#define DESIGNER	0x41
#define INTEG_CFG	0x0
#define PERIPH_ID_VAL	((PART << 0) | (DESIGNER << 12))
#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)
#define CMD_DMAADDH	0x54
#define CMD_DMAEND	0x00
#define CMD_DMAFLUSHP	0x35
#define CMD_DMAGO	0xa0
#define CMD_DMALD	0x04
#define CMD_DMALDP	0x25
#define CMD_DMALP	0x20
#define CMD_DMALPEND	0x28
#define CMD_DMAKILL	0x01
#define CMD_DMAMOV	0xbc
#define CMD_DMANOP	0x18
#define CMD_DMARMB	0x12
#define CMD_DMASEV	0x34
#define CMD_DMAST	0x08
#define CMD_DMASTP	0x29
#define CMD_DMASTZ	0x0c
#define CMD_DMAWFE	0x36
#define CMD_DMAWFP	0x30
#define CMD_DMAWMB	0x13

#define SZ_DMAADDH	3
#define SZ_DMAEND	1
#define SZ_DMAFLUSHP	2
#define SZ_DMAGO	6
#define SZ_DMALD	1
#define SZ_DMALDP	2
#define SZ_DMALP	2
#define SZ_DMALPEND	2
#define SZ_DMAKILL	1
#define SZ_DMAMOV	6
#define SZ_DMANOP	1
#define SZ_DMARMB	1
#define SZ_DMASEV	2
#define SZ_DMAST	1
#define SZ_DMASTP	2
#define SZ_DMASTZ	1
#define SZ_DMAWFE	2
#define SZ_DMAWFP	2
#define SZ_DMAWMB	1

#define BRST_LEN(ccr)	((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)	(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
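
/*
 * Worked example of the helpers above: if ccr encodes brst_size = 2
 * (i.e. 1 << 2 = 4-byte beats) and brst_len = 8, then one burst moves
 * 32 bytes, so BYTE_TO_BURST(4096, ccr) = 128 and
 * BURST_TO_BYTE(128, ccr) = 4096.
 */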
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256
/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif
/* The number of default descriptors */
#define NR_DEFAULT_DESC	16
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};
/* Handle to the DMAC provided to the PL330 core */
struct pl330_info {
	/* Owning device */
	struct device *dev;
	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* PL330 core data, Client must not touch it. */
	void	*pl330_data;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;
};
/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};
/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* The all xfers in the request were success. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* If NULL, req will be done at last set parameters. */
	struct pl330_reqcfg *cfg;
	/* Pointer to first xfer in the request. */
	struct pl330_xfer *x;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};
/* To instruct the DMAC */
enum pl330_chan_op {
	/* Start the channel */
	PL330_OP_START,
	/* Abort the active xfer */
	PL330_OP_ABORT,
	/* Stop xfer and flush queue */
	PL330_OP_FLUSH,
};

struct _xfer_spec {
	u32 ccr;
	struct pl330_req *r;
	struct pl330_xfer *x;
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};
struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct pl330_req *r;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};
/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};
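
/*
 * Note: the two req[] slots let the driver queue one request on the DMAC
 * while another executes; lstenq records the slot filled last, and
 * _trigger() below prefers the other (older) slot when picking what to
 * run next.
 */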
enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};
struct pl330_dmac {
	spinlock_t		lock;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;
	/* Pointer to platform specific stuff */
	struct pl330_info	*pinfo;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
};
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of burst */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};
struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
};
struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
		? true : false;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
		? false : true;
}
static inline bool is_manager(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* MANAGER is indexed at the end */
	if (thrd->id == pl330->pinfo->pcfg.num_chan)
		return true;
	else
		return false;
}
/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}
static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}
static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}
static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}
static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}
static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}
static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}
static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}
struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};
static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}
static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}
static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}
static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}
static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}
static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}
static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}
static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}
static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}
static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}
static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}
static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}
struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);

	buf[1] = chan & 0x7;

	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
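
/*
 * Rough conversion of milliseconds to busy-wait loop iterations via the
 * BogoMIPS calibration (loops_per_jiffy); it only bounds the polling in
 * _until_dmac_idle() below, so precision is not critical.
 */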
/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}
/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
static void mark_free(struct pl330_thread *thrd, int idx)
{
	struct _pl330_req *req = &thrd->req[idx];

	_emit_END(0, req->mc_cpu);

	thrd->req_running = -1;
}
static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}
static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}
/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (!IS_FREE(&thrd->req[idx]))
		req = &thrd->req[idx];
	else {
		idx = thrd->lstenq;
		if (!IS_FREE(&thrd->req[idx]))
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}
static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_FAULTING:
		_stop(thrd);

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}
static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->r->cfg->pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}
static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}
static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}
static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->r->rqtype) {
	case DMA_MEM_TO_DEV:
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_DEV_TO_MEM:
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		break;
	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}
/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
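
/*
 * Worked example of the loop math above (values hypothetical): for
 * *bursts == 1000 the middle branch picks lcnt1 = 256, lcnt0 = 3 and
 * cyc = 1, so this call emits a 3 x 256 nested loop covering 768 bursts
 * and reports that via *bursts; _setup_loops() below then calls back
 * for the remaining 232.
 */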
static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}
static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(dry_run, &buf[off], pxs);

	return off;
}
/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = pxs->r->x;
	/* Error if xfer length is not aligned at burst size */
	if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
		return -EINVAL;

	pxs->x = x;
	off += _setup_xfer(dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
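
/*
 * The complete program for a request is thus: DMAMOV CCR; DMAMOV SAR;
 * DMAMOV DAR; DMALP-wrapped transfer cycles; DMASEV <thrd->ev>; DMAEND.
 * A single event per request signals completion only after all bursts
 * are done.
 */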
static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
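
/*
 * Example (hypothetical values): a non-secure slave config with
 * brst_size = 2 (4-byte beats) and brst_len = 8 yields
 * ccr = CC_SRCNS | CC_DSTNS | (7 << CC_SRCBRSTLEN_SHFT) |
 *	 (7 << CC_DSTBRSTLEN_SHFT) | (2 << CC_SRCBRSTSIZE_SHFT) |
 *	 (2 << CC_DSTBRSTSIZE_SHFT), plus CC_SRCINC/CC_DSTINC as per
 * the src_inc/dst_inc flags.
 */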
static inline bool _is_valid(u32 ccr)
{
	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;

	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;

	if (dcctl == INVALID1 || dcctl == INVALID2
	    || scctl == INVALID1 || scctl == INVALID2)
		return false;
	else
		return true;
}
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(void *ch_id, struct pl330_req *r)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	struct pl330_info *pi;
	struct _xfer_spec xs;
	unsigned long flags;
	void __iomem *regs;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	/* No Req or Unacquired Channel or DMAC */
	if (!r || !thrd || thrd->free)
		return -EINVAL;

	pl330 = thrd->dmac;
	pi = pl330->pinfo;
	regs = pi->base;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (r->rqtype != DMA_MEM_TO_MEM && r->peri >= pi->pcfg.num_peri) {
		dev_info(thrd->dmac->pinfo->dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, r->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Use last settings, if not provided */
	if (r->cfg) {
		/* Prefer Secure Channel */
		if (!_manager_ns(thrd))
			r->cfg->nonsecure = 0;
		else
			r->cfg->nonsecure = 1;

		ccr = _prepare_ccr(r->cfg);
	} else {
		ccr = readl(regs + CC(thrd->id));
	}

	/* If this req doesn't have valid xfer settings */
	if (!_is_valid(ccr)) {
		ret = -EINVAL;
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
			__func__, __LINE__, ccr);
		goto xfer_exit;
	}

	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;

	xs.ccr = ccr;
	xs.r = r;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pi->mcbufsz / 2) {
		dev_info(thrd->dmac->pinfo->dev,
			"%s:%d Trying increasing mcbufsz\n",
				__func__, __LINE__);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].r = r;
	_setup_req(0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}
static void dma_pl330_rqcb(struct pl330_req *req, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = container_of(req, struct dma_pl330_desc, req);
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].r, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].r, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
	struct pl330_req *rqdone, *tmp;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pi->dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			rqdone = thrd->req[active].r;
			thrd->req[active].r = NULL;

			mark_free(thrd, active);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
		list_del(&rqdone->rqd);

		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(rqdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}
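
/*
 * pl330_update() runs in hard-irq context and only clears events and
 * detaches finished requests; per-descriptor completion work is deferred
 * to the channel tasklet via dma_pl330_rqcb(), and fault recovery to the
 * pl330_dotask() tasklet scheduled above.
 */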
static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)
		return -EINVAL;

	pl330 = thrd->dmac;
	active = thrd->req_running;

	spin_lock_irqsave(&pl330->lock, flags);

	switch (op) {
	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */
		_stop(thrd);

		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		mark_free(thrd, 0);
		mark_free(thrd, 1);
		break;

	case PL330_OP_ABORT:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* ABORT is only for the active req */
		if (active == -1)
			break;

		thrd->req[active].r = NULL;
		mark_free(thrd, active);

		/* Start the next */
	case PL330_OP_START:
		if ((active == -1) && !_start(thrd))
			ret = -EIO;
		break;

	default:
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
	return ret;
}
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;
	int ev;

	for (ev = 0; ev < pi->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}
static bool _chan_ns(const struct pl330_info *pi, int i)
{
	return pi->pcfg.irq_ns & (1 << i);
}
/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static void *pl330_request_channel(const struct pl330_info *pi)
{
	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int chans, i;

	if (!pi || !pi->pl330_data)
		return NULL;

	pl330 = pi->pl330_data;

	if (pl330->state == DYING)
		return NULL;

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pi, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				mark_free(thrd, 0);
				thrd->req[1].r = NULL;
				mark_free(thrd, 1);
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}
/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pi->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}
static void pl330_release_channel(void *ch_id)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_info *pi)
{
	void __iomem *regs = pi->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pi->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;
	else
		pi->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pi->pcfg.num_events = val;

	pi->pcfg.irq_ns = readl(regs + CR3);
}
static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].r = NULL;
	mark_free(thrd, 0);

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pi->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pi->mcbufsz / 2;
	thrd->req[1].r = NULL;
	mark_free(thrd, 1);
}
static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}
static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
				chans * pi->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pi->dev,
				chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
		return ret;
	}

	return 0;
}
static int pl330_add(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;
	int i, ret;

	if (!pi || !pi->dev)
		return -EINVAL;

	/* If already added */
	if (pi->pl330_data)
		return -EINVAL;

	/* Check if we can handle this DMAC */
	if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* Assign the info structure and private data */
	pl330->pinfo = pi;
	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pi->mcbufsz)
		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
		kfree(pl330);
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}
static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel((void *)thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}
static void dmac_free_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;

	dmac_free_threads(pl330);

	dma_free_coherent(pi->dev, chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
}
static void pl330_del(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;

	if (!pi || !pi->pl330_data)
		return;

	pl330 = pi->pl330_data;

	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_resources(pl330);

	kfree(pl330);
	pi->pl330_data = NULL;
}
/* forward declaration */
static struct amba_driver pl330_driver;
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}
static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	while (!list_empty(&pch->completed_list)) {
		dma_async_tx_callback callback;
		void *callback_param;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (callback) {
			spin_unlock_irqrestore(&pch->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);
}
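
/*
 * Descriptor life cycle implied by the lists above: desc_pool -> PREP
 * (prep_*) -> submitted_list (tx_submit) -> work_list (issue_pending) ->
 * BUSY -> DONE -> completed_list, then back to desc_pool -- or, for
 * cyclic transfers, recycled onto work_list as PREP.
 */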
bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

	peri_id = chan->private;
	return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);
static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
	unsigned int chan_id;

	if (!pdmac)
		return NULL;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pdmac->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
}
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry(desc, &pch->submitted_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->work_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->completed_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
		list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
		list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
		return -ENXIO;
	}

	return 0;
}
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	tasklet_kill(&pch->task);

	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long)pch);
}
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
		if (pch->cyclic) {
			desc->txd.callback = last->txd.callback;
			desc->txd.callback_param = last->txd.callback_param;
		}

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->req.x = &desc->px;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.scctl = CCTRL0;
	desc->rqcfg.dcctl = CCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}
/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kcalloc(count, sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}
static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}
static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should lookout for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicating this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
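
/*
 * Worked example (hypothetical configuration): with a 64-bit data bus
 * (8 bytes), a 16-line data buffer and brst_size = 2, burst_len starts
 * at 8 * 16 >> 2 = 32, is capped at 16, then decrements until it evenly
 * divides the transfer length.
 */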
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned int i;
	dma_addr_t dst;
	dma_addr_t src;

	if (len % period_len != 0)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
		__func__, __LINE__);
		return NULL;
	}

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			src = dma_addr;
			dst = pch->fifo_addr;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_addr;
			dst = dma_addr;
			break;
		default:
			break;
		}

		desc->req.rqtype = direction;
		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		fill_px(&desc->px, dst, src, period_len);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;
	}

	if (!desc)
		return NULL;

	pch->cyclic = true;
	desc->txd.flags = flags;

	return &desc->txd;
}
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = DMA_MEM_TO_MEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
		struct dma_pl330_desc *first)
{
	unsigned long flags;
	struct dma_pl330_desc *desc;

	if (!first)
		return;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	while (!list_empty(&first->node)) {
		desc = list_entry(first->node.next,
				struct dma_pl330_desc, node);
		list_move_tail(&desc->node, &pdmac->desc_pool);
	}

	list_move_tail(&first->node, &pdmac->desc_pool);

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pdmac, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		desc->req.rqtype = direction;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
#define PL330_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
	struct dma_slave_caps *caps)
{
	caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = false;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	return 0;
}
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = dev_get_platdata(&adev->dev);

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	pi->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pi->base))
		return PTR_ERR(pi->base);

	amba_set_drvdata(adev, pdmac);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pi);
			if (ret)
				return ret;
		} else {
			break;
		}
	}

	pi->pcfg.periph_id = adev->periphid;
	ret = pl330_add(pi);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	if (pdat)
		num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
	else
		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);

	pdmac->num_peripherals = num_chan;

	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
			dma_cap_set(DMA_PRIVATE, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;
	pd->device_slave_caps = pl330_dma_device_slave_caps;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
					 of_dma_pl330_xlate, pdmac);
		if (ret) {
			dev_err(&adev->dev,
			"unable to register DMA to the generic DT DMA helpers\n");
		}
	}

	adev->dev.dma_parms = &pdmac->dma_parms;

	/*
	 * This is the limit for transfers with a buswidth of 1, larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}
probe_err2:
	pl330_del(pi);

	return ret;
}
static int pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;

	if (!pdmac)
		return 0;

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	dma_async_device_unregister(&pdmac->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	return 0;
}
[] = {
3014 MODULE_DEVICE_TABLE(amba
, pl330_ids
);
static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");