/*
 * Applied Micro X-Gene SoC DMA engine Driver
 *
 * Copyright (c) 2015, Applied Micro Circuits Corporation
 * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * NOTE: PM support is currently not available.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include "dmaengine.h"
/* X-Gene DMA ring csr registers and bit definitions */
#define XGENE_DMA_RING_CONFIG			0x04
#define XGENE_DMA_RING_ENABLE			BIT(31)
#define XGENE_DMA_RING_ID			0x08
#define XGENE_DMA_RING_ID_SETUP(v)		((v) | BIT(31))
#define XGENE_DMA_RING_ID_BUF			0x0C
#define XGENE_DMA_RING_ID_BUF_SETUP(v)		(((v) << 9) | BIT(21))
#define XGENE_DMA_RING_THRESLD0_SET1		0x30
#define XGENE_DMA_RING_THRESLD0_SET1_VAL	0x64
#define XGENE_DMA_RING_THRESLD1_SET1		0x34
#define XGENE_DMA_RING_THRESLD1_SET1_VAL	0xC8
#define XGENE_DMA_RING_HYSTERESIS		0x68
#define XGENE_DMA_RING_HYSTERESIS_VAL		0xFFFFFFFF
#define XGENE_DMA_RING_STATE			0x6C
#define XGENE_DMA_RING_STATE_WR_BASE		0x70
#define XGENE_DMA_RING_NE_INT_MODE		0x017C
#define XGENE_DMA_RING_NE_INT_MODE_SET(m, v)	\
	((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
#define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v)	\
	((m) &= (~BIT(31 - (v))))
#define XGENE_DMA_RING_CLKEN			0xC208
#define XGENE_DMA_RING_SRST			0xC200
#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN		0xD070
#define XGENE_DMA_RING_BLK_MEM_RDY		0xD074
#define XGENE_DMA_RING_BLK_MEM_RDY_VAL		0xFFFFFFFF
#define XGENE_DMA_RING_DESC_CNT(v)		(((v) & 0x0001FFFE) >> 1)
#define XGENE_DMA_RING_ID_GET(owner, num)	(((owner) << 6) | (num))
#define XGENE_DMA_RING_DST_ID(v)		((1 << 10) | (v))
#define XGENE_DMA_RING_CMD_OFFSET		0x2C
#define XGENE_DMA_RING_CMD_BASE_OFFSET(v)	((v) << 6)
#define XGENE_DMA_RING_COHERENT_SET(m)		\
	(((u32 *)(m))[2] |= BIT(4))
#define XGENE_DMA_RING_ADDRL_SET(m, v)		\
	(((u32 *)(m))[2] |= (((v) >> 8) << 5))
#define XGENE_DMA_RING_ADDRH_SET(m, v)		\
	(((u32 *)(m))[3] |= ((v) >> 35))
#define XGENE_DMA_RING_ACCEPTLERR_SET(m)	\
	(((u32 *)(m))[3] |= BIT(19))
#define XGENE_DMA_RING_SIZE_SET(m, v)		\
	(((u32 *)(m))[3] |= ((v) << 23))
#define XGENE_DMA_RING_RECOMBBUF_SET(m)		\
	(((u32 *)(m))[3] |= BIT(27))
#define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m)	\
	(((u32 *)(m))[3] |= (0x7 << 28))
#define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m)	\
	(((u32 *)(m))[4] |= 0x3)
#define XGENE_DMA_RING_SELTHRSH_SET(m)		\
	(((u32 *)(m))[4] |= BIT(3))
#define XGENE_DMA_RING_TYPE_SET(m, v)		\
	(((u32 *)(m))[4] |= ((v) << 19))
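/*
 * Worked example (derived from the macros above, for illustration): the
 * first DMA-owned Tx ring has owner XGENE_DMA_RING_OWNER_DMA (0x03) and
 * buf_num XGENE_DMA_BUFNUM (0x00), so
 * XGENE_DMA_RING_ID_GET(0x03, 0x00) = (0x03 << 6) | 0x00 = 0xC0, and the
 * value written to the ring ID csr is XGENE_DMA_RING_ID_SETUP(0xC0),
 * i.e. 0xC0 with BIT(31) set to mark the ID valid.
 */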
/* X-Gene DMA device csr registers and bit definitions */
#define XGENE_DMA_IPBRR				0x0
#define XGENE_DMA_DEV_ID_RD(v)			((v) & 0x00000FFF)
#define XGENE_DMA_BUS_ID_RD(v)			(((v) >> 12) & 3)
#define XGENE_DMA_REV_NO_RD(v)			(((v) >> 14) & 3)
#define XGENE_DMA_GCR				0x10
#define XGENE_DMA_CH_SETUP(v)			\
	((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
#define XGENE_DMA_ENABLE(v)			((v) |= BIT(31))
#define XGENE_DMA_DISABLE(v)			((v) &= ~BIT(31))
#define XGENE_DMA_RAID6_CONT			0x14
#define XGENE_DMA_RAID6_MULTI_CTRL(v)		((v) << 24)
#define XGENE_DMA_INT				0x70
#define XGENE_DMA_INT_MASK			0x74
#define XGENE_DMA_INT_ALL_MASK			0xFFFFFFFF
#define XGENE_DMA_INT_ALL_UNMASK		0x0
#define XGENE_DMA_INT_MASK_SHIFT		0x14
#define XGENE_DMA_RING_INT0_MASK		0x90A0
#define XGENE_DMA_RING_INT1_MASK		0x90A8
#define XGENE_DMA_RING_INT2_MASK		0x90B0
#define XGENE_DMA_RING_INT3_MASK		0x90B8
#define XGENE_DMA_RING_INT4_MASK		0x90C0
#define XGENE_DMA_CFG_RING_WQ_ASSOC		0x90E0
#define XGENE_DMA_ASSOC_RING_MNGR1		0xFFFFFFFF
#define XGENE_DMA_MEM_RAM_SHUTDOWN		0xD070
#define XGENE_DMA_BLK_MEM_RDY			0xD074
#define XGENE_DMA_BLK_MEM_RDY_VAL		0xFFFFFFFF
/* X-Gene SoC EFUSE csr register and bit definition */
#define XGENE_SOC_JTAG1_SHADOW			0x18
#define XGENE_DMA_PQ_DISABLE_MASK		BIT(13)
/* X-Gene DMA Descriptor format */
#define XGENE_DMA_DESC_NV_BIT			BIT_ULL(50)
#define XGENE_DMA_DESC_IN_BIT			BIT_ULL(55)
#define XGENE_DMA_DESC_C_BIT			BIT_ULL(63)
#define XGENE_DMA_DESC_DR_BIT			BIT_ULL(61)
#define XGENE_DMA_DESC_ELERR_POS		46
#define XGENE_DMA_DESC_RTYPE_POS		56
#define XGENE_DMA_DESC_LERR_POS			60
#define XGENE_DMA_DESC_BUFLEN_POS		48
#define XGENE_DMA_DESC_HOENQ_NUM_POS		48
#define XGENE_DMA_DESC_ELERR_RD(m)		\
	(((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
#define XGENE_DMA_DESC_LERR_RD(m)		\
	(((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
#define XGENE_DMA_DESC_STATUS(elerr, lerr)	\
	(((elerr) << 4) | (lerr))
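/*
 * Worked example (illustrative): a descriptor completing with ELERR = 0x0
 * and LERR = 0x3 yields XGENE_DMA_DESC_STATUS(0x0, 0x3) =
 * (0x0 << 4) | 0x3 = 0x03, i.e. ERR_READ_DATA_AXI, which indexes the
 * error string table xgene_dma_desc_err[] defined below.
 */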
/* X-Gene DMA descriptor empty s/w signature */
#define XGENE_DMA_DESC_EMPTY_SIGNATURE		~0ULL

/* X-Gene DMA configurable parameters defines */
#define XGENE_DMA_RING_NUM		512
#define XGENE_DMA_BUFNUM		0x0
#define XGENE_DMA_CPU_BUFNUM		0x18
#define XGENE_DMA_RING_OWNER_DMA	0x03
#define XGENE_DMA_RING_OWNER_CPU	0x0F
#define XGENE_DMA_RING_TYPE_REGULAR	0x01
#define XGENE_DMA_RING_WQ_DESC_SIZE	32	/* 32 Bytes */
#define XGENE_DMA_RING_NUM_CONFIG	5
#define XGENE_DMA_MAX_CHANNEL		4
#define XGENE_DMA_XOR_CHANNEL		0
#define XGENE_DMA_PQ_CHANNEL		1
#define XGENE_DMA_MAX_BYTE_CNT		0x4000	/* 16 KB */
#define XGENE_DMA_MAX_64B_DESC_BYTE_CNT	0x14000	/* 80 KB */
#define XGENE_DMA_MAX_XOR_SRC		5
#define XGENE_DMA_16K_BUFFER_LEN_CODE	0x0
#define XGENE_DMA_INVALID_LEN_CODE	0x7800000000000000ULL
/* X-Gene DMA descriptor error codes */
#define ERR_DESC_AXI			0x01
#define ERR_BAD_DESC			0x02
#define ERR_READ_DATA_AXI		0x03
#define ERR_WRITE_DATA_AXI		0x04
#define ERR_FBP_TIMEOUT			0x05
#define ERR_ECC				0x06
#define ERR_DIFF_SIZE			0x08
#define ERR_SCT_GAT_LEN			0x09
#define ERR_CRC_ERR			0x11
#define ERR_CHKSUM			0x12
#define ERR_DIF				0x13
/* X-Gene DMA error interrupt codes */
#define ERR_DIF_SIZE_INT		0x0
#define ERR_GS_ERR_INT			0x1
#define ERR_FPB_TIMEO_INT		0x2
#define ERR_WFIFO_OVF_INT		0x3
#define ERR_RFIFO_OVF_INT		0x4
#define ERR_WR_TIMEO_INT		0x5
#define ERR_RD_TIMEO_INT		0x6
#define ERR_WR_ERR_INT			0x7
#define ERR_RD_ERR_INT			0x8
#define ERR_BAD_DESC_INT		0x9
#define ERR_DESC_DST_INT		0xA
#define ERR_DESC_SRC_INT		0xB
/* X-Gene DMA flyby operation code */
#define FLYBY_2SRC_XOR			0x80
#define FLYBY_3SRC_XOR			0x90
#define FLYBY_4SRC_XOR			0xA0
#define FLYBY_5SRC_XOR			0xB0
/* X-Gene DMA SW descriptor flags */
#define XGENE_DMA_FLAG_64B_DESC		BIT(0)
/* Define to dump X-Gene DMA descriptor */
#define XGENE_DMA_DESC_DUMP(desc, m)	\
	print_hex_dump(KERN_ERR, (m),	\
		       DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)
#define to_dma_desc_sw(tx)		\
	container_of(tx, struct xgene_dma_desc_sw, tx)
#define to_dma_chan(dchan)		\
	container_of(dchan, struct xgene_dma_chan, dma_chan)

#define chan_dbg(chan, fmt, arg...)	\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)	\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
struct xgene_dma_desc_hw {
	__le64 m0;
	__le64 m1;
	__le64 m2;
	__le64 m3;
};
enum xgene_dma_ring_cfgsize {
	XGENE_DMA_RING_CFG_SIZE_512B,
	XGENE_DMA_RING_CFG_SIZE_2KB,
	XGENE_DMA_RING_CFG_SIZE_16KB,
	XGENE_DMA_RING_CFG_SIZE_64KB,
	XGENE_DMA_RING_CFG_SIZE_512KB,
	XGENE_DMA_RING_CFG_SIZE_INVALID
};
struct xgene_dma_ring {
	struct xgene_dma *pdma;
	u8 buf_num;
	u16 id;
	u16 num;
	u16 head;
	u16 owner;
	u16 slots;
	u16 dst_ring_num;
	u32 size;
	void __iomem *cmd;
	void __iomem *cmd_base;
	dma_addr_t desc_paddr;
	u32 state[XGENE_DMA_RING_NUM_CONFIG];
	enum xgene_dma_ring_cfgsize cfgsize;
	union {
		void *desc_vaddr;
		struct xgene_dma_desc_hw *desc_hw;
	};
};
struct xgene_dma_desc_sw {
	struct xgene_dma_desc_hw desc1;
	struct xgene_dma_desc_hw desc2;
	u32 flags;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor tx;
};
/**
 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
 * @dma_chan: dmaengine channel object member
 * @pdma: X-Gene DMA device structure reference
 * @dev: struct device reference for dma mapping api
 * @id: raw id of this channel
 * @rx_irq: channel IRQ
 * @name: name of X-Gene DMA channel
 * @lock: serializes enqueue/dequeue operations to the descriptor pool
 * @pending: number of transaction requests pushed to the DMA controller for
 *	execution, but still awaiting completion
 * @max_outstanding: max number of outstanding requests we can push to channel
 * @ld_pending: descriptors which are queued to run, but have not yet been
 *	submitted to the hardware for execution
 * @ld_running: descriptors which are currently being executed by the hardware
 * @ld_completed: descriptors which have finished execution by the hardware.
 *	These descriptors have already had their cleanup actions run. They
 *	are waiting for the ACK bit to be set by the async tx API.
 * @desc_pool: descriptor pool for DMA operations
 * @tasklet: bottom half where all completed descriptors are cleaned up
 * @tx_ring: transmit ring descriptor that we use to prepare actual
 *	descriptors for execution
 * @rx_ring: receive ring descriptor that we use to get completed DMA
 *	descriptors during cleanup time
 */
struct xgene_dma_chan {
	struct dma_chan dma_chan;
	struct xgene_dma *pdma;
	struct device *dev;
	int id;
	int rx_irq;
	char name[10];
	spinlock_t lock;
	int pending;
	int max_outstanding;
	struct list_head ld_pending;
	struct list_head ld_running;
	struct list_head ld_completed;
	struct dma_pool *desc_pool;
	struct tasklet_struct tasklet;
	struct xgene_dma_ring tx_ring;
	struct xgene_dma_ring rx_ring;
};
/**
 * struct xgene_dma - internal representation of an X-Gene DMA device
 * @dev: device reference
 * @clk: DMA engine clock
 * @err_irq: DMA error irq number
 * @ring_num: start id number for DMA ring
 * @csr_dma: base for DMA register access
 * @csr_ring: base for DMA ring register access
 * @csr_ring_cmd: base for DMA ring command register access
 * @csr_efuse: base for efuse register access
 * @dma_dev: embedded struct dma_device
 * @chan: reference to X-Gene DMA channels
 */
struct xgene_dma {
	struct device *dev;
	struct clk *clk;
	int err_irq;
	int ring_num;
	void __iomem *csr_dma;
	void __iomem *csr_ring;
	void __iomem *csr_ring_cmd;
	void __iomem *csr_efuse;
	struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
	struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
};
static const char * const xgene_dma_desc_err[] = {
	[ERR_DESC_AXI] = "AXI error when reading src/dst link list",
	[ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
	[ERR_READ_DATA_AXI] = "AXI error when reading data",
	[ERR_WRITE_DATA_AXI] = "AXI error when writing data",
	[ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
	[ERR_ECC] = "ECC double bit error",
	[ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
	[ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
	[ERR_CRC_ERR] = "CRC error",
	[ERR_CHKSUM] = "Checksum error",
	[ERR_DIF] = "DIF error",
};
static const char * const xgene_dma_err[] = {
	[ERR_DIF_SIZE_INT] = "DIF size error",
	[ERR_GS_ERR_INT] = "Gather scatter not same size error",
	[ERR_FPB_TIMEO_INT] = "Free pool time out error",
	[ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
	[ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
	[ERR_WR_TIMEO_INT] = "Write time out error",
	[ERR_RD_TIMEO_INT] = "Read time out error",
	[ERR_WR_ERR_INT] = "HBF bus write error",
	[ERR_RD_ERR_INT] = "HBF bus read error",
	[ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
	[ERR_DESC_DST_INT] = "HFB reading dst link address error",
	[ERR_DESC_SRC_INT] = "HFB reading src link address error",
};
static bool is_pq_enabled(struct xgene_dma *pdma)
{
	u32 val;

	val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
	return !(val & XGENE_DMA_PQ_DISABLE_MASK);
}
static u64 xgene_dma_encode_len(size_t len)
{
	return (len < XGENE_DMA_MAX_BYTE_CNT) ?
		((u64)len << XGENE_DMA_DESC_BUFLEN_POS) :
		XGENE_DMA_16K_BUFFER_LEN_CODE;
}
static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
{
	static u8 flyby_type[] = {
		FLYBY_2SRC_XOR, /* Dummy */
		FLYBY_2SRC_XOR, /* Dummy */
		FLYBY_2SRC_XOR,
		FLYBY_3SRC_XOR,
		FLYBY_4SRC_XOR,
		FLYBY_5SRC_XOR
	};

	return flyby_type[src_cnt];
}
static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state = ioread32(&cmd_base[1]);

	return XGENE_DMA_RING_DESC_CNT(ring_state);
}
static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
				     dma_addr_t *paddr)
{
	size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
			*len : XGENE_DMA_MAX_BYTE_CNT;

	*ext8 |= cpu_to_le64(*paddr);
	*ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes));

	*len -= nbytes;
	*paddr += nbytes;
}
static void xgene_dma_invalidate_buffer(__le64 *ext8)
{
	*ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE);
}
static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
{
	switch (idx) {
	case 0:
		return &desc->m1;
	case 1:
		return &desc->m0;
	case 2:
		return &desc->m3;
	case 3:
		return &desc->m2;
	default:
		pr_err("Invalid dma descriptor index\n");
	}

	return NULL;
}
static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
				u16 dst_ring_num)
{
	desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
	desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
				XGENE_DMA_DESC_RTYPE_POS);
	desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
	desc->m3 |= cpu_to_le64((u64)dst_ring_num <<
				XGENE_DMA_DESC_HOENQ_NUM_POS);
}
static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw,
				    dma_addr_t dst, dma_addr_t src,
				    size_t len)
{
	struct xgene_dma_desc_hw *desc1, *desc2;
	int i;

	/* Get 1st descriptor */
	desc1 = &desc_sw->desc1;
	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);

	/* Set destination address */
	desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
	desc1->m3 |= cpu_to_le64(dst);

	/* Set 1st source address */
	xgene_dma_set_src_buffer(&desc1->m1, &len, &src);

	if (!len)
		return;

	/*
	 * We need to split this source buffer,
	 * and need to use 2nd descriptor
	 */
	desc2 = &desc_sw->desc2;
	desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);

	/* Set 2nd to 5th source address */
	for (i = 0; i < 4 && len; i++)
		xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
					 &len, &src);

	/* Invalidate unused source address field */
	for (; i < 4; i++)
		xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));

	/* Updated flag that we have prepared 64B descriptor */
	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
}
static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw,
				    dma_addr_t *dst, dma_addr_t *src,
				    u32 src_cnt, size_t *nbytes,
				    const u8 *scf)
{
	struct xgene_dma_desc_hw *desc1, *desc2;
	size_t len = *nbytes;
	int i;

	desc1 = &desc_sw->desc1;
	desc2 = &desc_sw->desc2;

	/* Initialize DMA descriptor */
	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);

	/* Set destination address */
	desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
	desc1->m3 |= cpu_to_le64(*dst);

	/* We have multiple source addresses, so need to set NV bit */
	desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);

	/* Set flyby opcode */
	desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt));

	/* Set 1st to 5th source addresses */
	for (i = 0; i < src_cnt; i++) {
		len = *nbytes;
		xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
					 xgene_dma_lookup_ext8(desc2, i - 1),
					 &len, &src[i]);
		desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8)));
	}

	/* Update meta data */
	*nbytes = len;
	*dst += XGENE_DMA_MAX_BYTE_CNT;

	/* We need always 64B descriptor to perform xor or pq operations */
	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
}
static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xgene_dma_desc_sw *desc;
	struct xgene_dma_chan *chan;
	dma_cookie_t cookie;

	if (unlikely(!tx))
		return -EINVAL;

	chan = to_dma_chan(tx->chan);
	desc = to_dma_desc_sw(tx);

	spin_lock_bh(&chan->lock);

	cookie = dma_cookie_assign(tx);

	/* Add this transaction list onto the tail of the pending queue */
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);

	spin_unlock_bh(&chan->lock);

	return cookie;
}
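/*
 * Typical client flow against this driver (a sketch of standard
 * dmaengine usage, not anything X-Gene specific):
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = dmaengine_submit(tx);	  * lands in xgene_dma_tx_submit() *
 *	dma_async_issue_pending(chan);	  * kicks xgene_dma_issue_pending() *
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */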
static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
				       struct xgene_dma_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}
static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
				 struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc;
	dma_addr_t phys;

	desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (!desc) {
		chan_err(chan, "Failed to allocate LDs\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc->tx_list);
	desc->tx.phys = phys;
	desc->tx.tx_submit = xgene_dma_tx_submit;
	dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}
/**
 * xgene_dma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan: X-Gene DMA channel
 *
 * This function is used on all completed and acked descriptors.
 */
static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
		if (async_tx_test_ack(&desc->tx))
			xgene_dma_clean_descriptor(chan, desc);
	}
}
/**
 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: X-Gene DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies.
 */
static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
					      struct xgene_dma_desc_sw *desc)
{
	struct dma_async_tx_descriptor *tx = &desc->tx;

	/*
	 * If this is not the last transaction in the group,
	 * then no need to complete cookie and run any callback as
	 * this is not the tx_descriptor which had been sent to caller
	 * of this DMA request
	 */
	if (tx->cookie == 0)
		return;

	dma_cookie_complete(tx);

	/* Run the link descriptor callback function */
	if (tx->callback)
		tx->callback(tx->callback_param);

	dma_descriptor_unmap(tx);

	/* Run any dependencies */
	dma_run_dependencies(tx);
}
/**
 * xgene_dma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: X-Gene DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if acked by async_tx api,
 * else move it to queue ld_completed.
 */
static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
					       struct xgene_dma_desc_sw *desc)
{
	/* Remove from the list of running transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->tx)) {
		/*
		 * Move this descriptor to the list of descriptors which is
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}
static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
				   struct xgene_dma_desc_sw *desc_sw)
{
	struct xgene_dma_desc_hw *desc_hw;

	/* Check if can push more descriptor to hw for execution */
	if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
		return -EBUSY;

	/* Get hw descriptor from DMA tx ring */
	desc_hw = &ring->desc_hw[ring->head];

	/*
	 * Increment the head count to point next
	 * descriptor for next time
	 */
	if (++ring->head == ring->slots)
		ring->head = 0;

	/* Copy prepared sw descriptor data to hw descriptor */
	memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));

	/*
	 * Check if we have prepared 64B descriptor,
	 * in this case we need one more hw descriptor
	 */
	if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
		desc_hw = &ring->desc_hw[ring->head];

		if (++ring->head == ring->slots)
			ring->head = 0;

		memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
	}

	/* Notify the hw that we have descriptor ready for execution */
	iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
		  2 : 1, ring->cmd);

	return 0;
}
/**
 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
 * @chan : X-Gene DMA channel
 *
 * LOCKING: must hold chan->lock
 */
static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
	int ret;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "No pending LDs\n");
		return;
	}

	/*
	 * Move elements from the queue of pending transactions onto the list
	 * of running transactions and push it to hw for further executions
	 */
	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
		/*
		 * Check if have pushed max number of transactions to hw
		 * as capable, so let's stop here and will push remaining
		 * elements from pending ld queue after completing some
		 * descriptors that we have already pushed
		 */
		if (chan->pending >= chan->max_outstanding)
			return;

		ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
		if (ret)
			return;

		/*
		 * Delete this element from ld pending queue and append it to
		 * ld running queue
		 */
		list_move_tail(&desc_sw->node, &chan->ld_running);

		/* Increment the pending transaction count */
		chan->pending++;
	}
}
/**
 * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed to free until flag 'ack' is set
 * @chan: X-Gene DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if flag 'ack' is set.
 */
static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
{
	struct xgene_dma_ring *ring = &chan->rx_ring;
	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
	struct xgene_dma_desc_hw *desc_hw;
	u8 status;

	/* Clean already completed and acked descriptors */
	xgene_dma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
		/* Get subsequent hw descriptor from DMA rx ring */
		desc_hw = &ring->desc_hw[ring->head];

		/* Check if this descriptor has been completed */
		if (unlikely(le64_to_cpu(desc_hw->m0) ==
			     XGENE_DMA_DESC_EMPTY_SIGNATURE))
			break;

		if (++ring->head == ring->slots)
			ring->head = 0;

		/* Check if we have any error with DMA transactions */
		status = XGENE_DMA_DESC_STATUS(
				XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
							desc_hw->m0)),
				XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
						       desc_hw->m0)));
		if (status) {
			/* Print the DMA error type */
			chan_err(chan, "%s\n", xgene_dma_desc_err[status]);

			/*
			 * We have DMA transactions error here. Dump DMA Tx
			 * and Rx descriptors for this request
			 */
			XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
					    "X-Gene DMA TX DESC1: ");

			if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
				XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
						    "X-Gene DMA TX DESC2: ");

			XGENE_DMA_DESC_DUMP(desc_hw,
					    "X-Gene DMA RX ERR DESC: ");
		}

		/* Notify the hw about this completed descriptor */
		iowrite32(-1, ring->cmd);

		/* Mark this hw descriptor as processed */
		desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);

		xgene_dma_run_tx_complete_actions(chan, desc_sw);

		xgene_dma_clean_running_descriptor(chan, desc_sw);

		/*
		 * Decrement the pending transaction count
		 * as we have processed one
		 */
		chan->pending--;
	}

	/*
	 * Start any pending transactions automatically
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	xgene_chan_xfer_ld_pending(chan);
}
static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct xgene_dma_desc_sw),
					  0, 0);
	if (!chan->desc_pool) {
		chan_err(chan, "Failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	chan_dbg(chan, "Allocated descriptor pool\n");

	return 1;
}
/**
 * xgene_dma_free_desc_list - Free all descriptors in a queue
 * @chan: X-Gene DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->lock
 */
static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
				     struct list_head *list)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		xgene_dma_clean_descriptor(chan, desc);
}
static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	chan_dbg(chan, "Free all resources\n");

	if (!chan->desc_pool)
		return;

	spin_lock_bh(&chan->lock);

	/* Process all running descriptors */
	xgene_dma_cleanup_descriptors(chan);

	/* Clean all link descriptor queues */
	xgene_dma_free_desc_list(chan, &chan->ld_pending);
	xgene_dma_free_desc_list(chan, &chan->ld_running);
	xgene_dma_free_desc_list(chan, &chan->ld_completed);

	spin_unlock_bh(&chan->lock);

	/* Delete this channel DMA pool */
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	size_t copy;

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Create the largest transaction possible */
		copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);

		/* Prepare DMA descriptor */
		xgene_dma_prep_cpy_desc(chan, new, dst, src, copy);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Update metadata */
		len -= copy;
		dst += copy;
		src += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
	struct dma_chan *dchan, struct scatterlist *dst_sg,
	u32 dst_nents, struct scatterlist *src_sg,
	u32 src_nents, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new = NULL;
	struct xgene_dma_chan *chan;
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	if (unlikely(!dchan))
		return NULL;

	if (unlikely(!dst_nents || !src_nents))
		return NULL;

	if (unlikely(!dst_sg || !src_sg))
		return NULL;

	chan = to_dma_chan(dchan);

	/* Get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);
	dst_nents--;
	src_nents--;

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Prepare DMA descriptor */
		xgene_dma_prep_cpy_desc(chan, new, dst, src, len);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {
			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {
			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	if (!new)
		return NULL;

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
	u32 src_cnt, size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
				0x01, 0x01, 0x01, 0x01, 0x01};

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Prepare xor DMA descriptor */
		xgene_dma_prep_xor_desc(chan, new, &dst, src,
					src_cnt, &len, multi);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
	struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
	u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	size_t _len = len;
	dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	/*
	 * Save the source addresses in a local variable; we may have to
	 * prepare two descriptors to generate P and Q if both are enabled
	 * in the flags by the client
	 */
	memcpy(_src, src, sizeof(*src) * src_cnt);

	if (flags & DMA_PREP_PQ_DISABLE_P)
		len = 0;

	if (flags & DMA_PREP_PQ_DISABLE_Q)
		_len = 0;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/*
		 * Prepare DMA descriptor to generate P,
		 * if DMA_PREP_PQ_DISABLE_P flag is not set
		 */
		if (len) {
			xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
						src_cnt, &len, multi);
			continue;
		}

		/*
		 * Prepare DMA descriptor to generate Q,
		 * if DMA_PREP_PQ_DISABLE_Q flag is not set
		 */
		if (_len) {
			xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
						src_cnt, &_len, scf);
		}
	} while (len || _len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
static void xgene_dma_issue_pending(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	spin_lock_bh(&chan->lock);
	xgene_chan_xfer_ld_pending(chan);
	spin_unlock_bh(&chan->lock);
}
static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}
static void xgene_dma_tasklet_cb(unsigned long data)
{
	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;

	spin_lock_bh(&chan->lock);

	/* Run all cleanup for descriptors which have been completed */
	xgene_dma_cleanup_descriptors(chan);

	/* Re-enable DMA channel IRQ */
	enable_irq(chan->rx_irq);

	spin_unlock_bh(&chan->lock);
}
static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
{
	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;

	BUG_ON(!chan);

	/*
	 * Disable DMA channel IRQ until we process completed
	 * descriptors
	 */
	disable_irq_nosync(chan->rx_irq);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);

	return IRQ_HANDLED;
}
static irqreturn_t xgene_dma_err_isr(int irq, void *id)
{
	struct xgene_dma *pdma = (struct xgene_dma *)id;
	unsigned long int_mask;
	u32 val, i;

	val = ioread32(pdma->csr_dma + XGENE_DMA_INT);

	/* Clear DMA interrupts */
	iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);

	/* Print DMA error info */
	int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
	for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
		dev_err(pdma->dev,
			"Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);

	return IRQ_HANDLED;
}
static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
{
	int i;

	iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);

	for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
		iowrite32(ring->state[i], ring->pdma->csr_ring +
			  XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
}
static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
{
	memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
	xgene_dma_wr_ring_state(ring);
}
static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
{
	void *ring_cfg = ring->state;
	u64 addr = ring->desc_paddr;
	u32 i, val;

	ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;

	/* Clear DMA ring state */
	xgene_dma_clr_ring_state(ring);

	/* Set DMA ring type */
	XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);

	if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
		/* Set recombination buffer and timeout */
		XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
		XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
		XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
	}

	/* Initialize DMA ring state */
	XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
	XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
	XGENE_DMA_RING_COHERENT_SET(ring_cfg);
	XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
	XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
	XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);

	/* Write DMA ring configurations */
	xgene_dma_wr_ring_state(ring);

	/* Set DMA ring id */
	iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
		  ring->pdma->csr_ring + XGENE_DMA_RING_ID);

	/* Set DMA ring buffer */
	iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
		  ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);

	if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
		return;

	/* Set empty signature to DMA Rx ring descriptors */
	for (i = 0; i < ring->slots; i++) {
		struct xgene_dma_desc_hw *desc;

		desc = &ring->desc_hw[i];
		desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
	}

	/* Enable DMA Rx ring interrupt */
	val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
	XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
	iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
}
static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
{
	u32 ring_id, val;

	if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
		/* Disable DMA Rx ring interrupt */
		val = ioread32(ring->pdma->csr_ring +
			       XGENE_DMA_RING_NE_INT_MODE);
		XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
		iowrite32(val, ring->pdma->csr_ring +
			  XGENE_DMA_RING_NE_INT_MODE);
	}

	/* Clear DMA ring state */
	ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
	iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);

	iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
	xgene_dma_clr_ring_state(ring);
}
static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
{
	ring->cmd_base = ring->pdma->csr_ring_cmd +
			 XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
							 XGENE_DMA_RING_NUM));

	ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
}
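/*
 * Worked example (illustrative): ring numbers start at
 * XGENE_DMA_RING_NUM (512), so the first ring created maps to command
 * base offset (512 - 512) << 6 = 0x0 and the next one to 1 << 6 = 0x40
 * within the ring-command csr region.
 */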
static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
				   enum xgene_dma_ring_cfgsize cfgsize)
{
	int size;

	switch (cfgsize) {
	case XGENE_DMA_RING_CFG_SIZE_512B:
		size = 0x200;
		break;
	case XGENE_DMA_RING_CFG_SIZE_2KB:
		size = 0x800;
		break;
	case XGENE_DMA_RING_CFG_SIZE_16KB:
		size = 0x4000;
		break;
	case XGENE_DMA_RING_CFG_SIZE_64KB:
		size = 0x10000;
		break;
	case XGENE_DMA_RING_CFG_SIZE_512KB:
		size = 0x80000;
		break;
	default:
		chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
		return -EINVAL;
	}

	return size;
}
static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
{
	/* Clear DMA ring configurations */
	xgene_dma_clear_ring(ring);

	/* De-allocate DMA ring descriptor */
	if (ring->desc_vaddr) {
		dma_free_coherent(ring->pdma->dev, ring->size,
				  ring->desc_vaddr, ring->desc_paddr);
		ring->desc_vaddr = NULL;
	}
}
static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
{
	xgene_dma_delete_ring_one(&chan->rx_ring);
	xgene_dma_delete_ring_one(&chan->tx_ring);
}
static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
				     struct xgene_dma_ring *ring,
				     enum xgene_dma_ring_cfgsize cfgsize)
{
	int ret;

	/* Setup DMA ring descriptor variables */
	ring->pdma = chan->pdma;
	ring->cfgsize = cfgsize;
	ring->num = chan->pdma->ring_num++;
	ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);

	ret = xgene_dma_get_ring_size(chan, cfgsize);
	if (ret <= 0)
		return ret;
	ring->size = ret;

	/* Allocate memory for DMA ring descriptor */
	ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
					       &ring->desc_paddr, GFP_KERNEL);
	if (!ring->desc_vaddr) {
		chan_err(chan, "Failed to allocate ring desc\n");
		return -ENOMEM;
	}

	/* Configure and enable DMA ring */
	xgene_dma_set_ring_cmd(ring);
	xgene_dma_setup_ring(ring);

	return 0;
}
static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
{
	struct xgene_dma_ring *rx_ring = &chan->rx_ring;
	struct xgene_dma_ring *tx_ring = &chan->tx_ring;
	int ret;

	/* Create DMA Rx ring descriptor */
	rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
	rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;

	ret = xgene_dma_create_ring_one(chan, rx_ring,
					XGENE_DMA_RING_CFG_SIZE_64KB);
	if (ret)
		return ret;

	chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
		 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);

	/* Create DMA Tx ring descriptor */
	tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
	tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;

	ret = xgene_dma_create_ring_one(chan, tx_ring,
					XGENE_DMA_RING_CFG_SIZE_64KB);
	if (ret) {
		xgene_dma_delete_ring_one(rx_ring);
		return ret;
	}

	tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);

	chan_dbg(chan,
		 "Tx ring id 0x%X num %d desc 0x%p\n",
		 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);

	/* Set the max outstanding request possible to this channel */
	chan->max_outstanding = rx_ring->slots;

	return 0;
}
static int xgene_dma_init_rings(struct xgene_dma *pdma)
{
	int ret, i, j;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
		if (ret) {
			for (j = 0; j < i; j++)
				xgene_dma_delete_chan_rings(&pdma->chan[j]);
			return ret;
		}
	}

	return 0;
}
static void xgene_dma_enable(struct xgene_dma *pdma)
{
	u32 val;

	/* Configure and enable DMA engine */
	val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
	XGENE_DMA_CH_SETUP(val);
	XGENE_DMA_ENABLE(val);
	iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
}
static void xgene_dma_disable(struct xgene_dma *pdma)
{
	u32 val;

	val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
	XGENE_DMA_DISABLE(val);
	iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
}
static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
{
	/*
	 * Mask DMA ring overflow, underflow and
	 * AXI write/read error interrupts
	 */
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
	iowrite32(XGENE_DMA_INT_ALL_MASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);

	/* Mask DMA error interrupts */
	iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
}
static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
{
	/*
	 * Unmask DMA ring overflow, underflow and
	 * AXI write/read error interrupts
	 */
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);

	/* Unmask DMA error interrupts */
	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
		  pdma->csr_dma + XGENE_DMA_INT_MASK);
}
static void xgene_dma_init_hw(struct xgene_dma *pdma)
{
	u32 val;

	/* Associate DMA ring to corresponding ring HW */
	iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
		  pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);

	/* Configure RAID6 polynomial control setting */
	if (is_pq_enabled(pdma))
		iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
			  pdma->csr_dma + XGENE_DMA_RAID6_CONT);
	else
		dev_info(pdma->dev, "PQ is disabled in HW\n");

	xgene_dma_enable(pdma);
	xgene_dma_unmask_interrupts(pdma);

	/* Get DMA id and version info */
	val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);

	/* DMA device info */
	dev_info(pdma->dev,
		 "X-Gene DMA v%d.%02d.%02d driver registered %d channels",
		 XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
		 XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
}
static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
{
	if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
	    (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
		return 0;

	iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
	iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);

	/* Bring up memory */
	iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);

	/* Force a barrier */
	ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);

	/* reset may take up to 1ms */
	usleep_range(1000, 1100);

	if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
		!= XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
		dev_err(pdma->dev,
			"Failed to release ring mngr memory from shutdown\n");
		return -ENODEV;
	}

	/* program threshold set 1 and all hysteresis */
	iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
	iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
	iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
		  pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);

	/* Enable QPcore and assign error queue */
	iowrite32(XGENE_DMA_RING_ENABLE,
		  pdma->csr_ring + XGENE_DMA_RING_CONFIG);

	return 0;
}
static int xgene_dma_init_mem(struct xgene_dma *pdma)
{
	int ret;

	ret = xgene_dma_init_ring_mngr(pdma);
	if (ret)
		return ret;

	/* Bring up memory */
	iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);

	/* Force a barrier */
	ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);

	/* reset may take up to 1ms */
	usleep_range(1000, 1100);

	if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
		!= XGENE_DMA_BLK_MEM_RDY_VAL) {
		dev_err(pdma->dev,
			"Failed to release DMA memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}
static int xgene_dma_request_irqs(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int ret, i, j;

	/* Register DMA error irq */
	ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
			       0, "dma_error", pdma);
	if (ret) {
		dev_err(pdma->dev,
			"Failed to register error IRQ %d\n", pdma->err_irq);
		return ret;
	}

	/* Register DMA channel rx irq */
	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		ret = devm_request_irq(chan->dev, chan->rx_irq,
				       xgene_dma_chan_ring_isr,
				       0, chan->name, chan);
		if (ret) {
			chan_err(chan, "Failed to register Rx IRQ %d\n",
				 chan->rx_irq);
			devm_free_irq(pdma->dev, pdma->err_irq, pdma);

			for (j = 0; j < i; j++) {
				chan = &pdma->chan[j];
				devm_free_irq(chan->dev, chan->rx_irq, chan);
			}

			return ret;
		}
	}

	return 0;
}
static void xgene_dma_free_irqs(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int i;

	/* Free DMA device error irq */
	devm_free_irq(pdma->dev, pdma->err_irq, pdma);

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		devm_free_irq(chan->dev, chan->rx_irq, chan);
	}
}
static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
			       struct dma_device *dma_dev)
{
	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);

	/* Set DMA device capability */
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);

	/* The X-Gene SoC DMA engine channel 0 supports XOR only, while
	 * channel 1 supports both XOR and PQ. First, hw provides a
	 * mechanism to enable/disable PQ/XOR support on channel 1, which
	 * we verify by reading the SoC efuse register. Second, there is a
	 * hw errata: if channel 0 and channel 1 execute XOR and PQ
	 * requests simultaneously, the DMA engine may hang. So we enable
	 * XOR on channel 0 only if XOR and PQ support on channel 1 is
	 * disabled.
	 */
	if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
	    is_pq_enabled(chan->pdma)) {
		dma_cap_set(DMA_PQ, dma_dev->cap_mask);
		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	} else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
		   !is_pq_enabled(chan->pdma)) {
		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	}

	/* Set base and prep routines */
	dma_dev->dev = chan->dev;
	dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
	dma_dev->device_issue_pending = xgene_dma_issue_pending;
	dma_dev->device_tx_status = xgene_dma_tx_status;
	dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy;
	dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
		dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
		dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
	}

	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
		dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
		dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
	}
}
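/*
 * Resulting capability split (illustrative summary of the logic above):
 * with PQ fused on, channel 1 advertises MEMCPY, SGCPY, XOR and PQ while
 * channel 0 stays MEMCPY/SGCPY only; with PQ fused off, channel 0 picks
 * up XOR instead and channel 1 advertises MEMCPY/SGCPY (per the errata
 * note above).
 */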
static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
{
	struct xgene_dma_chan *chan = &pdma->chan[id];
	struct dma_device *dma_dev = &pdma->dma_dev[id];
	int ret;

	chan->dma_chan.device = dma_dev;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb,
		     (unsigned long)chan);

	chan->pending = 0;
	chan->desc_pool = NULL;
	dma_cookie_init(&chan->dma_chan);

	/* Setup dma device capabilities and prep routines */
	xgene_dma_set_caps(chan, dma_dev);

	/* Initialize DMA device list head */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		chan_err(chan, "Failed to register async device %d", ret);
		tasklet_kill(&chan->tasklet);

		return ret;
	}

	/* DMA capability info */
	dev_info(pdma->dev,
		 "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan),
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "",
		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");

	return 0;
}
static int xgene_dma_init_async(struct xgene_dma *pdma)
{
	int ret, i, j;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		ret = xgene_dma_async_register(pdma, i);
		if (ret) {
			for (j = 0; j < i; j++) {
				dma_async_device_unregister(&pdma->dma_dev[j]);
				tasklet_kill(&pdma->chan[j].tasklet);
			}

			return ret;
		}
	}

	return 0;
}
static void xgene_dma_async_unregister(struct xgene_dma *pdma)
{
	int i;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
		dma_async_device_unregister(&pdma->dma_dev[i]);
}
static void xgene_dma_init_channels(struct xgene_dma *pdma)
{
	struct xgene_dma_chan *chan;
	int i;

	pdma->ring_num = XGENE_DMA_RING_NUM;

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		chan->dev = pdma->dev;
		chan->pdma = pdma;
		chan->id = i;
		snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id);
	}
}
static int xgene_dma_get_resources(struct platform_device *pdev,
				   struct xgene_dma *pdma)
{
	struct resource *res;
	int irq, i;

	/* Get DMA csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get csr region\n");
		return -ENXIO;
	}

	pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!pdma->csr_dma) {
		dev_err(&pdev->dev, "Failed to ioremap csr region");
		return -ENOMEM;
	}

	/* Get DMA ring csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get ring csr region\n");
		return -ENXIO;
	}

	pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (!pdma->csr_ring) {
		dev_err(&pdev->dev, "Failed to ioremap ring csr region");
		return -ENOMEM;
	}

	/* Get DMA ring cmd csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
		return -ENXIO;
	}

	pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!pdma->csr_ring_cmd) {
		dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
		return -ENOMEM;
	}

	/* Get efuse csr region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get efuse csr region\n");
		return -ENXIO;
	}

	pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
				       resource_size(res));
	if (!pdma->csr_efuse) {
		dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
		return -ENOMEM;
	}

	/* Get DMA error interrupt */
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "Failed to get Error IRQ\n");
		return -ENXIO;
	}

	pdma->err_irq = irq;

	/* Get DMA Rx ring descriptor interrupts for all DMA channels */
	for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq <= 0) {
			dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
			return -ENXIO;
		}

		pdma->chan[i - 1].rx_irq = irq;
	}

	return 0;
}
static int xgene_dma_probe(struct platform_device *pdev)
{
	struct xgene_dma *pdma;
	int ret, i;

	pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
	if (!pdma)
		return -ENOMEM;

	pdma->dev = &pdev->dev;
	platform_set_drvdata(pdev, pdma);

	ret = xgene_dma_get_resources(pdev, pdma);
	if (ret)
		return ret;

	pdma->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdma->clk)) {
		dev_err(&pdev->dev, "Failed to get clk\n");
		return PTR_ERR(pdma->clk);
	}

	/* Enable clk before accessing registers */
	ret = clk_prepare_enable(pdma->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
		return ret;
	}

	/* Remove DMA RAM out of shutdown */
	ret = xgene_dma_init_mem(pdma);
	if (ret)
		goto err_clk_enable;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
	if (ret) {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		goto err_dma_mask;
	}

	/* Initialize DMA channels software state */
	xgene_dma_init_channels(pdma);

	/* Configure DMA rings */
	ret = xgene_dma_init_rings(pdma);
	if (ret)
		goto err_clk_enable;

	ret = xgene_dma_request_irqs(pdma);
	if (ret)
		goto err_request_irq;

	/* Configure and enable DMA engine */
	xgene_dma_init_hw(pdma);

	/* Register DMA device with linux async framework */
	ret = xgene_dma_init_async(pdma);
	if (ret)
		goto err_async_init;

	return 0;

err_async_init:
	xgene_dma_free_irqs(pdma);

err_request_irq:
	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
		xgene_dma_delete_chan_rings(&pdma->chan[i]);

err_dma_mask:
err_clk_enable:
	clk_disable_unprepare(pdma->clk);

	return ret;
}
static int xgene_dma_remove(struct platform_device *pdev)
{
	struct xgene_dma *pdma = platform_get_drvdata(pdev);
	struct xgene_dma_chan *chan;
	int i;

	xgene_dma_async_unregister(pdma);

	/* Mask interrupts and disable DMA engine */
	xgene_dma_mask_interrupts(pdma);
	xgene_dma_disable(pdma);
	xgene_dma_free_irqs(pdma);

	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
		chan = &pdma->chan[i];
		tasklet_kill(&chan->tasklet);
		xgene_dma_delete_chan_rings(chan);
	}

	clk_disable_unprepare(pdma->clk);

	return 0;
}
static const struct of_device_id xgene_dma_of_match_ptr[] = {
	{.compatible = "apm,xgene-storm-dma",},
	{},
};
MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);

static struct platform_driver xgene_dma_driver = {
	.probe = xgene_dma_probe,
	.remove = xgene_dma_remove,
	.driver = {
		.name = "X-Gene-DMA",
		.of_match_table = xgene_dma_of_match_ptr,
	},
};

module_platform_driver(xgene_dma_driver);

MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
MODULE_AUTHOR("Loc Ho <lho@apm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");