/*
 * Applied Micro X-Gene SoC DMA engine Driver
 *
 * Copyright (c) 2015, Applied Micro Circuits Corporation
 * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
 *          Loc Ho <lho@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * NOTE: PM support is currently not available.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include "dmaengine.h"

/* X-Gene DMA ring csr registers and bit definitions */
#define XGENE_DMA_RING_CONFIG			0x04
#define XGENE_DMA_RING_ENABLE			BIT(31)
#define XGENE_DMA_RING_ID			0x08
#define XGENE_DMA_RING_ID_SETUP(v)		((v) | BIT(31))
#define XGENE_DMA_RING_ID_BUF			0x0C
#define XGENE_DMA_RING_ID_BUF_SETUP(v)		(((v) << 9) | BIT(21))
#define XGENE_DMA_RING_THRESLD0_SET1		0x30
#define XGENE_DMA_RING_THRESLD0_SET1_VAL	0x64
#define XGENE_DMA_RING_THRESLD1_SET1		0x34
#define XGENE_DMA_RING_THRESLD1_SET1_VAL	0xC8
#define XGENE_DMA_RING_HYSTERESIS		0x68
#define XGENE_DMA_RING_HYSTERESIS_VAL		0xFFFFFFFF
#define XGENE_DMA_RING_STATE			0x6C
#define XGENE_DMA_RING_STATE_WR_BASE		0x70
#define XGENE_DMA_RING_NE_INT_MODE		0x017C
#define XGENE_DMA_RING_NE_INT_MODE_SET(m, v)	\
	((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
#define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v)	\
	((m) &= (~BIT(31 - (v))))
#define XGENE_DMA_RING_CLKEN			0xC208
#define XGENE_DMA_RING_SRST			0xC200
#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN		0xD070
#define XGENE_DMA_RING_BLK_MEM_RDY		0xD074
#define XGENE_DMA_RING_BLK_MEM_RDY_VAL		0xFFFFFFFF
#define XGENE_DMA_RING_DESC_CNT(v)		(((v) & 0x0001FFFE) >> 1)
#define XGENE_DMA_RING_ID_GET(owner, num)	(((owner) << 6) | (num))
#define XGENE_DMA_RING_DST_ID(v)		((1 << 10) | (v))
#define XGENE_DMA_RING_CMD_OFFSET		0x2C
#define XGENE_DMA_RING_CMD_BASE_OFFSET(v)	((v) << 6)
#define XGENE_DMA_RING_COHERENT_SET(m)		\
	(((u32 *)(m))[2] |= BIT(4))
#define XGENE_DMA_RING_ADDRL_SET(m, v)		\
	(((u32 *)(m))[2] |= (((v) >> 8) << 5))
#define XGENE_DMA_RING_ADDRH_SET(m, v)		\
	(((u32 *)(m))[3] |= ((v) >> 35))
#define XGENE_DMA_RING_ACCEPTLERR_SET(m)	\
	(((u32 *)(m))[3] |= BIT(19))
#define XGENE_DMA_RING_SIZE_SET(m, v)		\
	(((u32 *)(m))[3] |= ((v) << 23))
#define XGENE_DMA_RING_RECOMBBUF_SET(m)		\
	(((u32 *)(m))[3] |= BIT(27))
#define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m)	\
	(((u32 *)(m))[3] |= (0x7 << 28))
#define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m)	\
	(((u32 *)(m))[4] |= 0x3)
#define XGENE_DMA_RING_SELTHRSH_SET(m)		\
	(((u32 *)(m))[4] |= BIT(3))
#define XGENE_DMA_RING_TYPE_SET(m, v)		\
	(((u32 *)(m))[4] |= ((v) << 19))

/* X-Gene DMA device csr registers and bit definitions */
#define XGENE_DMA_IPBRR				0x0
#define XGENE_DMA_DEV_ID_RD(v)			((v) & 0x00000FFF)
#define XGENE_DMA_BUS_ID_RD(v)			(((v) >> 12) & 3)
#define XGENE_DMA_REV_NO_RD(v)			(((v) >> 14) & 3)
#define XGENE_DMA_GCR				0x10
#define XGENE_DMA_CH_SETUP(v)			\
	((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
#define XGENE_DMA_ENABLE(v)			((v) |= BIT(31))
#define XGENE_DMA_DISABLE(v)			((v) &= ~BIT(31))
#define XGENE_DMA_RAID6_CONT			0x14
#define XGENE_DMA_RAID6_MULTI_CTRL(v)		((v) << 24)
#define XGENE_DMA_INT				0x70
#define XGENE_DMA_INT_MASK			0x74
#define XGENE_DMA_INT_ALL_MASK			0xFFFFFFFF
#define XGENE_DMA_INT_ALL_UNMASK		0x0
#define XGENE_DMA_INT_MASK_SHIFT		0x14
#define XGENE_DMA_RING_INT0_MASK		0x90A0
#define XGENE_DMA_RING_INT1_MASK		0x90A8
#define XGENE_DMA_RING_INT2_MASK		0x90B0
#define XGENE_DMA_RING_INT3_MASK		0x90B8
#define XGENE_DMA_RING_INT4_MASK		0x90C0
#define XGENE_DMA_CFG_RING_WQ_ASSOC		0x90E0
#define XGENE_DMA_ASSOC_RING_MNGR1		0xFFFFFFFF
#define XGENE_DMA_MEM_RAM_SHUTDOWN		0xD070
#define XGENE_DMA_BLK_MEM_RDY			0xD074
#define XGENE_DMA_BLK_MEM_RDY_VAL		0xFFFFFFFF

/* X-Gene SoC EFUSE csr register and bit definition */
#define XGENE_SOC_JTAG1_SHADOW			0x18
#define XGENE_DMA_PQ_DISABLE_MASK		BIT(13)

/* X-Gene DMA Descriptor format */
#define XGENE_DMA_DESC_NV_BIT			BIT_ULL(50)
#define XGENE_DMA_DESC_IN_BIT			BIT_ULL(55)
#define XGENE_DMA_DESC_C_BIT			BIT_ULL(63)
#define XGENE_DMA_DESC_DR_BIT			BIT_ULL(61)
#define XGENE_DMA_DESC_ELERR_POS		46
#define XGENE_DMA_DESC_RTYPE_POS		56
#define XGENE_DMA_DESC_LERR_POS			60
#define XGENE_DMA_DESC_FLYBY_POS		4
#define XGENE_DMA_DESC_BUFLEN_POS		48
#define XGENE_DMA_DESC_HOENQ_NUM_POS		48

#define XGENE_DMA_DESC_NV_SET(m)		\
	(((u64 *)(m))[0] |= XGENE_DMA_DESC_NV_BIT)
#define XGENE_DMA_DESC_IN_SET(m)		\
	(((u64 *)(m))[0] |= XGENE_DMA_DESC_IN_BIT)
#define XGENE_DMA_DESC_RTYPE_SET(m, v)		\
	(((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_RTYPE_POS))
#define XGENE_DMA_DESC_BUFADDR_SET(m, v)	\
	(((u64 *)(m))[0] |= (v))
#define XGENE_DMA_DESC_BUFLEN_SET(m, v)		\
	(((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_BUFLEN_POS))
#define XGENE_DMA_DESC_C_SET(m)			\
	(((u64 *)(m))[1] |= XGENE_DMA_DESC_C_BIT)
#define XGENE_DMA_DESC_FLYBY_SET(m, v)		\
	(((u64 *)(m))[2] |= ((v) << XGENE_DMA_DESC_FLYBY_POS))
#define XGENE_DMA_DESC_MULTI_SET(m, v, i)	\
	(((u64 *)(m))[2] |= ((u64)(v) << (((i) + 1) * 8)))
#define XGENE_DMA_DESC_DR_SET(m)		\
	(((u64 *)(m))[2] |= XGENE_DMA_DESC_DR_BIT)
#define XGENE_DMA_DESC_DST_ADDR_SET(m, v)	\
	(((u64 *)(m))[3] |= (v))
#define XGENE_DMA_DESC_H0ENQ_NUM_SET(m, v)	\
	(((u64 *)(m))[3] |= ((u64)(v) << XGENE_DMA_DESC_HOENQ_NUM_POS))
#define XGENE_DMA_DESC_ELERR_RD(m)		\
	(((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
#define XGENE_DMA_DESC_LERR_RD(m)		\
	(((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
#define XGENE_DMA_DESC_STATUS(elerr, lerr)	\
	(((elerr) << 4) | (lerr))
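/*
 * The status value built above is used to index xgene_dma_desc_err[]:
 * the 2-bit element error code lands in the upper nibble and the 3-bit
 * link error code in the lower nibble.
 */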

/* X-Gene DMA descriptor empty s/w signature */
#define XGENE_DMA_DESC_EMPTY_INDEX		0
#define XGENE_DMA_DESC_EMPTY_SIGNATURE		~0ULL
#define XGENE_DMA_DESC_SET_EMPTY(m)		\
	(((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] =	\
	 XGENE_DMA_DESC_EMPTY_SIGNATURE)
#define XGENE_DMA_DESC_IS_EMPTY(m)		\
	(((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] ==	\
	 XGENE_DMA_DESC_EMPTY_SIGNATURE)

/* X-Gene DMA configurable parameters defines */
#define XGENE_DMA_RING_NUM			512
#define XGENE_DMA_BUFNUM			0x0
#define XGENE_DMA_CPU_BUFNUM			0x18
#define XGENE_DMA_RING_OWNER_DMA		0x03
#define XGENE_DMA_RING_OWNER_CPU		0x0F
#define XGENE_DMA_RING_TYPE_REGULAR		0x01
#define XGENE_DMA_RING_WQ_DESC_SIZE		32	/* 32 Bytes */
#define XGENE_DMA_RING_NUM_CONFIG		5
#define XGENE_DMA_MAX_CHANNEL			4
#define XGENE_DMA_XOR_CHANNEL			0
#define XGENE_DMA_PQ_CHANNEL			1
#define XGENE_DMA_MAX_BYTE_CNT			0x4000	/* 16 KB */
#define XGENE_DMA_MAX_64B_DESC_BYTE_CNT		0x14000	/* 80 KB */
#define XGENE_DMA_XOR_ALIGNMENT			6	/* 64 Bytes */
#define XGENE_DMA_MAX_XOR_SRC			5
#define XGENE_DMA_16K_BUFFER_LEN_CODE		0x0
#define XGENE_DMA_INVALID_LEN_CODE		0x7800

/* X-Gene DMA descriptor error codes */
#define ERR_DESC_AXI		0x01
#define ERR_BAD_DESC		0x02
#define ERR_READ_DATA_AXI	0x03
#define ERR_WRITE_DATA_AXI	0x04
#define ERR_FBP_TIMEOUT		0x05
#define ERR_ECC			0x06
#define ERR_DIFF_SIZE		0x08
#define ERR_SCT_GAT_LEN		0x09
#define ERR_CRC_ERR		0x11
#define ERR_CHKSUM		0x12
#define ERR_DIF			0x13

/* X-Gene DMA error interrupt codes */
#define ERR_DIF_SIZE_INT	0x0
#define ERR_GS_ERR_INT		0x1
#define ERR_FPB_TIMEO_INT	0x2
#define ERR_WFIFO_OVF_INT	0x3
#define ERR_RFIFO_OVF_INT	0x4
#define ERR_WR_TIMEO_INT	0x5
#define ERR_RD_TIMEO_INT	0x6
#define ERR_WR_ERR_INT		0x7
#define ERR_RD_ERR_INT		0x8
#define ERR_BAD_DESC_INT	0x9
#define ERR_DESC_DST_INT	0xA
#define ERR_DESC_SRC_INT	0xB

/* X-Gene DMA flyby operation code */
#define FLYBY_2SRC_XOR		0x8
#define FLYBY_3SRC_XOR		0x9
#define FLYBY_4SRC_XOR		0xA
#define FLYBY_5SRC_XOR		0xB

/* X-Gene DMA SW descriptor flags */
#define XGENE_DMA_FLAG_64B_DESC	BIT(0)

/* Define to dump X-Gene DMA descriptor */
#define XGENE_DMA_DESC_DUMP(desc, m)	\
	print_hex_dump(KERN_ERR, (m),	\
		       DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)

#define to_dma_desc_sw(tx)	\
	container_of(tx, struct xgene_dma_desc_sw, tx)
#define to_dma_chan(dchan)	\
	container_of(dchan, struct xgene_dma_chan, dma_chan)

#define chan_dbg(chan, fmt, arg...)	\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)	\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

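/*
 * A hardware descriptor is four 64-bit message words (m0-m3, 32 bytes,
 * i.e. one XGENE_DMA_RING_WQ_DESC_SIZE slot). Operations that need the
 * extended format chain two of these (desc1 and desc2 in the sw
 * descriptor below) to form a 64B descriptor.
 */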
struct xgene_dma_desc_hw {
	u64 m0;
	u64 m1;
	u64 m2;
	u64 m3;
};

enum xgene_dma_ring_cfgsize {
	XGENE_DMA_RING_CFG_SIZE_512B,
	XGENE_DMA_RING_CFG_SIZE_2KB,
	XGENE_DMA_RING_CFG_SIZE_16KB,
	XGENE_DMA_RING_CFG_SIZE_64KB,
	XGENE_DMA_RING_CFG_SIZE_512KB,
	XGENE_DMA_RING_CFG_SIZE_INVALID
};

struct xgene_dma_ring {
	struct xgene_dma *pdma;
	u8 buf_num;
	u16 id;
	u16 num;
	u16 head;
	u16 owner;
	u16 slots;
	u16 dst_ring_num;
	u32 size;
	void __iomem *cmd;
	void __iomem *cmd_base;
	dma_addr_t desc_paddr;
	u32 state[XGENE_DMA_RING_NUM_CONFIG];
	enum xgene_dma_ring_cfgsize cfgsize;
	union {
		void *desc_vaddr;
		struct xgene_dma_desc_hw *desc_hw;
	};
};

struct xgene_dma_desc_sw {
	struct xgene_dma_desc_hw desc1;
	struct xgene_dma_desc_hw desc2;
	u32 flags;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor tx;
};

/**
 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
 * @dma_chan: dmaengine channel object member
 * @pdma: X-Gene DMA device structure reference
 * @dev: struct device reference for dma mapping api
 * @id: raw id of this channel
 * @rx_irq: channel IRQ
 * @name: name of X-Gene DMA channel
 * @lock: serializes enqueue/dequeue operations to the descriptor pool
 * @pending: number of transaction requests pushed to the DMA controller for
 *	execution, but still waiting for completion
 * @max_outstanding: max number of outstanding requests we can push to channel
 * @ld_pending: descriptors which are queued to run, but have not yet been
 *	submitted to the hardware for execution
 * @ld_running: descriptors which are currently being executed by the hardware
 * @ld_completed: descriptors which have finished execution by the hardware.
 *	These descriptors have already had their cleanup actions run. They
 *	are waiting for the ACK bit to be set by the async tx API.
 * @desc_pool: descriptor pool for DMA operations
 * @tasklet: bottom half where all completed descriptors are cleaned up
 * @tx_ring: transmit ring descriptor that we use to prepare actual
 *	descriptors for further execution
 * @rx_ring: receive ring descriptor that we use to get completed DMA
 *	descriptors during cleanup time
 */
struct xgene_dma_chan {
	struct dma_chan dma_chan;
	struct xgene_dma *pdma;
	struct device *dev;
	int id;
	int rx_irq;
	char name[8];
	spinlock_t lock;
	int pending;
	int max_outstanding;
	struct list_head ld_pending;
	struct list_head ld_running;
	struct list_head ld_completed;
	struct dma_pool *desc_pool;
	struct tasklet_struct tasklet;
	struct xgene_dma_ring tx_ring;
	struct xgene_dma_ring rx_ring;
};

/**
 * struct xgene_dma - internal representation of an X-Gene DMA device
 * @dev: struct device reference
 * @clk: DMA device clock
 * @err_irq: DMA error irq number
 * @ring_num: start id number for DMA ring
 * @csr_dma: base for DMA register access
 * @csr_ring: base for DMA ring register access
 * @csr_ring_cmd: base for DMA ring command register access
 * @csr_efuse: base for efuse register access
 * @dma_dev: embedded struct dma_device
 * @chan: reference to X-Gene DMA channels
 */
struct xgene_dma {
	struct device *dev;
	struct clk *clk;
	int err_irq;
	int ring_num;
	void __iomem *csr_dma;
	void __iomem *csr_ring;
	void __iomem *csr_ring_cmd;
	void __iomem *csr_efuse;
	struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
	struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
};

static const char * const xgene_dma_desc_err[] = {
	[ERR_DESC_AXI] = "AXI error when reading src/dst link list",
	[ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
	[ERR_READ_DATA_AXI] = "AXI error when reading data",
	[ERR_WRITE_DATA_AXI] = "AXI error when writing data",
	[ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
	[ERR_ECC] = "ECC double bit error",
	[ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
	[ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
	[ERR_CRC_ERR] = "CRC error",
	[ERR_CHKSUM] = "Checksum error",
	[ERR_DIF] = "DIF error",
};

static const char * const xgene_dma_err[] = {
	[ERR_DIF_SIZE_INT] = "DIF size error",
	[ERR_GS_ERR_INT] = "Gather scatter not same size error",
	[ERR_FPB_TIMEO_INT] = "Free pool time out error",
	[ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
	[ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
	[ERR_WR_TIMEO_INT] = "Write time out error",
	[ERR_RD_TIMEO_INT] = "Read time out error",
	[ERR_WR_ERR_INT] = "HBF bus write error",
	[ERR_RD_ERR_INT] = "HBF bus read error",
	[ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
	[ERR_DESC_DST_INT] = "HFB reading dst link address error",
	[ERR_DESC_SRC_INT] = "HFB reading src link address error",
};

static bool is_pq_enabled(struct xgene_dma *pdma)
{
	u32 val;

	val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
	return !(val & XGENE_DMA_PQ_DISABLE_MASK);
}

static void xgene_dma_cpu_to_le64(u64 *desc, int count)
{
	int i;

	for (i = 0; i < count; i++)
		desc[i] = cpu_to_le64(desc[i]);
}

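/*
 * Buffer lengths below 16 KB are encoded verbatim in the descriptor;
 * a full 16 KB chunk (XGENE_DMA_MAX_BYTE_CNT) is encoded with the
 * special 16K length code (0x0) instead.
 */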
static u16 xgene_dma_encode_len(u32 len)
{
	return (len < XGENE_DMA_MAX_BYTE_CNT) ?
		len : XGENE_DMA_16K_BUFFER_LEN_CODE;
}

static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
{
	static u8 flyby_type[] = {
		FLYBY_2SRC_XOR, /* Dummy */
		FLYBY_2SRC_XOR, /* Dummy */
		FLYBY_2SRC_XOR,
		FLYBY_3SRC_XOR,
		FLYBY_4SRC_XOR,
		FLYBY_5SRC_XOR
	};

	return flyby_type[src_cnt];
}

static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state = ioread32(&cmd_base[1]);

	return XGENE_DMA_RING_DESC_CNT(ring_state);
}

static void xgene_dma_set_src_buffer(void *ext8, size_t *len,
				     dma_addr_t *paddr)
{
	size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
			*len : XGENE_DMA_MAX_BYTE_CNT;

	XGENE_DMA_DESC_BUFADDR_SET(ext8, *paddr);
	XGENE_DMA_DESC_BUFLEN_SET(ext8, xgene_dma_encode_len(nbytes));
	*len -= nbytes;
	*paddr += nbytes;
}

static void xgene_dma_invalidate_buffer(void *ext8)
{
	XGENE_DMA_DESC_BUFLEN_SET(ext8, XGENE_DMA_INVALID_LEN_CODE);
}

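/*
 * The extended (second) descriptor stores its extra source buffers with
 * the two 64-bit words of each 16-byte pair swapped, so an even index
 * maps to the following word and an odd index to the preceding one.
 */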
static void *xgene_dma_lookup_ext8(u64 *desc, int idx)
{
	return (idx % 2) ? (desc + idx - 1) : (desc + idx + 1);
}

static void xgene_dma_init_desc(void *desc, u16 dst_ring_num)
{
	XGENE_DMA_DESC_C_SET(desc); /* Coherent IO */
	XGENE_DMA_DESC_IN_SET(desc);
	XGENE_DMA_DESC_H0ENQ_NUM_SET(desc, dst_ring_num);
	XGENE_DMA_DESC_RTYPE_SET(desc, XGENE_DMA_RING_OWNER_DMA);
}

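/*
 * A single sw descriptor can carry one 16 KB source buffer in desc1 plus
 * up to four more in desc2, i.e. up to 80 KB
 * (XGENE_DMA_MAX_64B_DESC_BYTE_CNT) per prepared copy.
 */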
static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw,
				    dma_addr_t dst, dma_addr_t src,
				    size_t len)
{
	void *desc1, *desc2;
	int i;

	/* Get 1st descriptor */
	desc1 = &desc_sw->desc1;
	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);

	/* Set destination address */
	XGENE_DMA_DESC_DR_SET(desc1);
	XGENE_DMA_DESC_DST_ADDR_SET(desc1, dst);

	/* Set 1st source address */
	xgene_dma_set_src_buffer(desc1 + 8, &len, &src);

	if (len <= 0) {
		desc2 = NULL;
		goto skip_additional_src;
	}

	/*
	 * We need to split this source buffer,
	 * and need to use the 2nd descriptor
	 */
	desc2 = &desc_sw->desc2;
	XGENE_DMA_DESC_NV_SET(desc1);

	/* Set 2nd to 5th source address */
	for (i = 0; i < 4 && len; i++)
		xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
					 &len, &src);

	/* Invalidate unused source address field */
	for (; i < 4; i++)
		xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));

	/* Update flag that we have prepared a 64B descriptor */
	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;

skip_additional_src:
	/* Hardware stores descriptor in little endian format */
	xgene_dma_cpu_to_le64(desc1, 4);
	if (desc2)
		xgene_dma_cpu_to_le64(desc2, 4);
}

static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
				    struct xgene_dma_desc_sw *desc_sw,
				    dma_addr_t *dst, dma_addr_t *src,
				    u32 src_cnt, size_t *nbytes,
				    const u8 *scf)
{
	void *desc1, *desc2;
	size_t len = *nbytes;
	int i;

	desc1 = &desc_sw->desc1;
	desc2 = &desc_sw->desc2;

	/* Initialize DMA descriptor */
	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);

	/* Set destination address */
	XGENE_DMA_DESC_DR_SET(desc1);
	XGENE_DMA_DESC_DST_ADDR_SET(desc1, *dst);

	/* We have multiple source addresses, so need to set the NV bit */
	XGENE_DMA_DESC_NV_SET(desc1);

	/* Set flyby opcode */
	XGENE_DMA_DESC_FLYBY_SET(desc1, xgene_dma_encode_xor_flyby(src_cnt));

	/* Set 1st to 5th source addresses */
	for (i = 0; i < src_cnt; i++) {
		len = *nbytes;
		xgene_dma_set_src_buffer((i == 0) ? (desc1 + 8) :
					 xgene_dma_lookup_ext8(desc2, i - 1),
					 &len, &src[i]);
		XGENE_DMA_DESC_MULTI_SET(desc1, scf[i], i);
	}

	/* Hardware stores descriptor in little endian format */
	xgene_dma_cpu_to_le64(desc1, 4);
	xgene_dma_cpu_to_le64(desc2, 4);

	/* Update meta data */
	*nbytes = len;
	*dst += XGENE_DMA_MAX_BYTE_CNT;

	/* We always need a 64B descriptor to perform xor or pq operations */
	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
}

static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xgene_dma_desc_sw *desc;
	struct xgene_dma_chan *chan;
	dma_cookie_t cookie;

	if (unlikely(!tx))
		return -EINVAL;

	chan = to_dma_chan(tx->chan);
	desc = to_dma_desc_sw(tx);

	spin_lock_bh(&chan->lock);

	cookie = dma_cookie_assign(tx);

	/* Add this transaction list onto the tail of the pending queue */
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);

	spin_unlock_bh(&chan->lock);

	return cookie;
}

static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
				       struct xgene_dma_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}

static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
				 struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc;
	dma_addr_t phys;

	desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (!desc) {
		chan_err(chan, "Failed to allocate LDs\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc->tx_list);
	desc->tx.phys = phys;
	desc->tx.tx_submit = xgene_dma_tx_submit;
	dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}

/**
 * xgene_dma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan: X-Gene DMA channel
 *
 * This function is used on all completed and acked descriptors.
 */
static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
		if (async_tx_test_ack(&desc->tx))
			xgene_dma_clean_descriptor(chan, desc);
	}
}

/**
 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: X-Gene DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies.
 */
static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
					      struct xgene_dma_desc_sw *desc)
{
	struct dma_async_tx_descriptor *tx = &desc->tx;

	/*
	 * If this is not the last transaction in the group,
	 * then no need to complete cookie and run any callback as
	 * this is not the tx_descriptor which had been sent to caller
	 * of this DMA request
	 */

	if (tx->cookie == 0)
		return;

	dma_cookie_complete(tx);

	/* Run the link descriptor callback function */
	if (tx->callback)
		tx->callback(tx->callback_param);

	dma_descriptor_unmap(tx);

	/* Run any dependencies */
	dma_run_dependencies(tx);
}

/**
 * xgene_dma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: X-Gene DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if acked by async_tx api,
 * else move it to queue ld_completed.
 */
static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
					       struct xgene_dma_desc_sw *desc)
{
	/* Remove from the list of running transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->tx)) {
		/*
		 * Move this descriptor to the list of descriptors which is
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}

static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
				   struct xgene_dma_desc_sw *desc_sw)
{
	struct xgene_dma_desc_hw *desc_hw;

	/* Check if we can push more descriptors to hw for execution */
	if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
		return -EBUSY;

	/* Get hw descriptor from DMA tx ring */
	desc_hw = &ring->desc_hw[ring->head];

	/*
	 * Increment the head count to point to the next
	 * descriptor for next time
	 */
	if (++ring->head == ring->slots)
		ring->head = 0;

	/* Copy prepared sw descriptor data to hw descriptor */
	memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));

	/*
	 * Check if we have prepared a 64B descriptor,
	 * in this case we need one more hw descriptor
	 */
	if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
		desc_hw = &ring->desc_hw[ring->head];

		if (++ring->head == ring->slots)
			ring->head = 0;

		memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
	}

	/* Notify the hw that we have descriptor ready for execution */
	iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
		  2 : 1, ring->cmd);

	return 0;
}

/**
 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
 * @chan : X-Gene DMA channel
 *
 * LOCKING: must hold chan->lock
 */
static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
{
	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
	int ret;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "No pending LDs\n");
		return;
	}

	/*
	 * Move elements from the queue of pending transactions onto the list
	 * of running transactions and push them to hw for further execution
	 */
	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
		/*
		 * Check if we have pushed the max number of transactions
		 * the hw can take; if so, stop here and push the remaining
		 * elements from the pending ld queue after completing some
		 * descriptors that we have already pushed
		 */
		if (chan->pending >= chan->max_outstanding)
			return;

		ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
		if (ret)
			return;

		/*
		 * Delete this element from ld pending queue and append it to
		 * ld running queue
		 */
		list_move_tail(&desc_sw->node, &chan->ld_running);

		/* Increment the pending transaction count */
		chan->pending++;
	}
}

/**
 * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed, to be freed once the 'ack' flag is set
 * @chan: X-Gene DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if flag 'ack' is set.
 */
static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
{
	struct xgene_dma_ring *ring = &chan->rx_ring;
	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
	struct xgene_dma_desc_hw *desc_hw;
	u8 status;

	/* Clean already completed and acked descriptors */
	xgene_dma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
		/* Get subsequent hw descriptor from DMA rx ring */
		desc_hw = &ring->desc_hw[ring->head];

		/* Check if this descriptor has been completed */
		if (unlikely(XGENE_DMA_DESC_IS_EMPTY(desc_hw)))
			break;

		if (++ring->head == ring->slots)
			ring->head = 0;

		/* Check if we have any error with DMA transactions */
		status = XGENE_DMA_DESC_STATUS(
				XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
					desc_hw->m0)),
				XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
					desc_hw->m0)));
		if (status) {
			/* Print the DMA error type */
			chan_err(chan, "%s\n", xgene_dma_desc_err[status]);

			/*
			 * We have a DMA transaction error here. Dump DMA Tx
			 * and Rx descriptors for this request
			 */
			XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
					    "X-Gene DMA TX DESC1: ");

			if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
				XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
						    "X-Gene DMA TX DESC2: ");

			XGENE_DMA_DESC_DUMP(desc_hw,
					    "X-Gene DMA RX ERR DESC: ");
		}

		/* Notify the hw about this completed descriptor */
		iowrite32(-1, ring->cmd);

		/* Mark this hw descriptor as processed */
		XGENE_DMA_DESC_SET_EMPTY(desc_hw);

		xgene_dma_run_tx_complete_actions(chan, desc_sw);

		xgene_dma_clean_running_descriptor(chan, desc_sw);

		/*
		 * Decrement the pending transaction count
		 * as we have processed one
		 */
		chan->pending--;
	}

	/*
	 * Start any pending transactions automatically.
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	xgene_chan_xfer_ld_pending(chan);
}

static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct xgene_dma_desc_sw),
					  0, 0);
	if (!chan->desc_pool) {
		chan_err(chan, "Failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	chan_dbg(chan, "Allocated descriptor pool\n");

	return 1;
}

/**
 * xgene_dma_free_desc_list - Free all descriptors in a queue
 * @chan: X-Gene DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->lock
 */
static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
				     struct list_head *list)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		xgene_dma_clean_descriptor(chan, desc);
}

static void xgene_dma_free_tx_desc_list(struct xgene_dma_chan *chan,
					struct list_head *list)
{
	struct xgene_dma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		xgene_dma_clean_descriptor(chan, desc);
}

static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	chan_dbg(chan, "Free all resources\n");

	if (!chan->desc_pool)
		return;

	spin_lock_bh(&chan->lock);

	/* Process all running descriptors */
	xgene_dma_cleanup_descriptors(chan);

	/* Clean all link descriptor queues */
	xgene_dma_free_desc_list(chan, &chan->ld_pending);
	xgene_dma_free_desc_list(chan, &chan->ld_running);
	xgene_dma_free_desc_list(chan, &chan->ld_completed);

	spin_unlock_bh(&chan->lock);

	/* Delete this channel DMA pool */
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	size_t copy;

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Create the largest transaction possible */
		copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);

		/* Prepare DMA descriptor */
		xgene_dma_prep_cpy_desc(chan, new, dst, src, copy);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Update metadata */
		len -= copy;
		dst += copy;
		src += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_tx_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
	struct dma_chan *dchan, struct scatterlist *dst_sg,
	u32 dst_nents, struct scatterlist *src_sg,
	u32 src_nents, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new = NULL;
	struct xgene_dma_chan *chan;
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	if (unlikely(!dchan))
		return NULL;

	if (unlikely(!dst_nents || !src_nents))
		return NULL;

	if (unlikely(!dst_sg || !src_sg))
		return NULL;

	chan = to_dma_chan(dchan);

	/* Get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);
	dst_nents--;
	src_nents--;

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Prepare DMA descriptor */
		xgene_dma_prep_cpy_desc(chan, new, dst, src, len);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {
			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {
			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	if (!new)
		return NULL;

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;
fail:
	if (!first)
		return NULL;

	xgene_dma_free_tx_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
	u32 src_cnt, size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
				0x01, 0x01, 0x01, 0x01, 0x01};

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		/* Prepare xor DMA descriptor */
		xgene_dma_prep_xor_desc(chan, new, &dst, src,
					src_cnt, &len, multi);

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_tx_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
	struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
	u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct xgene_dma_desc_sw *first = NULL, *new;
	struct xgene_dma_chan *chan;
	size_t _len = len;
	dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};

	if (unlikely(!dchan || !len))
		return NULL;

	chan = to_dma_chan(dchan);

	/*
	 * Save source addresses in a local variable; we may have to
	 * prepare two descriptors to generate P and Q if both are
	 * enabled in the flags by the client
	 */
	memcpy(_src, src, sizeof(*src) * src_cnt);

	if (flags & DMA_PREP_PQ_DISABLE_P)
		len = 0;

	if (flags & DMA_PREP_PQ_DISABLE_Q)
		_len = 0;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = xgene_dma_alloc_descriptor(chan);
		if (!new)
			goto fail;

		if (!first)
			first = new;

		new->tx.cookie = 0;
		async_tx_ack(&new->tx);

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/*
		 * Prepare DMA descriptor to generate P,
		 * if DMA_PREP_PQ_DISABLE_P flag is not set
		 */
		if (len) {
			xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
						src_cnt, &len, multi);
			continue;
		}

		/*
		 * Prepare DMA descriptor to generate Q,
		 * if DMA_PREP_PQ_DISABLE_Q flag is not set
		 */
		if (_len) {
			xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
						src_cnt, &_len, scf);
		}
	} while (len || _len);

	new->tx.flags = flags; /* client is in control of this ack */
	new->tx.cookie = -EBUSY;
	list_splice(&first->tx_list, &new->tx_list);

	return &new->tx;

fail:
	if (!first)
		return NULL;

	xgene_dma_free_tx_desc_list(chan, &first->tx_list);
	return NULL;
}

static void xgene_dma_issue_pending(struct dma_chan *dchan)
{
	struct xgene_dma_chan *chan = to_dma_chan(dchan);

	spin_lock_bh(&chan->lock);
	xgene_chan_xfer_ld_pending(chan);
	spin_unlock_bh(&chan->lock);
}

static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

static void xgene_dma_tasklet_cb(unsigned long data)
{
	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;

	spin_lock_bh(&chan->lock);

	/* Run all cleanup for descriptors which have been completed */
	xgene_dma_cleanup_descriptors(chan);

	/* Re-enable DMA channel IRQ */
	enable_irq(chan->rx_irq);

	spin_unlock_bh(&chan->lock);
}

static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
{
	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;

	BUG_ON(!chan);

	/*
	 * Disable DMA channel IRQ until we process completed
	 * descriptors
	 */
	disable_irq_nosync(chan->rx_irq);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t xgene_dma_err_isr(int irq, void *id)
{
	struct xgene_dma *pdma = (struct xgene_dma *)id;
	unsigned long int_mask;
	u32 val, i;

	val = ioread32(pdma->csr_dma + XGENE_DMA_INT);

	/* Clear DMA interrupts */
	iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);

	/* Print DMA error info */
	int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
	for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
		dev_err(pdma->dev,
			"Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);

	return IRQ_HANDLED;
}

static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
{
	int i;

	iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);

	for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
		iowrite32(ring->state[i], ring->pdma->csr_ring +
			  XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
}

static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
{
	memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
	xgene_dma_wr_ring_state(ring);
}

static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
{
	void *ring_cfg = ring->state;
	u64 addr = ring->desc_paddr;
	void *desc;
	u32 i, val;

	ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;

	/* Clear DMA ring state */
	xgene_dma_clr_ring_state(ring);

	/* Set DMA ring type */
	XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);

	if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
		/* Set recombination buffer and timeout */
		XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
		XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
		XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
	}

	/* Initialize DMA ring state */
	XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
	XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
	XGENE_DMA_RING_COHERENT_SET(ring_cfg);
	XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
	XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
	XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);

	/* Write DMA ring configurations */
	xgene_dma_wr_ring_state(ring);

	/* Set DMA ring id */
	iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
		  ring->pdma->csr_ring + XGENE_DMA_RING_ID);

	/* Set DMA ring buffer */
	iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
		  ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);

	if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
		return;

	/* Set empty signature to DMA Rx ring descriptors */
	for (i = 0; i < ring->slots; i++) {
		desc = &ring->desc_hw[i];
		XGENE_DMA_DESC_SET_EMPTY(desc);
	}

	/* Enable DMA Rx ring interrupt */
	val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
	XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
	iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
}

static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
{
	u32 ring_id, val;

	if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
		/* Disable DMA Rx ring interrupt */
		val = ioread32(ring->pdma->csr_ring +
			       XGENE_DMA_RING_NE_INT_MODE);
		XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
		iowrite32(val, ring->pdma->csr_ring +
			  XGENE_DMA_RING_NE_INT_MODE);
	}

	/* Clear DMA ring state */
	ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
	iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);

	iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
	xgene_dma_clr_ring_state(ring);
}

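/*
 * Each ring owns a 64-byte window of command registers
 * (XGENE_DMA_RING_CMD_BASE_OFFSET shifts the index by 6); the window is
 * indexed by the ring number relative to the first DMA ring
 * (XGENE_DMA_RING_NUM).
 */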
static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
{
	ring->cmd_base = ring->pdma->csr_ring_cmd +
				XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
							XGENE_DMA_RING_NUM));

	ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
}

static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
				   enum xgene_dma_ring_cfgsize cfgsize)
{
	int size;

	switch (cfgsize) {
	case XGENE_DMA_RING_CFG_SIZE_512B:
		size = 0x200;
		break;
	case XGENE_DMA_RING_CFG_SIZE_2KB:
		size = 0x800;
		break;
	case XGENE_DMA_RING_CFG_SIZE_16KB:
		size = 0x4000;
		break;
	case XGENE_DMA_RING_CFG_SIZE_64KB:
		size = 0x10000;
		break;
	case XGENE_DMA_RING_CFG_SIZE_512KB:
		size = 0x80000;
		break;
	default:
		chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
		return -EINVAL;
	}

	return size;
}

static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
{
	/* Clear DMA ring configurations */
	xgene_dma_clear_ring(ring);

	/* De-allocate DMA ring descriptor */
	if (ring->desc_vaddr) {
		dma_free_coherent(ring->pdma->dev, ring->size,
				  ring->desc_vaddr, ring->desc_paddr);
		ring->desc_vaddr = NULL;
	}
}

static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
{
	xgene_dma_delete_ring_one(&chan->rx_ring);
	xgene_dma_delete_ring_one(&chan->tx_ring);
}

static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
				     struct xgene_dma_ring *ring,
				     enum xgene_dma_ring_cfgsize cfgsize)
{
	/* Setup DMA ring descriptor variables */
	ring->pdma = chan->pdma;
	ring->cfgsize = cfgsize;
	ring->num = chan->pdma->ring_num++;
	ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);

	ring->size = xgene_dma_get_ring_size(chan, cfgsize);
	if (ring->size <= 0)
		return ring->size;

	/* Allocate memory for DMA ring descriptor */
	ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
					       &ring->desc_paddr, GFP_KERNEL);
	if (!ring->desc_vaddr) {
		chan_err(chan, "Failed to allocate ring desc\n");
		return -ENOMEM;
	}

	/* Configure and enable DMA ring */
	xgene_dma_set_ring_cmd(ring);
	xgene_dma_setup_ring(ring);

	return 0;
}

static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
{
	struct xgene_dma_ring *rx_ring = &chan->rx_ring;
	struct xgene_dma_ring *tx_ring = &chan->tx_ring;
	int ret;

	/* Create DMA Rx ring descriptor */
	rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
	rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;

	ret = xgene_dma_create_ring_one(chan, rx_ring,
					XGENE_DMA_RING_CFG_SIZE_64KB);
	if (ret)
		return ret;

	chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
		 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);

	/* Create DMA Tx ring descriptor */
	tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
	tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;

	ret = xgene_dma_create_ring_one(chan, tx_ring,
					XGENE_DMA_RING_CFG_SIZE_64KB);
	if (ret) {
		xgene_dma_delete_ring_one(rx_ring);
		return ret;
	}

	tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);

	chan_dbg(chan,
		 "Tx ring id 0x%X num %d desc 0x%p\n",
		 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);

	/* Set the max outstanding requests possible to this channel */
	chan->max_outstanding = rx_ring->slots;

	return ret;
}
1518 | ||
1519 | static int xgene_dma_init_rings(struct xgene_dma *pdma) | |
1520 | { | |
1521 | int ret, i, j; | |
1522 | ||
1523 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | |
1524 | ret = xgene_dma_create_chan_rings(&pdma->chan[i]); | |
1525 | if (ret) { | |
1526 | for (j = 0; j < i; j++) | |
1527 | xgene_dma_delete_chan_rings(&pdma->chan[j]); | |
1528 | return ret; | |
1529 | } | |
1530 | } | |
1531 | ||
1532 | return ret; | |
1533 | } | |
1534 | ||
1535 | static void xgene_dma_enable(struct xgene_dma *pdma) | |
1536 | { | |
1537 | u32 val; | |
1538 | ||
1539 | /* Configure and enable DMA engine */ | |
1540 | val = ioread32(pdma->csr_dma + XGENE_DMA_GCR); | |
1541 | XGENE_DMA_CH_SETUP(val); | |
1542 | XGENE_DMA_ENABLE(val); | |
1543 | iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR); | |
1544 | } | |
1545 | ||
1546 | static void xgene_dma_disable(struct xgene_dma *pdma) | |
1547 | { | |
1548 | u32 val; | |
1549 | ||
1550 | val = ioread32(pdma->csr_dma + XGENE_DMA_GCR); | |
1551 | XGENE_DMA_DISABLE(val); | |
1552 | iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR); | |
1553 | } | |
1554 | ||
1555 | static void xgene_dma_mask_interrupts(struct xgene_dma *pdma) | |
1556 | { | |
1557 | /* | |
1558 | * Mask DMA ring overflow, underflow and | |
1559 | * AXI write/read error interrupts | |
1560 | */ | |
1561 | iowrite32(XGENE_DMA_INT_ALL_MASK, | |
1562 | pdma->csr_dma + XGENE_DMA_RING_INT0_MASK); | |
1563 | iowrite32(XGENE_DMA_INT_ALL_MASK, | |
1564 | pdma->csr_dma + XGENE_DMA_RING_INT1_MASK); | |
1565 | iowrite32(XGENE_DMA_INT_ALL_MASK, | |
1566 | pdma->csr_dma + XGENE_DMA_RING_INT2_MASK); | |
1567 | iowrite32(XGENE_DMA_INT_ALL_MASK, | |
1568 | pdma->csr_dma + XGENE_DMA_RING_INT3_MASK); | |
1569 | iowrite32(XGENE_DMA_INT_ALL_MASK, | |
1570 | pdma->csr_dma + XGENE_DMA_RING_INT4_MASK); | |
1571 | ||
1572 | /* Mask DMA error interrupts */ | |
1573 | iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK); | |
1574 | } | |
1575 | ||
1576 | static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma) | |
1577 | { | |
1578 | /* | |
1579 | * Unmask DMA ring overflow, underflow and | |
1580 | * AXI write/read error interrupts | |
1581 | */ | |
1582 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | |
1583 | pdma->csr_dma + XGENE_DMA_RING_INT0_MASK); | |
1584 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | |
1585 | pdma->csr_dma + XGENE_DMA_RING_INT1_MASK); | |
1586 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | |
1587 | pdma->csr_dma + XGENE_DMA_RING_INT2_MASK); | |
1588 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | |
1589 | pdma->csr_dma + XGENE_DMA_RING_INT3_MASK); | |
1590 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | |
1591 | pdma->csr_dma + XGENE_DMA_RING_INT4_MASK); | |
1592 | ||
1593 | /* Unmask DMA error interrupts */ | |
1594 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | |
1595 | pdma->csr_dma + XGENE_DMA_INT_MASK); | |
1596 | } | |
1597 | ||
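| /* One-time engine setup: associate the work queues with ring manager 1, | 
|  * program the RAID6 polynomial control when PQ is enabled in hardware, | 
|  * then enable the engine, unmask its interrupts and report the IP | 
|  * revision and bus/device IDs read from the IPBRR register. | 
|  */ | 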
1598 | static void xgene_dma_init_hw(struct xgene_dma *pdma) | |
1599 | { | |
1600 | u32 val; | |
1601 | ||
1602 | /* Associate the DMA rings with the corresponding ring manager HW */ | 
1603 | iowrite32(XGENE_DMA_ASSOC_RING_MNGR1, | |
1604 | pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC); | |
1605 | ||
1606 | /* Configure RAID6 polynomial control setting */ | |
1607 | if (is_pq_enabled(pdma)) | |
1608 | iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D), | |
1609 | pdma->csr_dma + XGENE_DMA_RAID6_CONT); | |
1610 | else | |
1611 | dev_info(pdma->dev, "PQ is disabled in HW\n"); | |
1612 | ||
1613 | xgene_dma_enable(pdma); | |
1614 | xgene_dma_unmask_interrupts(pdma); | |
1615 | ||
1616 | /* Get DMA id and version info */ | |
1617 | val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR); | |
1618 | ||
1619 | /* DMA device info */ | |
1620 | dev_info(pdma->dev, | |
1621 | "X-Gene DMA v%d.%02d.%02d driver registered %d channels\n", | 
1622 | XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val), | |
1623 | XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL); | |
1624 | } | |
1625 | ||
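| /* Bring the ring manager out of reset: enable its clock, release soft | 
|  * reset and RAM shutdown, wait for the block memory to report ready, | 
|  * then program the threshold/hysteresis sets and enable the QPcore. | 
|  * Returns immediately if the ring manager is already running. | 
|  */ | 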
1626 | int xgene_dma_init_ring_mngr(struct xgene_dma *pdma) | |
1627 | { | |
1628 | if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) && | |
1629 | (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST))) | |
1630 | return 0; | |
1631 | ||
1632 | iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN); | |
1633 | iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST); | |
1634 | ||
1635 | /* Bring up memory */ | |
1636 | iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN); | |
1637 | ||
1638 | /* Force a barrier */ | |
1639 | ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN); | |
1640 | ||
1641 | /* reset may take up to 1ms */ | |
1642 | usleep_range(1000, 1100); | |
1643 | ||
1644 | if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY) | |
1645 | != XGENE_DMA_RING_BLK_MEM_RDY_VAL) { | |
1646 | dev_err(pdma->dev, | |
1647 | "Failed to release ring mngr memory from shutdown\n"); | |
1648 | return -ENODEV; | |
1649 | } | |
1650 | ||
1651 | /* program threshold set 1 and all hysteresis */ | |
1652 | iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL, | |
1653 | pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1); | |
1654 | iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL, | |
1655 | pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1); | |
1656 | iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL, | |
1657 | pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS); | |
1658 | ||
1659 | /* Enable QPcore and assign error queue */ | |
1660 | iowrite32(XGENE_DMA_RING_ENABLE, | |
1661 | pdma->csr_ring + XGENE_DMA_RING_CONFIG); | |
1662 | ||
1663 | return 0; | |
1664 | } | |
1665 | ||
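| /* Release the ring manager and then the DMA engine RAM from shutdown, | 
|  * verifying that each block reports its memory as ready. | 
|  */ | 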
1666 | static int xgene_dma_init_mem(struct xgene_dma *pdma) | |
1667 | { | |
1668 | int ret; | |
1669 | ||
1670 | ret = xgene_dma_init_ring_mngr(pdma); | |
1671 | if (ret) | |
1672 | return ret; | |
1673 | ||
1674 | /* Bring up memory */ | |
1675 | iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN); | |
1676 | ||
1677 | /* Force a barrier */ | |
1678 | ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN); | |
1679 | ||
1680 | /* reset may take up to 1ms */ | |
1681 | usleep_range(1000, 1100); | |
1682 | ||
1683 | if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY) | |
1684 | != XGENE_DMA_BLK_MEM_RDY_VAL) { | |
1685 | dev_err(pdma->dev, | |
1686 | "Failed to release DMA memory from shutdown\n"); | |
1687 | return -ENODEV; | |
1688 | } | |
1689 | ||
1690 | return 0; | |
1691 | } | |
1692 | ||
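| /* Request the device-level error interrupt plus one Rx ring interrupt | 
|  * per channel. If a channel IRQ cannot be requested, all IRQs obtained | 
|  * so far are released before returning the error. | 
|  */ | 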
1693 | static int xgene_dma_request_irqs(struct xgene_dma *pdma) | |
1694 | { | |
1695 | struct xgene_dma_chan *chan; | |
1696 | int ret, i, j; | |
1697 | ||
1698 | /* Register DMA error irq */ | |
1699 | ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr, | |
1700 | 0, "dma_error", pdma); | |
1701 | if (ret) { | |
1702 | dev_err(pdma->dev, | |
1703 | "Failed to register error IRQ %d\n", pdma->err_irq); | |
1704 | return ret; | |
1705 | } | |
1706 | ||
1707 | /* Register DMA channel rx irq */ | |
1708 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | |
1709 | chan = &pdma->chan[i]; | |
1710 | ret = devm_request_irq(chan->dev, chan->rx_irq, | |
1711 | xgene_dma_chan_ring_isr, | |
1712 | 0, chan->name, chan); | |
1713 | if (ret) { | |
1714 | chan_err(chan, "Failed to register Rx IRQ %d\n", | |
1715 | chan->rx_irq); | |
1716 | devm_free_irq(pdma->dev, pdma->err_irq, pdma); | |
1717 | ||
1718 | for (j = 0; j < i; j++) { | |
1719 | chan = &pdma->chan[j]; | 
1720 | devm_free_irq(chan->dev, chan->rx_irq, chan); | |
1721 | } | |
1722 | ||
1723 | return ret; | |
1724 | } | |
1725 | } | |
1726 | ||
1727 | return 0; | |
1728 | } | |
1729 | ||
1730 | static void xgene_dma_free_irqs(struct xgene_dma *pdma) | |
1731 | { | |
1732 | struct xgene_dma_chan *chan; | |
1733 | int i; | |
1734 | ||
1735 | /* Free DMA device error irq */ | |
1736 | devm_free_irq(pdma->dev, pdma->err_irq, pdma); | |
1737 | ||
1738 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | |
1739 | chan = &pdma->chan[i]; | |
1740 | devm_free_irq(chan->dev, chan->rx_irq, chan); | |
1741 | } | |
1742 | } | |
1743 | ||
1744 | static void xgene_dma_set_caps(struct xgene_dma_chan *chan, | |
1745 | struct dma_device *dma_dev) | |
1746 | { | |
1747 | /* Initialize DMA device capability mask */ | |
1748 | dma_cap_zero(dma_dev->cap_mask); | |
1749 | ||
1750 | /* Set DMA device capability */ | |
1751 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | |
1752 | dma_cap_set(DMA_SG, dma_dev->cap_mask); | |
1753 | ||
1754 | /* Channel 0 of the X-Gene SoC DMA engine supports XOR, and channel 1 | 
1755 | * supports both XOR and PQ. XOR/PQ support on channel 1 can be | 
1756 | * disabled in hardware, which is reported through the SoC efuse | 
1757 | * register (see is_pq_enabled()). | 
1758 | * Due to a hardware erratum, the DMA engine can hang if channel 0 and | 
1759 | * channel 1 execute XOR and PQ requests simultaneously. XOR is | 
1760 | * therefore advertised on channel 0 only when XOR/PQ support on | 
1761 | * channel 1 is disabled. | 
1762 | */ | 
1763 | if ((chan->id == XGENE_DMA_PQ_CHANNEL) && | |
1764 | is_pq_enabled(chan->pdma)) { | |
1765 | dma_cap_set(DMA_PQ, dma_dev->cap_mask); | |
1766 | dma_cap_set(DMA_XOR, dma_dev->cap_mask); | |
1767 | } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) && | |
1768 | !is_pq_enabled(chan->pdma)) { | |
1769 | dma_cap_set(DMA_XOR, dma_dev->cap_mask); | |
1770 | } | |
1771 | ||
1772 | /* Set base and prep routines */ | |
1773 | dma_dev->dev = chan->dev; | |
1774 | dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources; | |
1775 | dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; | |
1776 | dma_dev->device_issue_pending = xgene_dma_issue_pending; | |
1777 | dma_dev->device_tx_status = xgene_dma_tx_status; | |
1778 | dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy; | |
1779 | dma_dev->device_prep_dma_sg = xgene_dma_prep_sg; | |
1780 | ||
1781 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | |
1782 | dma_dev->device_prep_dma_xor = xgene_dma_prep_xor; | |
1783 | dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC; | |
1784 | dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT; | |
1785 | } | |
1786 | ||
1787 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { | |
1788 | dma_dev->device_prep_dma_pq = xgene_dma_prep_pq; | |
1789 | dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC; | |
1790 | dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT; | |
1791 | } | |
1792 | } | |
1793 | ||
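| /* Initialize a channel's software state (lock, descriptor lists, | 
|  * tasklet, cookie counter), fill in its dma_device capabilities and | 
|  * register it with the dmaengine framework. | 
|  * | 
|  * Once registered, a generic dmaengine client can obtain one of these | 
|  * channels, for example (purely illustrative, not part of this driver): | 
|  * | 
|  *	dma_cap_mask_t mask; | 
|  *	struct dma_chan *dchan; | 
|  * | 
|  *	dma_cap_zero(mask); | 
|  *	dma_cap_set(DMA_MEMCPY, mask); | 
|  *	dchan = dma_request_channel(mask, NULL, NULL); | 
|  */ | 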
1794 | static int xgene_dma_async_register(struct xgene_dma *pdma, int id) | |
1795 | { | |
1796 | struct xgene_dma_chan *chan = &pdma->chan[id]; | |
1797 | struct dma_device *dma_dev = &pdma->dma_dev[id]; | |
1798 | int ret; | |
1799 | ||
1800 | chan->dma_chan.device = dma_dev; | |
1801 | ||
1802 | spin_lock_init(&chan->lock); | |
1803 | INIT_LIST_HEAD(&chan->ld_pending); | |
1804 | INIT_LIST_HEAD(&chan->ld_running); | |
1805 | INIT_LIST_HEAD(&chan->ld_completed); | |
1806 | tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb, | |
1807 | (unsigned long)chan); | |
1808 | ||
1809 | chan->pending = 0; | |
1810 | chan->desc_pool = NULL; | |
1811 | dma_cookie_init(&chan->dma_chan); | |
1812 | ||
1813 | /* Setup dma device capabilities and prep routines */ | |
1814 | xgene_dma_set_caps(chan, dma_dev); | |
1815 | ||
1816 | /* Initialize DMA device list head */ | |
1817 | INIT_LIST_HEAD(&dma_dev->channels); | |
1818 | list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels); | |
1819 | ||
1820 | /* Register with Linux async DMA framework */ | 
1821 | ret = dma_async_device_register(dma_dev); | |
1822 | if (ret) { | |
1823 | chan_err(chan, "Failed to register async device %d\n", ret); | 
1824 | tasklet_kill(&chan->tasklet); | |
1825 | ||
1826 | return ret; | |
1827 | } | |
1828 | ||
1829 | /* DMA capability info */ | |
1830 | dev_info(pdma->dev, | |
1831 | "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan), | |
1832 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "", | |
1833 | dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "", | |
1834 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", | |
1835 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); | |
1836 | ||
1837 | return 0; | |
1838 | } | |
1839 | ||
1840 | static int xgene_dma_init_async(struct xgene_dma *pdma) | |
1841 | { | |
1842 | int ret, i, j; | |
1843 | ||
1844 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL ; i++) { | |
1845 | ret = xgene_dma_async_register(pdma, i); | |
1846 | if (ret) { | |
1847 | for (j = 0; j < i; j++) { | |
1848 | dma_async_device_unregister(&pdma->dma_dev[j]); | |
1849 | tasklet_kill(&pdma->chan[j].tasklet); | |
1850 | } | |
1851 | ||
1852 | return ret; | |
1853 | } | |
1854 | } | |
1855 | ||
1856 | return 0; | 
1857 | } | |
1858 | ||
1859 | static void xgene_dma_async_unregister(struct xgene_dma *pdma) | |
1860 | { | |
1861 | int i; | |
1862 | ||
1863 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) | |
1864 | dma_async_device_unregister(&pdma->dma_dev[i]); | |
1865 | } | |
1866 | ||
1867 | static void xgene_dma_init_channels(struct xgene_dma *pdma) | |
1868 | { | |
1869 | struct xgene_dma_chan *chan; | |
1870 | int i; | |
1871 | ||
1872 | pdma->ring_num = XGENE_DMA_RING_NUM; | |
1873 | ||
1874 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | |
1875 | chan = &pdma->chan[i]; | |
1876 | chan->dev = pdma->dev; | |
1877 | chan->pdma = pdma; | |
1878 | chan->id = i; | |
1879 | sprintf(chan->name, "dmachan%d", chan->id); | |
1880 | } | |
1881 | } | |
1882 | ||
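| /* Map the DMA, ring, ring command and efuse CSR regions described by | 
|  * the platform device, and pick up the interrupts: index 0 is the | 
|  * error interrupt, indices 1..XGENE_DMA_MAX_CHANNEL are the per-channel | 
|  * Rx ring interrupts. | 
|  */ | 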
1883 | static int xgene_dma_get_resources(struct platform_device *pdev, | |
1884 | struct xgene_dma *pdma) | |
1885 | { | |
1886 | struct resource *res; | |
1887 | int irq, i; | |
1888 | ||
1889 | /* Get DMA csr region */ | |
1890 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1891 | if (!res) { | |
1892 | dev_err(&pdev->dev, "Failed to get csr region\n"); | |
1893 | return -ENXIO; | |
1894 | } | |
1895 | ||
1896 | pdma->csr_dma = devm_ioremap(&pdev->dev, res->start, | |
1897 | resource_size(res)); | |
1898 | if (!pdma->csr_dma) { | 
1899 | dev_err(&pdev->dev, "Failed to ioremap csr region\n"); | 
1900 | return -ENOMEM; | 
1901 | } | |
1902 | ||
1903 | /* Get DMA ring csr region */ | |
1904 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | |
1905 | if (!res) { | |
1906 | dev_err(&pdev->dev, "Failed to get ring csr region\n"); | |
1907 | return -ENXIO; | |
1908 | } | |
1909 | ||
1910 | pdma->csr_ring = devm_ioremap(&pdev->dev, res->start, | |
1911 | resource_size(res)); | |
1912 | if (!pdma->csr_ring) { | 
1913 | dev_err(&pdev->dev, "Failed to ioremap ring csr region\n"); | 
1914 | return -ENOMEM; | 
1915 | } | |
1916 | ||
1917 | /* Get DMA ring cmd csr region */ | |
1918 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | |
1919 | if (!res) { | |
1920 | dev_err(&pdev->dev, "Failed to get ring cmd csr region\n"); | |
1921 | return -ENXIO; | |
1922 | } | |
1923 | ||
1924 | pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start, | |
1925 | resource_size(res)); | |
1926 | if (!pdma->csr_ring_cmd) { | 
1927 | dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region\n"); | 
1928 | return -ENOMEM; | 
1929 | } | |
1930 | ||
1931 | /* Get efuse csr region */ | |
1932 | res = platform_get_resource(pdev, IORESOURCE_MEM, 3); | |
1933 | if (!res) { | |
1934 | dev_err(&pdev->dev, "Failed to get efuse csr region\n"); | |
1935 | return -ENXIO; | |
1936 | } | |
1937 | ||
1938 | pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start, | |
1939 | resource_size(res)); | |
1940 | if (!pdma->csr_efuse) { | 
1941 | dev_err(&pdev->dev, "Failed to ioremap efuse csr region\n"); | 
1942 | return -ENOMEM; | 
1943 | } | |
1944 | ||
1945 | /* Get DMA error interrupt */ | |
1946 | irq = platform_get_irq(pdev, 0); | |
1947 | if (irq <= 0) { | |
1948 | dev_err(&pdev->dev, "Failed to get Error IRQ\n"); | |
1949 | return -ENXIO; | |
1950 | } | |
1951 | ||
1952 | pdma->err_irq = irq; | |
1953 | ||
1954 | /* Get DMA Rx ring descriptor interrupts for all DMA channels */ | |
1955 | for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) { | |
1956 | irq = platform_get_irq(pdev, i); | |
1957 | if (irq <= 0) { | |
1958 | dev_err(&pdev->dev, "Failed to get Rx IRQ\n"); | |
1959 | return -ENXIO; | |
1960 | } | |
1961 | ||
1962 | pdma->chan[i - 1].rx_irq = irq; | |
1963 | } | |
1964 | ||
1965 | return 0; | |
1966 | } | |
1967 | ||
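| /* Probe sequence: map resources, enable the clock, release RAM from | 
|  * shutdown, set a 42-bit DMA mask, create the per-channel rings, | 
|  * request IRQs, enable the engine and register the channels with the | 
|  * async framework. The error labels unwind the steps completed so far. | 
|  */ | 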
1968 | static int xgene_dma_probe(struct platform_device *pdev) | |
1969 | { | |
1970 | struct xgene_dma *pdma; | |
1971 | int ret, i; | |
1972 | ||
1973 | pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL); | |
1974 | if (!pdma) | |
1975 | return -ENOMEM; | |
1976 | ||
1977 | pdma->dev = &pdev->dev; | |
1978 | platform_set_drvdata(pdev, pdma); | |
1979 | ||
1980 | ret = xgene_dma_get_resources(pdev, pdma); | |
1981 | if (ret) | |
1982 | return ret; | |
1983 | ||
1984 | pdma->clk = devm_clk_get(&pdev->dev, NULL); | |
1985 | if (IS_ERR(pdma->clk)) { | |
1986 | dev_err(&pdev->dev, "Failed to get clk\n"); | |
1987 | return PTR_ERR(pdma->clk); | |
1988 | } | |
1989 | ||
1990 | /* Enable clk before accessing registers */ | |
1991 | ret = clk_prepare_enable(pdma->clk); | |
1992 | if (ret) { | |
1993 | dev_err(&pdev->dev, "Failed to enable clk %d\n", ret); | |
1994 | return ret; | |
1995 | } | |
1996 | ||
1997 | /* Remove DMA RAM out of shutdown */ | |
1998 | ret = xgene_dma_init_mem(pdma); | |
1999 | if (ret) | |
2000 | goto err_clk_enable; | |
2001 | ||
2002 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42)); | |
2003 | if (ret) { | |
2004 | dev_err(&pdev->dev, "No usable DMA configuration\n"); | |
2005 | goto err_dma_mask; | |
2006 | } | |
2007 | ||
2008 | /* Initialize DMA channels software state */ | |
2009 | xgene_dma_init_channels(pdma); | |
2010 | ||
2011 | /* Configure DMA rings */ | 
2012 | ret = xgene_dma_init_rings(pdma); | |
2013 | if (ret) | |
2014 | goto err_clk_enable; | |
2015 | ||
2016 | ret = xgene_dma_request_irqs(pdma); | |
2017 | if (ret) | |
2018 | goto err_request_irq; | |
2019 | ||
2020 | /* Configure and enable DMA engine */ | |
2021 | xgene_dma_init_hw(pdma); | |
2022 | ||
2023 | /* Register DMA device with linux async framework */ | |
2024 | ret = xgene_dma_init_async(pdma); | |
2025 | if (ret) | |
2026 | goto err_async_init; | |
2027 | ||
2028 | return 0; | |
2029 | ||
2030 | err_async_init: | |
2031 | xgene_dma_free_irqs(pdma); | |
2032 | ||
2033 | err_request_irq: | |
2034 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) | |
2035 | xgene_dma_delete_chan_rings(&pdma->chan[i]); | |
2036 | ||
2037 | err_dma_mask: | |
2038 | err_clk_enable: | |
2039 | clk_disable_unprepare(pdma->clk); | |
2040 | ||
2041 | return ret; | |
2042 | } | |
2043 | ||
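| /* Teardown mirrors probe: unregister from the async framework, mask | 
|  * interrupts and disable the engine, free the IRQs, then kill each | 
|  * channel's tasklet, delete its rings and gate the clock. | 
|  */ | 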
2044 | static int xgene_dma_remove(struct platform_device *pdev) | |
2045 | { | |
2046 | struct xgene_dma *pdma = platform_get_drvdata(pdev); | |
2047 | struct xgene_dma_chan *chan; | |
2048 | int i; | |
2049 | ||
2050 | xgene_dma_async_unregister(pdma); | |
2051 | ||
2052 | /* Mask interrupts and disable DMA engine */ | |
2053 | xgene_dma_mask_interrupts(pdma); | |
2054 | xgene_dma_disable(pdma); | |
2055 | xgene_dma_free_irqs(pdma); | |
2056 | ||
2057 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | |
2058 | chan = &pdma->chan[i]; | |
2059 | tasklet_kill(&chan->tasklet); | |
2060 | xgene_dma_delete_chan_rings(chan); | |
2061 | } | |
2062 | ||
2063 | clk_disable_unprepare(pdma->clk); | |
2064 | ||
2065 | return 0; | |
2066 | } | |
2067 | ||
2068 | static const struct of_device_id xgene_dma_of_match_ptr[] = { | |
2069 | {.compatible = "apm,xgene-storm-dma",}, | |
2070 | {}, | |
2071 | }; | |
2072 | MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr); | |
2073 | ||
2074 | static struct platform_driver xgene_dma_driver = { | |
2075 | .probe = xgene_dma_probe, | |
2076 | .remove = xgene_dma_remove, | |
2077 | .driver = { | |
2078 | .name = "X-Gene-DMA", | |
2079 | .owner = THIS_MODULE, | |
2080 | .of_match_table = xgene_dma_of_match_ptr, | |
2081 | }, | |
2082 | }; | |
2083 | ||
2084 | module_platform_driver(xgene_dma_driver); | |
2085 | ||
2086 | MODULE_DESCRIPTION("APM X-Gene SoC DMA driver"); | |
2087 | MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>"); | |
2088 | MODULE_AUTHOR("Loc Ho <lho@apm.com>"); | |
2089 | MODULE_LICENSE("GPL"); | |
2090 | MODULE_VERSION("1.0"); |