/*
 * skl-sst-cldma.c - Code Loader DMA handler
 *
 * Copyright (C) 2015, Intel Corporation.
 * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

static void skl_cldma_int_enable(struct sst_dsp *ctx)
{
        sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
                        SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
}

void skl_cldma_int_disable(struct sst_dsp *ctx)
{
        sst_dsp_shim_update_bits_unlocked(ctx,
                        SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
}
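
/*
 * ADSPIC gates delivery of code loader DMA interrupts to the host:
 * skl_cldma_fill_buffer() enables the interrupt for full-buffer,
 * interrupt-driven fills, while skl_cldma_copy_to_buf() disables it
 * again for the final, polled tail of a transfer.
 */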

/* Code loader helper APIs */
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
                struct snd_dma_buffer *dmab_data,
                u32 **bdlp, int size, int with_ioc)
{
        u32 *bdl = *bdlp;

        ctx->cl_dev.frags = 0;
        while (size > 0) {
                phys_addr_t addr = virt_to_phys(dmab_data->area +
                                (ctx->cl_dev.frags * ctx->cl_dev.bufsize));

                bdl[0] = cpu_to_le32(lower_32_bits(addr));
                bdl[1] = cpu_to_le32(upper_32_bits(addr));

                bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);

                size -= ctx->cl_dev.bufsize;
                bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);

                bdl += 4;
                ctx->cl_dev.frags++;
        }
}
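
/*
 * Each BDL entry built above is four 32-bit words:
 *   word 0: lower 32 bits of the fragment's physical address
 *   word 1: upper 32 bits of the physical address
 *   word 2: fragment size in bytes (cl_dev.bufsize)
 *   word 3: interrupt-on-completion flag (bit 0), set only for the last
 *           entry and only when with_ioc is requested
 *
 * For illustration, a single 32 KB fragment at the (hypothetical)
 * physical address 0x1_2345_8000 would be described as:
 *   bdl[0] = 0x23458000, bdl[1] = 0x00000001,
 *   bdl[2] = 0x00008000, bdl[3] = 0x00000001
 */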

/*
 * Setup controller
 * Configure the registers to update the DMA buffer address and
 * enable interrupts.
 * Note: channel 1 is used for the transfer
 */
static void skl_cldma_setup_controller(struct sst_dsp *ctx,
                struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
                u32 count)
{
        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
                        CL_SD_BDLPLBA(dmab_bdl->addr));
        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
                        CL_SD_BDLPUBA(dmab_bdl->addr));

        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
}
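
/*
 * The stream descriptor programming above follows the usual HDA layout:
 * BDLPL/BDLPU hold the BDL's physical address, CBL the cyclic buffer
 * length in bytes, and LVI the index of the last valid BDL entry
 * (count - 1). IOCE, FEIE and DEIE enable the buffer-completion, FIFO
 * error and descriptor error interrupts, and STRM tags the descriptor
 * with the stream number used for code loading.
 */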

static void skl_cldma_setup_spb(struct sst_dsp *ctx,
                unsigned int size, bool enable)
{
        if (enable)
                sst_dsp_shim_update_bits_unlocked(ctx,
                                SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
                                CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
                                CL_SPBFIFO_SPBFCCTL_SPIBE(1));

        sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
}
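
/*
 * SPIB (Software Position In Buffer) acts as the software write pointer
 * for the code loader stream: the DMA engine only consumes data up to
 * the SPIB position, so advancing it releases freshly copied bytes to
 * the hardware. SPIBE enables this throttling; skl_cldma_cleanup_spb()
 * below clears the enable and resets the position.
 */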

static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
{
        sst_dsp_shim_update_bits_unlocked(ctx,
                        SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
                        CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
                        CL_SPBFIFO_SPBFCCTL_SPIBE(0));

        sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}

static void skl_cldma_trigger(struct sst_dsp *ctx, bool enable)
{
        if (enable)
                sst_dsp_shim_update_bits_unlocked(ctx,
                                SKL_ADSP_REG_CL_SD_CTL,
                                CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(1));
        else
                sst_dsp_shim_update_bits_unlocked(ctx,
                                SKL_ADSP_REG_CL_SD_CTL,
                                CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(0));
}

static void skl_cldma_cleanup(struct sst_dsp *ctx)
{
        skl_cldma_cleanup_spb(ctx);

        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
        sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
                        CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));

        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);

        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
        sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
}
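
/*
 * skl_cldma_cleanup() is the inverse of skl_cldma_setup_controller()
 * plus skl_cldma_setup_spb(): it clears the interrupt enables and
 * stream tag, then zeroes the BDL address and buffer geometry so the
 * stream is left fully quiesced.
 */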

static int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
{
        int ret = 0;

        if (!wait_event_timeout(ctx->cl_dev.wait_queue,
                                ctx->cl_dev.wait_condition,
                                msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
                dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
                ret = -EIO;
                goto cleanup;
        }

        dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
        if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
                dev_err(ctx->dev, "%s: DMA Error\n", __func__);
                ret = -EIO;
        }

cleanup:
        ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
        return ret;
}
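
/*
 * Wait/wake handshake: skl_cldma_fill_buffer() clears
 * cl_dev.wait_condition before a transfer is (re)started, and
 * skl_cldma_process_intr() sets it and records the wake status
 * (SKL_CL_DMA_BUF_COMPLETE or SKL_CL_DMA_ERR) before waking this
 * waiter.
 */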

static void skl_cldma_stop(struct sst_dsp *ctx)
{
        ctx->cl_dev.ops.cl_trigger(ctx, false);
}

static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
                const void *curr_pos, bool intr_enable, bool trigger)
{
        dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
        dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
                        ctx->cl_dev.dma_buffer_offset, trigger);
        dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);

        memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
                        curr_pos, size);

        if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
                ctx->cl_dev.dma_buffer_offset = 0;
        else
                ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;

        ctx->cl_dev.wait_condition = false;

        if (intr_enable)
                skl_cldma_int_enable(ctx);

        ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
        if (trigger)
                ctx->cl_dev.ops.cl_trigger(ctx, true);
}
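
/*
 * The next copy destination simply follows SPIB: once SPIB has reached
 * the end of the ring buffer, the following fill restarts at offset 0;
 * otherwise it continues from the current SPIB position. For example
 * (illustrative sizes, assuming a 32 KB ring buffer), after a full
 * 32 KB fill the next copy lands at offset 0, while after a 4 KB tail
 * copy a subsequent copy would start at offset 4 KB.
 */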

/*
 * The CL DMA has no way to report transfer status until a BDL buffer
 * has been fully transferred, so copying is split into two parts:
 * 1. Interrupt on buffer completion, used while the data still to be
 *    transferred is larger than the ring buffer.
 * 2. Polling on a firmware register, once the data left to transfer no
 *    longer fills the ring buffer. The caller takes care of polling the
 *    required status register to identify the transfer status.
 */
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
{
        int ret = 0;
        bool start = true;
        unsigned int excess_bytes;
        u32 size;
        unsigned int bytes_left = total_size;
        const void *curr_pos = bin;

        if (total_size == 0)
                return -EINVAL;

        dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);

        while (bytes_left) {
                if (bytes_left > ctx->cl_dev.bufsize) {

                        /*
                         * The DMA transfers only up to the write pointer
                         * as updated in SPIB
                         */
                        if (ctx->cl_dev.curr_spib_pos == 0)
                                ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;

                        size = ctx->cl_dev.bufsize;
                        skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);

                        start = false;
                        ret = skl_cldma_wait_interruptible(ctx);
                        if (ret < 0) {
                                skl_cldma_stop(ctx);
                                return ret;
                        }

                } else {
                        skl_cldma_int_disable(ctx);

                        if ((ctx->cl_dev.curr_spib_pos + bytes_left)
                                        <= ctx->cl_dev.bufsize) {
                                ctx->cl_dev.curr_spib_pos += bytes_left;
                        } else {
                                excess_bytes = bytes_left -
                                        (ctx->cl_dev.bufsize -
                                        ctx->cl_dev.curr_spib_pos);
                                ctx->cl_dev.curr_spib_pos = excess_bytes;
                        }

                        size = bytes_left;
                        skl_cldma_fill_buffer(ctx, size,
                                        curr_pos, false, start);
                }
                bytes_left -= size;
                curr_pos = curr_pos + size;
        }

        return ret;
}
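
/*
 * Example (illustrative sizes, assuming a 32 KB ring buffer): a 100 KB
 * binary is copied as three full 32 KB, interrupt-driven fills, with a
 * buffer-complete wakeup awaited after each, followed by a single 4 KB
 * tail copy with interrupts disabled, after which the caller polls the
 * firmware status register for completion.
 */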

void skl_cldma_process_intr(struct sst_dsp *ctx)
{
        u8 cl_dma_intr_status;

        cl_dma_intr_status =
                sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);

        if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
                ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
        else
                ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;

        ctx->cl_dev.wait_condition = true;
        wake_up(&ctx->cl_dev.wait_queue);
}
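
/*
 * skl_cldma_process_intr() is expected to be called from the DSP
 * interrupt handling path when the interrupt status indicates a code
 * loader DMA event; it translates the stream status into a wake_status
 * for the waiter in skl_cldma_wait_interruptible().
 */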

int skl_cldma_prepare(struct sst_dsp *ctx)
{
        int ret;
        u32 *bdl;

        ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;

        /* Allocate cl ops */
        ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
        ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
        ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
        ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
        ctx->cl_dev.ops.cl_trigger = skl_cldma_trigger;
        ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
        ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
        ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;

        /* Allocate buffer */
        ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
                        &ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
        if (ret < 0) {
                dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret);
                return ret;
        }

        /* Setup Code loader BDL */
        ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
                        &ctx->cl_dev.dmab_bdl, PAGE_SIZE);
        if (ret < 0) {
                dev_err(ctx->dev, "Alloc buffer for bdle failed: %x\n", ret);
                ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
                return ret;
        }
        bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;

        /* Set up the BDL entries and program the controller */
        ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
                        &bdl, ctx->cl_dev.bufsize, 1);
        ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
                        ctx->cl_dev.bufsize, ctx->cl_dev.frags);

        ctx->cl_dev.curr_spib_pos = 0;
        ctx->cl_dev.dma_buffer_offset = 0;
        init_waitqueue_head(&ctx->cl_dev.wait_queue);

        return ret;
}
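
/*
 * Typical use by a firmware loader (a sketch only; fw->data and
 * fw->size are illustrative names, assuming a struct firmware obtained
 * by the caller):
 *
 *   ret = skl_cldma_prepare(ctx);
 *   if (ret < 0)
 *           return ret;
 *   ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, fw->data, fw->size);
 *   ...
 *   ctx->cl_dev.ops.cl_stop_dma(ctx);
 *   ctx->cl_dev.ops.cl_cleanup_controller(ctx);
 */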