/*
 * skl-sst-cldma.c - Code Loader DMA handler
 *
 * Copyright (C) 2015, Intel Corporation.
 * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

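/*
 * Interrupt control helpers: gate the code loader DMA interrupt in the
 * ADSP interrupt control (ADSPIC) register.
 */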
static void skl_cldma_int_enable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
				SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
}

void skl_cldma_int_disable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
}

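/*
 * Start or stop the CL DMA stream by toggling the RUN bit, then poll the
 * control register until the hardware reflects the requested state
 * (roughly a millisecond before giving up).
 */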
static void skl_cldma_stream_run(struct sst_dsp *ctx, bool enable)
{
	unsigned char val;
	int timeout;

	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(enable));

	udelay(3);
	timeout = 300;
	do {
		/* wait for the hardware to report the requested Run bit state */
		val = sst_dsp_shim_read(ctx, SKL_ADSP_REG_CL_SD_CTL) &
			CL_SD_CTL_RUN_MASK;
		if (enable && val)
			break;
		else if (!enable && !val)
			break;
		udelay(3);
	} while (--timeout);

	if (timeout == 0)
		dev_err(ctx->dev, "Failed to set Run bit=%d enable=%d\n", val, enable);
}

/* Code loader helper APIs */
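/*
 * Build the buffer descriptor list (BDL) for the DMA data buffer: each
 * entry is four u32s (address low, address high, fragment size,
 * interrupt-on-completion flag); the IOC flag is set only on the last
 * fragment, and only when requested via with_ioc.
 */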
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_data,
		u32 **bdlp, int size, int with_ioc)
{
	u32 *bdl = *bdlp;

	ctx->cl_dev.frags = 0;
	while (size > 0) {
		phys_addr_t addr = virt_to_phys(dmab_data->area +
				(ctx->cl_dev.frags * ctx->cl_dev.bufsize));

		bdl[0] = cpu_to_le32(lower_32_bits(addr));
		bdl[1] = cpu_to_le32(upper_32_bits(addr));

		bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);

		size -= ctx->cl_dev.bufsize;
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);

		bdl += 4;
		ctx->cl_dev.frags++;
	}
}

/*
 * Setup controller
 * Configure the registers to update the DMA buffer address and
 * enable interrupts.
 * Note: stream 1 is used for the transfer
 */
static void skl_cldma_setup_controller(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
		u32 count)
{
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
			CL_SD_BDLPLBA(dmab_bdl->addr));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
			CL_SD_BDLPUBA(dmab_bdl->addr));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
}

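/*
 * Program the software position in buffer (SPIB): the DMA engine only
 * transfers data up to this write pointer, so it is updated after each
 * buffer fill. The SPIB enable bit is set when requested.
 */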
static void skl_cldma_setup_spb(struct sst_dsp *ctx,
		unsigned int size, bool enable)
{
	if (enable)
		sst_dsp_shim_update_bits_unlocked(ctx,
				SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
				CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
				CL_SPBFIFO_SPBFCCTL_SPIBE(1));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
}

static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
			CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
			CL_SPBFIFO_SPBFCCTL_SPIBE(0));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}

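/*
 * Undo skl_cldma_prepare(): disable SPIB, clear the stream and interrupt
 * configuration and the BDL pointers, then free both DMA buffers.
 */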
static void skl_cldma_cleanup(struct sst_dsp *ctx)
{
	skl_cldma_cleanup_spb(ctx);

	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);

	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl);
}

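/*
 * Sleep until the interrupt handler signals buffer completion; return
 * -EIO on timeout or when the handler reports a DMA error.
 */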
static int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
{
	int ret = 0;

	if (!wait_event_timeout(ctx->cl_dev.wait_queue,
				ctx->cl_dev.wait_condition,
				msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
		dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
		ret = -EIO;
		goto cleanup;
	}

	dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
	if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
		dev_err(ctx->dev, "%s: DMA Error\n", __func__);
		ret = -EIO;
	}

cleanup:
	ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
	return ret;
}

static void skl_cldma_stop(struct sst_dsp *ctx)
{
	skl_cldma_stream_run(ctx, false);
}

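/*
 * Copy one chunk into the ring buffer (wrapping at the buffer boundary),
 * advance the SPIB write pointer, and optionally enable the completion
 * interrupt and trigger the DMA run.
 */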
static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
		const void *curr_pos, bool intr_enable, bool trigger)
{
	dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
	dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
			ctx->cl_dev.dma_buffer_offset, trigger);
	dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);

	/*
	 * Check if the size exceeds the buffer boundary. If it does,
	 * copy up to the end of the buffer, then copy the remainder
	 * from the start of the ring buffer.
	 */
	if (ctx->cl_dev.dma_buffer_offset + size > ctx->cl_dev.bufsize) {
		unsigned int size_b = ctx->cl_dev.bufsize -
			ctx->cl_dev.dma_buffer_offset;
		memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size_b);
		size -= size_b;
		curr_pos += size_b;
		ctx->cl_dev.dma_buffer_offset = 0;
	}

	memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size);

	if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
		ctx->cl_dev.dma_buffer_offset = 0;
	else
		ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;

	ctx->cl_dev.wait_condition = false;

	if (intr_enable)
		skl_cldma_int_enable(ctx);

	ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
	if (trigger)
		ctx->cl_dev.ops.cl_trigger(ctx, true);
}

/*
 * The CL DMA doesn't have any way to update the transfer status until a BDL
 * buffer is fully transferred.
 *
 * So copying is divided into two parts:
 * 1. Interrupt on buffer done, used when the size to be transferred is more
 *    than the ring buffer size.
 * 2. Polling on a FW register, used when the data left to transfer doesn't
 *    fill the ring buffer. The caller takes care of polling the required
 *    status register to identify the transfer status.
 */
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
{
	int ret = 0;
	bool start = true;
	unsigned int excess_bytes;
	u32 size;
	unsigned int bytes_left = total_size;
	const void *curr_pos = bin;

	if (total_size == 0)
		return -EINVAL;

	dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);

	while (bytes_left) {
		if (bytes_left > ctx->cl_dev.bufsize) {

			/*
			 * the DMA transfers only up to the write pointer
			 * as updated in SPIB
			 */
			if (ctx->cl_dev.curr_spib_pos == 0)
				ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;

			size = ctx->cl_dev.bufsize;
			skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);

			start = false;
			ret = skl_cldma_wait_interruptible(ctx);
			if (ret < 0) {
				skl_cldma_stop(ctx);
				return ret;
			}

		} else {
			skl_cldma_int_disable(ctx);

			if ((ctx->cl_dev.curr_spib_pos + bytes_left)
					<= ctx->cl_dev.bufsize) {
				ctx->cl_dev.curr_spib_pos += bytes_left;
			} else {
				excess_bytes = bytes_left -
					(ctx->cl_dev.bufsize -
					ctx->cl_dev.curr_spib_pos);
				ctx->cl_dev.curr_spib_pos = excess_bytes;
			}

			size = bytes_left;
			skl_cldma_fill_buffer(ctx, size,
					curr_pos, false, start);
		}
		bytes_left -= size;
		curr_pos = curr_pos + size;
	}

	return ret;
}

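/*
 * Interrupt handler path: read the stream status, record whether the
 * buffer completed or errored, and wake up the waiter.
 */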
void skl_cldma_process_intr(struct sst_dsp *ctx)
{
	u8 cl_dma_intr_status;

	cl_dma_intr_status =
		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);

	if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
		ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
	else
		ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;

	ctx->cl_dev.wait_condition = true;
	wake_up(&ctx->cl_dev.wait_queue);
}

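/*
 * One-time setup of the code loader DMA: install the cl_dev ops, allocate
 * the data and BDL DMA buffers, build the BDL and program the controller.
 * Callers then drive transfers through the installed ops
 * (cl_copy_to_dmabuf, cl_stop_dma, cl_cleanup_controller).
 */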
int skl_cldma_prepare(struct sst_dsp *ctx)
{
	int ret;
	u32 *bdl;

	ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;

	/* Allocate cl ops */
	ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
	ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
	ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
	ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
	ctx->cl_dev.ops.cl_trigger = skl_cldma_stream_run;
	ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
	ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
	ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;

	/* Allocate buffer */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for base fw failed: %x", ret);
		return ret;
	}

	/* Setup Code loader BDL */
	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
			&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for bdl failed: %x", ret);
		ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
		return ret;
	}
	bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;

	/* Allocate BDLs */
	ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
			&bdl, ctx->cl_dev.bufsize, 1);
	ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
			ctx->cl_dev.bufsize, ctx->cl_dev.frags);

	ctx->cl_dev.curr_spib_pos = 0;
	ctx->cl_dev.dma_buffer_offset = 0;
	init_waitqueue_head(&ctx->cl_dev.wait_queue);

	return ret;
}