sound/soc/intel/skylake/skl-messages.c
1 /*
2 * skl-messages.c - HDA DSP interface for FW registration, Pipe and Module
3 * configurations
4 *
5 * Copyright (C) 2015 Intel Corp
6 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
7 * Jeeja KP <jeeja.kp@intel.com>
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as version 2, as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19
20 #include <linux/slab.h>
21 #include <linux/pci.h>
22 #include <sound/core.h>
23 #include <sound/pcm.h>
24 #include "skl-sst-dsp.h"
25 #include "skl-sst-ipc.h"
26 #include "skl.h"
27 #include "../common/sst-dsp.h"
28 #include "../common/sst-dsp-priv.h"
29 #include "skl-topology.h"
30 #include "skl-tplg-interface.h"
31
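/* DMA buffer alloc/free helpers used by the DSP firmware loader, via the HDA bus io_ops */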
32 static int skl_alloc_dma_buf(struct device *dev,
33 struct snd_dma_buffer *dmab, size_t size)
34 {
35 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
36 struct hdac_bus *bus = ebus_to_hbus(ebus);
37
38 if (!bus)
39 return -ENODEV;
40
41 return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
42 }
43
44 static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
45 {
46 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
47 struct hdac_bus *bus = ebus_to_hbus(ebus);
48
49 if (!bus)
50 return -ENODEV;
51
52 bus->io_ops->dma_free_pages(bus, dmab);
53
54 return 0;
55 }
56
57 #define NOTIFICATION_PARAM_ID 3
58 #define NOTIFICATION_MASK 0xf
59
60 /* Enable/disable notification for underruns/overruns from firmware module */
61 static void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
62 {
63 struct notification_mask mask;
64 struct skl_ipc_large_config_msg msg = {0};
65
66 mask.notify = NOTIFICATION_MASK;
67 mask.enable = enable;
68
69 msg.large_param_id = NOTIFICATION_PARAM_ID;
70 msg.param_data_size = sizeof(mask);
71
72 skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)&mask);
73 }
74
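/* Enable or disable SPIB for the host stream with the given tag and program the SPIB value */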
75 static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
76 int stream_tag, int enable)
77 {
78 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
79 struct hdac_bus *bus = ebus_to_hbus(ebus);
80 struct hdac_stream *stream = snd_hdac_get_stream(bus,
81 SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
82 struct hdac_ext_stream *estream;
83
84 if (!stream)
85 return -EINVAL;
86
87 estream = stream_to_hdac_ext_stream(stream);
88 /* enable/disable SPIB for this hdac stream */
89 snd_hdac_ext_stream_spbcap_enable(ebus, enable, stream->index);
90
91 /* set the spib value */
92 snd_hdac_ext_stream_set_spib(ebus, estream, size);
93
94 return 0;
95 }
96
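/* Prepare a host DMA stream for DSP firmware loading and return its stream tag */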
97 static int skl_dsp_prepare(struct device *dev, unsigned int format,
98 unsigned int size, struct snd_dma_buffer *dmab)
99 {
100 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
101 struct hdac_bus *bus = ebus_to_hbus(ebus);
102 struct hdac_ext_stream *estream;
103 struct hdac_stream *stream;
104 struct snd_pcm_substream substream;
105 int ret;
106
107 if (!bus)
108 return -ENODEV;
109
110 memset(&substream, 0, sizeof(substream));
111 substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
112
113 estream = snd_hdac_ext_stream_assign(ebus, &substream,
114 HDAC_EXT_STREAM_TYPE_HOST);
115 if (!estream)
116 return -ENODEV;
117
118 stream = hdac_stream(estream);
119
120 /* assign decoupled host DMA channel */
121 ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
122 if (ret < 0)
123 return ret;
124
125 skl_dsp_setup_spib(dev, size, stream->stream_tag, true);
126
127 return stream->stream_tag;
128 }
129
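/* Start or stop the host DMA stream identified by stream_tag */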
130 static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
131 {
132 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
133 struct hdac_stream *stream;
134 struct hdac_bus *bus = ebus_to_hbus(ebus);
135
136 if (!bus)
137 return -ENODEV;
138
139 stream = snd_hdac_get_stream(bus,
140 SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
141 if (!stream)
142 return -EINVAL;
143
144 snd_hdac_dsp_trigger(stream, start);
145
146 return 0;
147 }
148
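/* Release the host DMA stream and free the buffer used for firmware download */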
149 static int skl_dsp_cleanup(struct device *dev,
150 struct snd_dma_buffer *dmab, int stream_tag)
151 {
152 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
153 struct hdac_stream *stream;
154 struct hdac_ext_stream *estream;
155 struct hdac_bus *bus = ebus_to_hbus(ebus);
156
157 if (!bus)
158 return -ENODEV;
159
160 stream = snd_hdac_get_stream(bus,
161 SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
162 if (!stream)
163 return -EINVAL;
164
165 estream = stream_to_hdac_ext_stream(stream);
166 skl_dsp_setup_spib(dev, 0, stream_tag, false);
167 snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);
168
169 snd_hdac_dsp_cleanup(stream, dmab);
170
171 return 0;
172 }
173
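/* Loader ops for Skylake: only the DMA buffer alloc/free callbacks are needed */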
174 static struct skl_dsp_loader_ops skl_get_loader_ops(void)
175 {
176 struct skl_dsp_loader_ops loader_ops;
177
178 memset(&loader_ops, 0, sizeof(struct skl_dsp_loader_ops));
179
180 loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
181 loader_ops.free_dma_buf = skl_free_dma_buf;
182
183 return loader_ops;
184 };
185
186 static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
187 {
188 struct skl_dsp_loader_ops loader_ops;
189
190 memset(&loader_ops, 0, sizeof(loader_ops));
191
192 loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
193 loader_ops.free_dma_buf = skl_free_dma_buf;
194 loader_ops.prepare = skl_dsp_prepare;
195 loader_ops.trigger = skl_dsp_trigger;
196 loader_ops.cleanup = skl_dsp_cleanup;
197
198 return loader_ops;
199 };
200
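/* Map PCI device IDs to the platform-specific loader ops and DSP init/cleanup handlers (Skylake and Broxton) */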
201 static const struct skl_dsp_ops dsp_ops[] = {
202 {
203 .id = 0x9d70,
204 .loader_ops = skl_get_loader_ops,
205 .init = skl_sst_dsp_init,
206 .cleanup = skl_sst_dsp_cleanup
207 },
208 {
209 .id = 0x5a98,
210 .loader_ops = bxt_get_loader_ops,
211 .init = bxt_sst_dsp_init,
212 .cleanup = bxt_sst_dsp_cleanup
213 },
214 };
215
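/* Find the dsp_ops[] index matching the PCI device ID, or -EINVAL if unsupported */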
216 static int skl_get_dsp_ops(int pci_id)
217 {
218 int i;
219
220 for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
221 if (dsp_ops[i].id == pci_id)
222 return i;
223 }
224
225 return -EINVAL;
226 }
227
228 int skl_init_dsp(struct skl *skl)
229 {
230 void __iomem *mmio_base;
231 struct hdac_ext_bus *ebus = &skl->ebus;
232 struct hdac_bus *bus = ebus_to_hbus(ebus);
233 struct skl_dsp_loader_ops loader_ops;
234 int irq = bus->irq;
235 int ret, index;
236
237 /* enable ppcap interrupt */
238 snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
239 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
240
241 /* read the BAR of the ADSP MMIO */
242 mmio_base = pci_ioremap_bar(skl->pci, 4);
243 if (mmio_base == NULL) {
244 dev_err(bus->dev, "ioremap error\n");
245 return -ENXIO;
246 }
247
248 index = skl_get_dsp_ops(skl->pci->device);
249 if (index < 0)
250 return -EINVAL;
251
252 loader_ops = dsp_ops[index].loader_ops();
253 ret = dsp_ops[index].init(bus->dev, mmio_base, irq,
254 skl->fw_name, loader_ops, &skl->skl_sst);
255
256 if (ret < 0)
257 return ret;
258
259 skl_dsp_enable_notification(skl->skl_sst, false);
260 dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
261
262 return ret;
263 }
264
265 int skl_free_dsp(struct skl *skl)
266 {
267 struct hdac_ext_bus *ebus = &skl->ebus;
268 struct hdac_bus *bus = ebus_to_hbus(ebus);
269 struct skl_sst *ctx = skl->skl_sst;
270 int index;
271
272 /* disable ppcap interrupt */
273 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
274
275 index = skl_get_dsp_ops(skl->pci->device);
276 if (index < 0)
277 return -EIO;
278
279 dsp_ops[index].cleanup(bus->dev, ctx);
280
281 if (ctx->dsp->addr.lpe)
282 iounmap(ctx->dsp->addr.lpe);
283
284 return 0;
285 }
286
287 int skl_suspend_dsp(struct skl *skl)
288 {
289 struct skl_sst *ctx = skl->skl_sst;
290 int ret;
291
292 /* if ppcap is not supported return 0 */
293 if (!skl->ebus.ppcap)
294 return 0;
295
296 ret = skl_dsp_sleep(ctx->dsp);
297 if (ret < 0)
298 return ret;
299
300 /* disable ppcap interrupt */
301 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
302 snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);
303
304 return 0;
305 }
306
307 int skl_resume_dsp(struct skl *skl)
308 {
309 struct skl_sst *ctx = skl->skl_sst;
310 int ret;
311
312 /* if ppcap is not supported return 0 */
313 if (!skl->ebus.ppcap)
314 return 0;
315
316 /* enable ppcap interrupt */
317 snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
318 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
319
320 ret = skl_dsp_wake(ctx->dsp);
321 if (ret < 0)
322 return ret;
323
324 skl_dsp_enable_notification(skl->skl_sst, false);
325 return ret;
326 }
327
328 enum skl_bitdepth skl_get_bit_depth(int params)
329 {
330 switch (params) {
331 case 8:
332 return SKL_DEPTH_8BIT;
333
334 case 16:
335 return SKL_DEPTH_16BIT;
336
337 case 24:
338 return SKL_DEPTH_24BIT;
339
340 case 32:
341 return SKL_DEPTH_32BIT;
342
343 default:
344 return SKL_DEPTH_INVALID;
345
346 }
347 }
348
349 /*
350 * Each module in the DSP expects a base module configuration, which consists of
351 * PCM format information (calculated in the driver) and resource values
352 * read from the widget information passed through the topology binary.
353 * This is sent when we create a module with the INIT_INSTANCE IPC msg
354 */
355 static void skl_set_base_module_format(struct skl_sst *ctx,
356 struct skl_module_cfg *mconfig,
357 struct skl_base_cfg *base_cfg)
358 {
359 struct skl_module_fmt *format = &mconfig->in_fmt[0];
360
361 base_cfg->audio_fmt.number_of_channels = (u8)format->channels;
362
363 base_cfg->audio_fmt.s_freq = format->s_freq;
364 base_cfg->audio_fmt.bit_depth = format->bit_depth;
365 base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
366 base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
367
368 dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
369 format->bit_depth, format->valid_bit_depth,
370 format->ch_cfg);
371
372 base_cfg->audio_fmt.channel_map = format->ch_map;
373
374 base_cfg->audio_fmt.interleaving = format->interleaving_style;
375
376 base_cfg->cps = mconfig->mcps;
377 base_cfg->ibs = mconfig->ibs;
378 base_cfg->obs = mconfig->obs;
379 base_cfg->is_pages = mconfig->mem_pages;
380 }
381
382 /*
383 * Copies copier capabilities into copier module and updates copier module
384 * config size.
385 */
386 static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
387 struct skl_cpr_cfg *cpr_mconfig)
388 {
389 if (mconfig->formats_config.caps_size == 0)
390 return;
391
392 memcpy(cpr_mconfig->gtw_cfg.config_data,
393 mconfig->formats_config.caps,
394 mconfig->formats_config.caps_size);
395
396 cpr_mconfig->gtw_cfg.config_length =
397 (mconfig->formats_config.caps_size) / 4;
398 }
399
400 #define SKL_NON_GATEWAY_CPR_NODE_ID 0xFFFFFFFF
401 /*
402 * Calculate the gateway settings required for the copier module: the type
403 * of gateway and the index of the gateway to use
404 */
405 static u32 skl_get_node_id(struct skl_sst *ctx,
406 struct skl_module_cfg *mconfig)
407 {
408 union skl_connector_node_id node_id = {0};
409 union skl_ssp_dma_node ssp_node = {0};
410 struct skl_pipe_params *params = mconfig->pipe->p_params;
411
412 switch (mconfig->dev_type) {
413 case SKL_DEVICE_BT:
414 node_id.node.dma_type =
415 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
416 SKL_DMA_I2S_LINK_OUTPUT_CLASS :
417 SKL_DMA_I2S_LINK_INPUT_CLASS;
418 node_id.node.vindex = params->host_dma_id +
419 (mconfig->vbus_id << 3);
420 break;
421
422 case SKL_DEVICE_I2S:
423 node_id.node.dma_type =
424 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
425 SKL_DMA_I2S_LINK_OUTPUT_CLASS :
426 SKL_DMA_I2S_LINK_INPUT_CLASS;
427 ssp_node.dma_node.time_slot_index = mconfig->time_slot;
428 ssp_node.dma_node.i2s_instance = mconfig->vbus_id;
429 node_id.node.vindex = ssp_node.val;
430 break;
431
432 case SKL_DEVICE_DMIC:
433 node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
434 node_id.node.vindex = mconfig->vbus_id +
435 (mconfig->time_slot);
436 break;
437
438 case SKL_DEVICE_HDALINK:
439 node_id.node.dma_type =
440 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
441 SKL_DMA_HDA_LINK_OUTPUT_CLASS :
442 SKL_DMA_HDA_LINK_INPUT_CLASS;
443 node_id.node.vindex = params->link_dma_id;
444 break;
445
446 case SKL_DEVICE_HDAHOST:
447 node_id.node.dma_type =
448 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
449 SKL_DMA_HDA_HOST_OUTPUT_CLASS :
450 SKL_DMA_HDA_HOST_INPUT_CLASS;
451 node_id.node.vindex = params->host_dma_id;
452 break;
453
454 default:
455 node_id.val = 0xFFFFFFFF;
456 break;
457 }
458
459 return node_id.val;
460 }
461
462 static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
463 struct skl_module_cfg *mconfig,
464 struct skl_cpr_cfg *cpr_mconfig)
465 {
466 cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);
467
468 if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
469 cpr_mconfig->cpr_feature_mask = 0;
470 return;
471 }
472
473 if (SKL_CONN_SOURCE == mconfig->hw_conn_type)
474 cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
475 else
476 cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->ibs;
477
478 cpr_mconfig->cpr_feature_mask = 0;
479 cpr_mconfig->gtw_cfg.config_length = 0;
480
481 skl_copy_copier_caps(mconfig, cpr_mconfig);
482 }
483
484 #define DMA_CONTROL_ID 5
485
486 int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
487 {
488 struct skl_dma_control *dma_ctrl;
489 struct skl_i2s_config_blob config_blob;
490 struct skl_ipc_large_config_msg msg = {0};
491 int err = 0;
492
493
494 /*
495 * If the blob size is the same as the capability size, then no DMA control
496 * is present, so return
497 */
498 if (mconfig->formats_config.caps_size == sizeof(config_blob))
499 return 0;
500
501 msg.large_param_id = DMA_CONTROL_ID;
502 msg.param_data_size = sizeof(struct skl_dma_control) +
503 mconfig->formats_config.caps_size;
504
505 dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
506 if (dma_ctrl == NULL)
507 return -ENOMEM;
508
509 dma_ctrl->node_id = skl_get_node_id(ctx, mconfig);
510
511 /* size in dwords */
512 dma_ctrl->config_length = sizeof(config_blob) / 4;
513
514 memcpy(dma_ctrl->config_data, mconfig->formats_config.caps,
515 mconfig->formats_config.caps_size);
516
517 err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);
518
519 kfree(dma_ctrl);
520
521 return err;
522 }
523
524 static void skl_setup_out_format(struct skl_sst *ctx,
525 struct skl_module_cfg *mconfig,
526 struct skl_audio_data_format *out_fmt)
527 {
528 struct skl_module_fmt *format = &mconfig->out_fmt[0];
529
530 out_fmt->number_of_channels = (u8)format->channels;
531 out_fmt->s_freq = format->s_freq;
532 out_fmt->bit_depth = format->bit_depth;
533 out_fmt->valid_bit_depth = format->valid_bit_depth;
534 out_fmt->ch_cfg = format->ch_cfg;
535
536 out_fmt->channel_map = format->ch_map;
537 out_fmt->interleaving = format->interleaving_style;
538 out_fmt->sample_type = format->sample_type;
539
540 dev_dbg(ctx->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
541 out_fmt->number_of_channels, format->s_freq, format->bit_depth);
542 }
543
544 /*
545 * The DSP needs an SRC module for frequency conversion. SRC takes the base
546 * module configuration and the target frequency as an extra parameter passed
547 * in the SRC config
548 */
549 static void skl_set_src_format(struct skl_sst *ctx,
550 struct skl_module_cfg *mconfig,
551 struct skl_src_module_cfg *src_mconfig)
552 {
553 struct skl_module_fmt *fmt = &mconfig->out_fmt[0];
554
555 skl_set_base_module_format(ctx, mconfig,
556 (struct skl_base_cfg *)src_mconfig);
557
558 src_mconfig->src_cfg = fmt->s_freq;
559 }
560
561 /*
562 * The DSP needs an updown mixer module for channel conversion. The updown
563 * module takes the base module configuration and the channel configuration.
564 * It also takes coefficients; firmware default coefficients are selected here
565 */
566 static void skl_set_updown_mixer_format(struct skl_sst *ctx,
567 struct skl_module_cfg *mconfig,
568 struct skl_up_down_mixer_cfg *mixer_mconfig)
569 {
570 struct skl_module_fmt *fmt = &mconfig->out_fmt[0];
571 int i = 0;
572
573 skl_set_base_module_format(ctx, mconfig,
574 (struct skl_base_cfg *)mixer_mconfig);
575 mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
576
577 /* Select F/W default coefficient */
578 mixer_mconfig->coeff_sel = 0x0;
579
580 /* User coeff, don't care since we are selecting F/W defaults */
581 for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
582 mixer_mconfig->coeff[i] = 0xDEADBEEF;
583 }
584
585 /*
586 * 'copier' is a DSP internal module which copies data from host DMA (HDA host
587 * DMA) or a link (HDA link, SSP, PDM).
588 * Here we calculate the copier module parameters, such as the PCM format,
589 * output format and gateway settings.
590 * copier_module_config is sent as the input buffer with the INIT_INSTANCE IPC msg
591 */
592 static void skl_set_copier_format(struct skl_sst *ctx,
593 struct skl_module_cfg *mconfig,
594 struct skl_cpr_cfg *cpr_mconfig)
595 {
596 struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
597 struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;
598
599 skl_set_base_module_format(ctx, mconfig, base_cfg);
600
601 skl_setup_out_format(ctx, mconfig, out_fmt);
602 skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
603 }
604
605 /*
606 * Algo modules are DSP pre-processing modules. An algo module takes the base
607 * module configuration and its params
608 */
609
610 static void skl_set_algo_format(struct skl_sst *ctx,
611 struct skl_module_cfg *mconfig,
612 struct skl_algo_cfg *algo_mcfg)
613 {
614 struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)algo_mcfg;
615
616 skl_set_base_module_format(ctx, mconfig, base_cfg);
617
618 if (mconfig->formats_config.caps_size == 0)
619 return;
620
621 memcpy(algo_mcfg->params,
622 mconfig->formats_config.caps,
623 mconfig->formats_config.caps_size);
624
625 }
626
627 /*
628 * The mic select module allows selecting one or more input channels, thus
629 * acting as a demux.
630 *
631 * The mic select module takes the base module configuration and the output
632 * format configuration
633 */
634 static void skl_set_base_outfmt_format(struct skl_sst *ctx,
635 struct skl_module_cfg *mconfig,
636 struct skl_base_outfmt_cfg *base_outfmt_mcfg)
637 {
638 struct skl_audio_data_format *out_fmt = &base_outfmt_mcfg->out_fmt;
639 struct skl_base_cfg *base_cfg =
640 (struct skl_base_cfg *)base_outfmt_mcfg;
641
642 skl_set_base_module_format(ctx, mconfig, base_cfg);
643 skl_setup_out_format(ctx, mconfig, out_fmt);
644 }
645
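/* Size of the module config payload depends on the module type; copier and algo modules also carry their capability blob */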
646 static u16 skl_get_module_param_size(struct skl_sst *ctx,
647 struct skl_module_cfg *mconfig)
648 {
649 u16 param_size;
650
651 switch (mconfig->m_type) {
652 case SKL_MODULE_TYPE_COPIER:
653 param_size = sizeof(struct skl_cpr_cfg);
654 param_size += mconfig->formats_config.caps_size;
655 return param_size;
656
657 case SKL_MODULE_TYPE_SRCINT:
658 return sizeof(struct skl_src_module_cfg);
659
660 case SKL_MODULE_TYPE_UPDWMIX:
661 return sizeof(struct skl_up_down_mixer_cfg);
662
663 case SKL_MODULE_TYPE_ALGO:
664 param_size = sizeof(struct skl_base_cfg);
665 param_size += mconfig->formats_config.caps_size;
666 return param_size;
667
668 case SKL_MODULE_TYPE_BASE_OUTFMT:
669 return sizeof(struct skl_base_outfmt_cfg);
670
671 default:
672 /*
673 * return only base cfg when no specific module type is
674 * specified
675 */
676 return sizeof(struct skl_base_cfg);
677 }
678
679 return 0;
680 }
681
682 /*
683 * DSP firmware supports various modules like copier, SRC, updown etc.
684 * These modules require various parameters to be calculated and sent to the
685 * DSP for module initialization. By default a generic module needs only the
686 * base module format configuration
687 */
688
689 static int skl_set_module_format(struct skl_sst *ctx,
690 struct skl_module_cfg *module_config,
691 u16 *module_config_size,
692 void **param_data)
693 {
694 u16 param_size;
695
696 param_size = skl_get_module_param_size(ctx, module_config);
697
698 *param_data = kzalloc(param_size, GFP_KERNEL);
699 if (NULL == *param_data)
700 return -ENOMEM;
701
702 *module_config_size = param_size;
703
704 switch (module_config->m_type) {
705 case SKL_MODULE_TYPE_COPIER:
706 skl_set_copier_format(ctx, module_config, *param_data);
707 break;
708
709 case SKL_MODULE_TYPE_SRCINT:
710 skl_set_src_format(ctx, module_config, *param_data);
711 break;
712
713 case SKL_MODULE_TYPE_UPDWMIX:
714 skl_set_updown_mixer_format(ctx, module_config, *param_data);
715 break;
716
717 case SKL_MODULE_TYPE_ALGO:
718 skl_set_algo_format(ctx, module_config, *param_data);
719 break;
720
721 case SKL_MODULE_TYPE_BASE_OUTFMT:
722 skl_set_base_outfmt_format(ctx, module_config, *param_data);
723 break;
724
725 default:
726 skl_set_base_module_format(ctx, module_config, *param_data);
727 break;
728
729 }
730
731 dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
732 module_config->id.module_id, param_size);
733 print_hex_dump(KERN_DEBUG, "Module params:", DUMP_PREFIX_OFFSET, 8, 4,
734 *param_data, param_size, false);
735 return 0;
736 }
737
738 static int skl_get_queue_index(struct skl_module_pin *mpin,
739 struct skl_module_inst_id id, int max)
740 {
741 int i;
742
743 for (i = 0; i < max; i++) {
744 if (mpin[i].id.module_id == id.module_id &&
745 mpin[i].id.instance_id == id.instance_id)
746 return i;
747 }
748
749 return -EINVAL;
750 }
751
752 /*
753 * Allocates a queue (pin index) for a module.
754 * If the pin is dynamic, the pin_index is allocated from 0 to max_pin.
755 * If static, the pin_index is fixed based on module_id and instance_id
756 */
757 static int skl_alloc_queue(struct skl_module_pin *mpin,
758 struct skl_module_cfg *tgt_cfg, int max)
759 {
760 int i;
761 struct skl_module_inst_id id = tgt_cfg->id;
762 /*
763 * If the pin is dynamic, find the first free pin;
764 * otherwise find the pin matching the module and instance id, as topology
765 * will ensure a unique pin is assigned to it, so there is no need to
766 * allocate/free
767 */
768 for (i = 0; i < max; i++) {
769 if (mpin[i].is_dynamic) {
770 if (!mpin[i].in_use &&
771 mpin[i].pin_state == SKL_PIN_UNBIND) {
772
773 mpin[i].in_use = true;
774 mpin[i].id.module_id = id.module_id;
775 mpin[i].id.instance_id = id.instance_id;
776 mpin[i].tgt_mcfg = tgt_cfg;
777 return i;
778 }
779 } else {
780 if (mpin[i].id.module_id == id.module_id &&
781 mpin[i].id.instance_id == id.instance_id &&
782 mpin[i].pin_state == SKL_PIN_UNBIND) {
783
784 mpin[i].tgt_mcfg = tgt_cfg;
785 return i;
786 }
787 }
788 }
789
790 return -EINVAL;
791 }
792
793 static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
794 {
795 if (mpin[q_index].is_dynamic) {
796 mpin[q_index].in_use = false;
797 mpin[q_index].id.module_id = 0;
798 mpin[q_index].id.instance_id = 0;
799 }
800 mpin[q_index].pin_state = SKL_PIN_UNBIND;
801 mpin[q_index].tgt_mcfg = NULL;
802 }
803
804 /* Module state is set to UNINIT if all of the module's out pin states are UNBIND */
805
806 static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
807 struct skl_module_cfg *mcfg)
808 {
809 int i;
810 bool found = false;
811
812 for (i = 0; i < max; i++) {
813 if (mpin[i].pin_state == SKL_PIN_UNBIND)
814 continue;
815 found = true;
816 break;
817 }
818
819 if (!found)
820 mcfg->m_state = SKL_MODULE_UNINIT;
821 return;
822 }
823
824 /*
825 * A module needs to be instantiated in the DSP. A module is part of a
826 * collection of modules referred to as a PIPE.
827 * We first calculate the module format, based on the module type, and then
828 * initialize it in the DSP by sending an INIT_INSTANCE IPC message using the IPC helper
829 */
830 int skl_init_module(struct skl_sst *ctx,
831 struct skl_module_cfg *mconfig)
832 {
833 u16 module_config_size = 0;
834 void *param_data = NULL;
835 int ret;
836 struct skl_ipc_init_instance_msg msg;
837
838 dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
839 mconfig->id.module_id, mconfig->id.instance_id);
840
841 if (mconfig->pipe->state != SKL_PIPE_CREATED) {
842 dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
843 mconfig->pipe->state, mconfig->pipe->ppl_id);
844 return -EIO;
845 }
846
847 ret = skl_set_module_format(ctx, mconfig,
848 &module_config_size, &param_data);
849 if (ret < 0) {
850 dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
851 return ret;
852 }
853
854 msg.module_id = mconfig->id.module_id;
855 msg.instance_id = mconfig->id.instance_id;
856 msg.ppl_instance_id = mconfig->pipe->ppl_id;
857 msg.param_data_size = module_config_size;
858 msg.core_id = mconfig->core_id;
859
860 ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
861 if (ret < 0) {
862 dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
863 kfree(param_data);
864 return ret;
865 }
866 mconfig->m_state = SKL_MODULE_INIT_DONE;
867 kfree(param_data);
868 return ret;
869 }
870
871 static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
872 *src_module, struct skl_module_cfg *dst_module)
873 {
874 dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
875 __func__, src_module->id.module_id, src_module->id.instance_id);
876 dev_dbg(ctx->dev, "%s: dst_module=%d dst_instacne=%d\n", __func__,
877 dst_module->id.module_id, dst_module->id.instance_id);
878
879 dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
880 src_module->m_state, dst_module->m_state);
881 }
882
883 /*
884 * On module free-up, we need to unbind the module from the modules it is
885 * already bound to.
886 * Find the allocated pins and unbind them using the bind_unbind IPC
887 */
888 int skl_unbind_modules(struct skl_sst *ctx,
889 struct skl_module_cfg *src_mcfg,
890 struct skl_module_cfg *dst_mcfg)
891 {
892 int ret;
893 struct skl_ipc_bind_unbind_msg msg;
894 struct skl_module_inst_id src_id = src_mcfg->id;
895 struct skl_module_inst_id dst_id = dst_mcfg->id;
896 int in_max = dst_mcfg->max_in_queue;
897 int out_max = src_mcfg->max_out_queue;
898 int src_index, dst_index, src_pin_state, dst_pin_state;
899
900 skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
901
902 /* get src queue index */
903 src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
904 if (src_index < 0)
905 return 0;
906
907 msg.src_queue = src_index;
908
909 /* get dst queue index */
910 dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
911 if (dst_index < 0)
912 return 0;
913
914 msg.dst_queue = dst_index;
915
916 src_pin_state = src_mcfg->m_out_pin[src_index].pin_state;
917 dst_pin_state = dst_mcfg->m_in_pin[dst_index].pin_state;
918
919 if (src_pin_state != SKL_PIN_BIND_DONE ||
920 dst_pin_state != SKL_PIN_BIND_DONE)
921 return 0;
922
923 msg.module_id = src_mcfg->id.module_id;
924 msg.instance_id = src_mcfg->id.instance_id;
925 msg.dst_module_id = dst_mcfg->id.module_id;
926 msg.dst_instance_id = dst_mcfg->id.instance_id;
927 msg.bind = false;
928
929 ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
930 if (!ret) {
931 /* free queue only if unbind is success */
932 skl_free_queue(src_mcfg->m_out_pin, src_index);
933 skl_free_queue(dst_mcfg->m_in_pin, dst_index);
934
935 /*
936 * check only the src module bind state, since bind is
937 * always from src -> sink
938 */
939 skl_clear_module_state(src_mcfg->m_out_pin, out_max, src_mcfg);
940 }
941
942 return ret;
943 }
944
945 /*
946 * Once a module is instantiated, it needs to be bound to other modules in
947 * the pipeline. For binding we need to find the module pins which are bound
948 * together.
949 * This function finds the pins and then sends a bind_unbind IPC message to
950 * the DSP using the IPC helper
951 */
952 int skl_bind_modules(struct skl_sst *ctx,
953 struct skl_module_cfg *src_mcfg,
954 struct skl_module_cfg *dst_mcfg)
955 {
956 int ret;
957 struct skl_ipc_bind_unbind_msg msg;
958 int in_max = dst_mcfg->max_in_queue;
959 int out_max = src_mcfg->max_out_queue;
960 int src_index, dst_index;
961
962 skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
963
964 if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
965 dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
966 return 0;
967
968 src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_mcfg, out_max);
969 if (src_index < 0)
970 return -EINVAL;
971
972 msg.src_queue = src_index;
973 dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_mcfg, in_max);
974 if (dst_index < 0) {
975 skl_free_queue(src_mcfg->m_out_pin, src_index);
976 return -EINVAL;
977 }
978
979 msg.dst_queue = dst_index;
980
981 dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
982 msg.src_queue, msg.dst_queue);
983
984 msg.module_id = src_mcfg->id.module_id;
985 msg.instance_id = src_mcfg->id.instance_id;
986 msg.dst_module_id = dst_mcfg->id.module_id;
987 msg.dst_instance_id = dst_mcfg->id.instance_id;
988 msg.bind = true;
989
990 ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
991
992 if (!ret) {
993 src_mcfg->m_state = SKL_MODULE_BIND_DONE;
994 src_mcfg->m_out_pin[src_index].pin_state = SKL_PIN_BIND_DONE;
995 dst_mcfg->m_in_pin[dst_index].pin_state = SKL_PIN_BIND_DONE;
996 } else {
997 /* error case: if IPC fails, clear the queue index */
998 skl_free_queue(src_mcfg->m_out_pin, src_index);
999 skl_free_queue(dst_mcfg->m_in_pin, dst_index);
1000 }
1001
1002 return ret;
1003 }
1004
1005 static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
1006 enum skl_ipc_pipeline_state state)
1007 {
1008 dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);
1009
1010 return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
1011 }
1012
1013 /*
1014 * A pipeline is a collection of modules. Before a module is instantiated, a
1015 * pipeline needs to be created for it.
1016 * This function creates the pipeline by sending a create pipeline IPC message
1017 * to the FW
1018 */
1019 int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
1020 {
1021 int ret;
1022
1023 dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
1024
1025 ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
1026 pipe->pipe_priority, pipe->ppl_id);
1027 if (ret < 0) {
1028 dev_err(ctx->dev, "Failed to create pipeline\n");
1029 return ret;
1030 }
1031
1032 pipe->state = SKL_PIPE_CREATED;
1033
1034 return 0;
1035 }
1036
1037 /*
1038 * A pipeline needs to be deleted on cleanup. If the pipeline is running,
1039 * pause it first and then delete it.
1040 * The pipe delete is done by sending a delete pipeline IPC. The DSP will stop
1041 * the DMA engines and release the resources
1042 */
1043 int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1044 {
1045 int ret;
1046
1047 dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
1048
1049 /* If pipe is not started, do not try to stop the pipe in FW. */
1050 if (pipe->state > SKL_PIPE_STARTED) {
1051 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
1052 if (ret < 0) {
1053 dev_err(ctx->dev, "Failed to stop pipeline\n");
1054 return ret;
1055 }
1056
1057 pipe->state = SKL_PIPE_PAUSED;
1058 } else {
1059 /* If pipe was not created in FW, do not try to delete it */
1060 if (pipe->state < SKL_PIPE_CREATED)
1061 return 0;
1062
1063 ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
1064 if (ret < 0)
1065 dev_err(ctx->dev, "Failed to delete pipeline\n");
1066
1067 pipe->state = SKL_PIPE_INVALID;
1068 }
1069
1070 return ret;
1071 }
1072
1073 /*
1074 * A pipeline is also a scheduling entity in the DSP, which can be run or stopped.
1075 * To process data, the pipe needs to be run by sending a set pipe state IPC
1076 * to the DSP
1077 */
1078 int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1079 {
1080 int ret;
1081
1082 dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
1083
1084 /* If pipe was not created in FW, do not try to pause or delete */
1085 if (pipe->state < SKL_PIPE_CREATED)
1086 return 0;
1087
1088 /* Pipe has to be paused before it is started */
1089 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
1090 if (ret < 0) {
1091 dev_err(ctx->dev, "Failed to pause pipe\n");
1092 return ret;
1093 }
1094
1095 pipe->state = SKL_PIPE_PAUSED;
1096
1097 ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
1098 if (ret < 0) {
1099 dev_err(ctx->dev, "Failed to start pipe\n");
1100 return ret;
1101 }
1102
1103 pipe->state = SKL_PIPE_STARTED;
1104
1105 return 0;
1106 }
1107
1108 /*
1109 * Stop the pipeline by sending a set pipe state IPC.
1110 * The DSP doesn't implement stop, so we always send the pause message
1111 */
1112 int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1113 {
1114 int ret;
1115
1116 dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);
1117
1118 /* If pipe was not created in FW, do not try to pause or delete */
1119 if (pipe->state < SKL_PIPE_PAUSED)
1120 return 0;
1121
1122 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
1123 if (ret < 0) {
1124 dev_dbg(ctx->dev, "Failed to stop pipe\n");
1125 return ret;
1126 }
1127
1128 pipe->state = SKL_PIPE_CREATED;
1129
1130 return 0;
1131 }
1132
1133 /* Algo parameter set helper function */
1134 int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
1135 u32 param_id, struct skl_module_cfg *mcfg)
1136 {
1137 struct skl_ipc_large_config_msg msg;
1138
1139 msg.module_id = mcfg->id.module_id;
1140 msg.instance_id = mcfg->id.instance_id;
1141 msg.param_data_size = size;
1142 msg.large_param_id = param_id;
1143
1144 return skl_ipc_set_large_config(&ctx->ipc, &msg, params);
1145 }
1146
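/* Algo parameter get helper function */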
1147 int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
1148 u32 param_id, struct skl_module_cfg *mcfg)
1149 {
1150 struct skl_ipc_large_config_msg msg;
1151
1152 msg.module_id = mcfg->id.module_id;
1153 msg.instance_id = mcfg->id.instance_id;
1154 msg.param_data_size = size;
1155 msg.large_param_id = param_id;
1156
1157 return skl_ipc_get_large_config(&ctx->ipc, &msg, params);
1158 }