ASoC: Intel: Skylake: check manifest size
sound/soc/intel/skylake/skl-topology.c
1 /*
2 * skl-topology.c - Implements Platform component ALSA controls/widget
3 * handlers.
4 *
5 * Copyright (C) 2014-2015 Intel Corp
6 * Author: Jeeja KP <jeeja.kp@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19 #include <linux/slab.h>
20 #include <linux/types.h>
21 #include <linux/firmware.h>
22 #include <sound/soc.h>
23 #include <sound/soc-topology.h>
24 #include <uapi/sound/snd_sst_tokens.h>
25 #include "skl-sst-dsp.h"
26 #include "skl-sst-ipc.h"
27 #include "skl-topology.h"
28 #include "skl.h"
29 #include "skl-tplg-interface.h"
30 #include "../common/sst-dsp.h"
31 #include "../common/sst-dsp-priv.h"
32
33 #define SKL_CH_FIXUP_MASK (1 << 0)
34 #define SKL_RATE_FIXUP_MASK (1 << 1)
35 #define SKL_FMT_FIXUP_MASK (1 << 2)
36 #define SKL_IN_DIR_BIT_MASK BIT(0)
37 #define SKL_PIN_COUNT_MASK GENMASK(7, 4)
38
39 /*
40 * The SKL DSP driver models only a few DAPM widget types and ignores the
41 * rest. This helper checks whether the SKL driver handles a given widget.
42 */
43 static bool is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
44 {
45 switch (w->id) {
46 case snd_soc_dapm_dai_link:
47 case snd_soc_dapm_dai_in:
48 case snd_soc_dapm_aif_in:
49 case snd_soc_dapm_aif_out:
50 case snd_soc_dapm_dai_out:
51 case snd_soc_dapm_switch:
52 return false;
53 default:
54 return true;
55 }
56 }
57
58 /*
59 * Each pipeline needs memory to be allocated. Check if we have free memory
60 * from the available pool.
61 */
62 static bool skl_is_pipe_mem_avail(struct skl *skl,
63 struct skl_module_cfg *mconfig)
64 {
65 struct skl_sst *ctx = skl->skl_sst;
66
67 if (skl->resource.mem + mconfig->pipe->memory_pages >
68 skl->resource.max_mem) {
69 dev_err(ctx->dev,
70 "%s: module_id %d instance %d\n", __func__,
71 mconfig->id.module_id,
72 mconfig->id.instance_id);
73 dev_err(ctx->dev,
74 "exceeds ppl memory available %d mem %d\n",
75 skl->resource.max_mem, skl->resource.mem);
76 return false;
77 } else {
78 return true;
79 }
80 }
81
82 /*
83 * Add the memory to the memory pool. This is freed when the pipe is deleted.
84 * Note: the DSP does the actual memory management; we only keep track of the
85 * complete pool.
86 */
87 static void skl_tplg_alloc_pipe_mem(struct skl *skl,
88 struct skl_module_cfg *mconfig)
89 {
90 skl->resource.mem += mconfig->pipe->memory_pages;
91 }
92
93 /*
94 * A pipeline needs DSP CPU resources for computation; this is
95 * quantified in MCPS (Million Clocks Per Second) required for the module/pipe.
96 *
97 * Each pipeline needs MCPS to be allocated. Check if we have MCPS for this
98 * pipe.
99 */
100
101 static bool skl_is_pipe_mcps_avail(struct skl *skl,
102 struct skl_module_cfg *mconfig)
103 {
104 struct skl_sst *ctx = skl->skl_sst;
105
106 if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
107 dev_err(ctx->dev,
108 "%s: module_id %d instance %d\n", __func__,
109 mconfig->id.module_id, mconfig->id.instance_id);
110 dev_err(ctx->dev,
111 "exceeds ppl mcps available %d > mem %d\n",
112 skl->resource.max_mcps, skl->resource.mcps);
113 return false;
114 } else {
115 return true;
116 }
117 }
118
119 static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
120 struct skl_module_cfg *mconfig)
121 {
122 skl->resource.mcps += mconfig->mcps;
123 }
124
125 /*
126 * Free the mcps when tearing down
127 */
128 static void
129 skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
130 {
131 skl->resource.mcps -= mconfig->mcps;
132 }
133
134 /*
135 * Free the memory when tearing down
136 */
137 static void
138 skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
139 {
140 skl->resource.mem -= mconfig->pipe->memory_pages;
141 }
142
143
144 static void skl_dump_mconfig(struct skl_sst *ctx,
145 struct skl_module_cfg *mcfg)
146 {
147 dev_dbg(ctx->dev, "Dumping config\n");
148 dev_dbg(ctx->dev, "Input Format:\n");
149 dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
150 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
151 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
152 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
153 dev_dbg(ctx->dev, "Output Format:\n");
154 dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
155 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
156 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
157 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
158 }
159
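/*
 * The channel map packs one 4-bit slot index per channel, lowest nibble
 * first, and leaves unused slots as 0xF; e.g. 4 channels starting at
 * slot 0 give 0xFFFF3210 (a sketch of the encoding implemented by the
 * loop below).
 */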
160 static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
161 {
162 int slot_map = 0xFFFFFFFF;
163 int start_slot = 0;
164 int i;
165
166 for (i = 0; i < chs; i++) {
167 /*
168 * For 2 channels with starting slot as 0, slot map will
169 * look like 0xFFFFFF10.
170 */
171 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
172 start_slot++;
173 }
174 fmt->ch_map = slot_map;
175 }
176
177 static void skl_tplg_update_params(struct skl_module_fmt *fmt,
178 struct skl_pipe_params *params, int fixup)
179 {
180 if (fixup & SKL_RATE_FIXUP_MASK)
181 fmt->s_freq = params->s_freq;
182 if (fixup & SKL_CH_FIXUP_MASK) {
183 fmt->channels = params->ch;
184 skl_tplg_update_chmap(fmt, fmt->channels);
185 }
186 if (fixup & SKL_FMT_FIXUP_MASK) {
187 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
188
189 /*
190 * A 16 bit sample uses a 16 bit container, whereas 24 bit samples are
191 * carried in a 32 bit container, so update the container bit depth accordingly.
192 */
193 switch (fmt->valid_bit_depth) {
194 case SKL_DEPTH_16BIT:
195 fmt->bit_depth = fmt->valid_bit_depth;
196 break;
197
198 default:
199 fmt->bit_depth = SKL_DEPTH_32BIT;
200 break;
201 }
202 }
203
204 }
205
206 /*
207 * A pipeline may have modules which impact the PCM parameters, like an SRC,
208 * channel converter or format converter.
209 * We need to calculate the output params by applying the 'fixup'.
210 * The topology tells the driver which type of fixup is to be applied by
211 * supplying the fixup mask, and based on that we calculate the output format.
212 *
213 * For an FE the PCM hw_params is the source/target format. The same is
214 * applicable for a BE when its hw_params is invoked.
215 * Here, based on the FE/BE pipeline type and the stream direction, we
216 * calculate the input and output fixups and then apply them to the module.
217 */
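/*
 * As a sketch of the ladder below: for a playback FE module the full
 * params_fixup mask is applied on the input side, while the output side
 * keeps only the fixup bits not claimed by the 'converter' mask; the
 * roles swap for capture and for BE modules.
 */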
218 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
219 struct skl_pipe_params *params, bool is_fe)
220 {
221 int in_fixup, out_fixup;
222 struct skl_module_fmt *in_fmt, *out_fmt;
223
224 /* Fixups will be applied to pin 0 only */
225 in_fmt = &m_cfg->in_fmt[0];
226 out_fmt = &m_cfg->out_fmt[0];
227
228 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
229 if (is_fe) {
230 in_fixup = m_cfg->params_fixup;
231 out_fixup = (~m_cfg->converter) &
232 m_cfg->params_fixup;
233 } else {
234 out_fixup = m_cfg->params_fixup;
235 in_fixup = (~m_cfg->converter) &
236 m_cfg->params_fixup;
237 }
238 } else {
239 if (is_fe) {
240 out_fixup = m_cfg->params_fixup;
241 in_fixup = (~m_cfg->converter) &
242 m_cfg->params_fixup;
243 } else {
244 in_fixup = m_cfg->params_fixup;
245 out_fixup = (~m_cfg->converter) &
246 m_cfg->params_fixup;
247 }
248 }
249
250 skl_tplg_update_params(in_fmt, params, in_fixup);
251 skl_tplg_update_params(out_fmt, params, out_fixup);
252 }
253
254 /*
255 * A module needs input and output buffers, which are dependent upon the PCM
256 * params, so once we have calculated the params we need to calculate the
257 * buffer sizes as well.
258 */
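/*
 * For example, a 48 kHz, 2 channel stream in a 32 bit container gives
 * ibs/obs = 48 * 2 * 4 = 384 bytes, i.e. one millisecond worth of audio;
 * SRC modules apply an additional 5x multiplier on top of this.
 */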
259 static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
260 struct skl_module_cfg *mcfg)
261 {
262 int multiplier = 1;
263 struct skl_module_fmt *in_fmt, *out_fmt;
264 int in_rate, out_rate;
265
266
267 /* Since the fixup is applied to pin 0 only, ibs and obs need to
268 * change for pin 0 only
269 */
270 in_fmt = &mcfg->in_fmt[0];
271 out_fmt = &mcfg->out_fmt[0];
272
273 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
274 multiplier = 5;
275
276 if (in_fmt->s_freq % 1000)
277 in_rate = (in_fmt->s_freq / 1000) + 1;
278 else
279 in_rate = (in_fmt->s_freq / 1000);
280
281 mcfg->ibs = in_rate * (in_fmt->channels) *
282 (in_fmt->bit_depth >> 3) *
283 multiplier;
284 
285 if (out_fmt->s_freq % 1000)
286 out_rate = (out_fmt->s_freq / 1000) + 1;
287 else
288 out_rate = (out_fmt->s_freq / 1000);
289 
290 mcfg->obs = out_rate * (out_fmt->channels) *
291 (out_fmt->bit_depth >> 3) *
292 multiplier;
293 }
294
295 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
296 struct skl_sst *ctx)
297 {
298 struct skl_module_cfg *m_cfg = w->priv;
299 int link_type, dir;
300 u32 ch, s_freq, s_fmt;
301 struct nhlt_specific_cfg *cfg;
302 struct skl *skl = get_skl_ctx(ctx->dev);
303
304 /* check if we already have blob */
305 if (m_cfg->formats_config.caps_size > 0)
306 return 0;
307
308 dev_dbg(ctx->dev, "Applying default cfg blob\n");
309 switch (m_cfg->dev_type) {
310 case SKL_DEVICE_DMIC:
311 link_type = NHLT_LINK_DMIC;
312 dir = SNDRV_PCM_STREAM_CAPTURE;
313 s_freq = m_cfg->in_fmt[0].s_freq;
314 s_fmt = m_cfg->in_fmt[0].bit_depth;
315 ch = m_cfg->in_fmt[0].channels;
316 break;
317
318 case SKL_DEVICE_I2S:
319 link_type = NHLT_LINK_SSP;
320 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
321 dir = SNDRV_PCM_STREAM_PLAYBACK;
322 s_freq = m_cfg->out_fmt[0].s_freq;
323 s_fmt = m_cfg->out_fmt[0].bit_depth;
324 ch = m_cfg->out_fmt[0].channels;
325 } else {
326 dir = SNDRV_PCM_STREAM_CAPTURE;
327 s_freq = m_cfg->in_fmt[0].s_freq;
328 s_fmt = m_cfg->in_fmt[0].bit_depth;
329 ch = m_cfg->in_fmt[0].channels;
330 }
331 break;
332
333 default:
334 return -EINVAL;
335 }
336
337 /* update the blob based on virtual bus_id and default params */
338 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
339 s_fmt, ch, s_freq, dir);
340 if (cfg) {
341 m_cfg->formats_config.caps_size = cfg->size;
342 m_cfg->formats_config.caps = (u32 *) &cfg->caps;
343 } else {
344 dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
345 m_cfg->vbus_id, link_type, dir);
346 dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
347 ch, s_freq, s_fmt);
348 return -EIO;
349 }
350
351 return 0;
352 }
353
354 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
355 struct skl_sst *ctx)
356 {
357 struct skl_module_cfg *m_cfg = w->priv;
358 struct skl_pipe_params *params = m_cfg->pipe->p_params;
359 int p_conn_type = m_cfg->pipe->conn_type;
360 bool is_fe;
361
362 if (!m_cfg->params_fixup)
363 return;
364
365 dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
366 w->name);
367
368 skl_dump_mconfig(ctx, m_cfg);
369
370 if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
371 is_fe = true;
372 else
373 is_fe = false;
374
375 skl_tplg_update_params_fixup(m_cfg, params, is_fe);
376 skl_tplg_update_buffer_size(ctx, m_cfg);
377
378 dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
379 w->name);
380
381 skl_dump_mconfig(ctx, m_cfg);
382 }
383
384 /*
385 * Some modules can have multiple params set from user controls which need
386 * to be applied after the module is initialized. If the set_param flag is
387 * SKL_PARAM_SET, the module params are sent after the module is initialized.
388 */
389 static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
390 struct skl_sst *ctx)
391 {
392 int i, ret;
393 struct skl_module_cfg *mconfig = w->priv;
394 const struct snd_kcontrol_new *k;
395 struct soc_bytes_ext *sb;
396 struct skl_algo_data *bc;
397 struct skl_specific_cfg *sp_cfg;
398
399 if (mconfig->formats_config.caps_size > 0 &&
400 mconfig->formats_config.set_params == SKL_PARAM_SET) {
401 sp_cfg = &mconfig->formats_config;
402 ret = skl_set_module_params(ctx, sp_cfg->caps,
403 sp_cfg->caps_size,
404 sp_cfg->param_id, mconfig);
405 if (ret < 0)
406 return ret;
407 }
408
409 for (i = 0; i < w->num_kcontrols; i++) {
410 k = &w->kcontrol_news[i];
411 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
412 sb = (void *) k->private_value;
413 bc = (struct skl_algo_data *)sb->dobj.private;
414
415 if (bc->set_params == SKL_PARAM_SET) {
416 ret = skl_set_module_params(ctx,
417 (u32 *)bc->params, bc->size,
418 bc->param_id, mconfig);
419 if (ret < 0)
420 return ret;
421 }
422 }
423 }
424
425 return 0;
426 }
427
428 /*
429 * Some module params can be set from user controls and are required when
430 * the module is initialized. A param needed at init time is identified by
431 * its set_param flag being SKL_PARAM_INIT; such a parameter is copied into
432 * the module's init config data here.
433 */
434 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
435 {
436 const struct snd_kcontrol_new *k;
437 struct soc_bytes_ext *sb;
438 struct skl_algo_data *bc;
439 struct skl_module_cfg *mconfig = w->priv;
440 int i;
441
442 for (i = 0; i < w->num_kcontrols; i++) {
443 k = &w->kcontrol_news[i];
444 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
445 sb = (struct soc_bytes_ext *)k->private_value;
446 bc = (struct skl_algo_data *)sb->dobj.private;
447
448 if (bc->set_params != SKL_PARAM_INIT)
449 continue;
450
451 mconfig->formats_config.caps = (u32 *)&bc->params;
452 mconfig->formats_config.caps_size = bc->size;
453
454 break;
455 }
456 }
457
458 return 0;
459 }
460
461 /*
462 * Inside a pipe instance, we can have various modules. These modules need
463 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
464 * done by the skl_init_module() routine, so invoke it for all modules in a pipeline.
465 */
466 static int
467 skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
468 {
469 struct skl_pipe_module *w_module;
470 struct snd_soc_dapm_widget *w;
471 struct skl_module_cfg *mconfig;
472 struct skl_sst *ctx = skl->skl_sst;
473 int ret = 0;
474
475 list_for_each_entry(w_module, &pipe->w_list, node) {
476 w = w_module->w;
477 mconfig = w->priv;
478
479 /* check if module ids are populated */
480 if (mconfig->id.module_id < 0) {
481 dev_err(skl->skl_sst->dev,
482 "module %pUL id not populated\n",
483 (uuid_le *)mconfig->guid);
484 return -EIO;
485 }
486
487 /* check resource available */
488 if (!skl_is_pipe_mcps_avail(skl, mconfig))
489 return -ENOMEM;
490
491 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
492 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
493 mconfig->id.module_id, mconfig->guid);
494 if (ret < 0)
495 return ret;
496
497 mconfig->m_state = SKL_MODULE_LOADED;
498 }
499
500 /* if the BE blob is NULL, update it with default values */
501 skl_tplg_update_be_blob(w, ctx);
502
503 /*
504 * apply fix/conversion to module params based on
505 * FE/BE params
506 */
507 skl_tplg_update_module_params(w, ctx);
508
509 skl_tplg_set_module_init_data(w);
510 ret = skl_init_module(ctx, mconfig);
511 if (ret < 0)
512 return ret;
513
514 skl_tplg_alloc_pipe_mcps(skl, mconfig);
515 ret = skl_tplg_set_module_params(w, ctx);
516 if (ret < 0)
517 return ret;
518 }
519
520 return 0;
521 }
522
523 static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
524 struct skl_pipe *pipe)
525 {
526 struct skl_pipe_module *w_module = NULL;
527 struct skl_module_cfg *mconfig = NULL;
528
529 list_for_each_entry(w_module, &pipe->w_list, node) {
530 mconfig = w_module->w->priv;
531
532 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
533 mconfig->m_state > SKL_MODULE_UNINIT)
534 return ctx->dsp->fw_ops.unload_mod(ctx->dsp,
535 mconfig->id.module_id);
536 }
537
538 /* no modules to unload in this path, so return */
539 return 0;
540 }
541
542 /*
543 * A mixer module represents a pipeline. So in the Pre-PMU event of the mixer
544 * we need to create the pipeline. We do the following:
545 * - check the resources
546 * - create the pipeline
547 * - initialize the modules in the pipeline
548 * - finally bind all modules together
549 */
550 static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
551 struct skl *skl)
552 {
553 int ret;
554 struct skl_module_cfg *mconfig = w->priv;
555 struct skl_pipe_module *w_module;
556 struct skl_pipe *s_pipe = mconfig->pipe;
557 struct skl_module_cfg *src_module = NULL, *dst_module;
558 struct skl_sst *ctx = skl->skl_sst;
559
560 /* check resource available */
561 if (!skl_is_pipe_mcps_avail(skl, mconfig))
562 return -EBUSY;
563
564 if (!skl_is_pipe_mem_avail(skl, mconfig))
565 return -ENOMEM;
566
567 /*
568 * Create a list of modules for pipe.
569 * This list contains modules from source to sink
570 */
571 ret = skl_create_pipeline(ctx, mconfig->pipe);
572 if (ret < 0)
573 return ret;
574
575 skl_tplg_alloc_pipe_mem(skl, mconfig);
576 skl_tplg_alloc_pipe_mcps(skl, mconfig);
577
578 /* Init all pipe modules from source to sink */
579 ret = skl_tplg_init_pipe_modules(skl, s_pipe);
580 if (ret < 0)
581 return ret;
582
583 /* Bind modules from source to sink */
584 list_for_each_entry(w_module, &s_pipe->w_list, node) {
585 dst_module = w_module->w->priv;
586
587 if (src_module == NULL) {
588 src_module = dst_module;
589 continue;
590 }
591
592 ret = skl_bind_modules(ctx, src_module, dst_module);
593 if (ret < 0)
594 return ret;
595
596 src_module = dst_module;
597 }
598
599 return 0;
600 }
601
602 /*
603 * Some modules require params to be set after the module is bound on
604 * all of its connected pins.
605 *
606 * The module provider sets the set_param flag (SKL_PARAM_BIND) for such
607 * modules, and we send the params after binding.
608 */
609 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
610 struct skl_module_cfg *mcfg, struct skl_sst *ctx)
611 {
612 int i, ret;
613 struct skl_module_cfg *mconfig = w->priv;
614 const struct snd_kcontrol_new *k;
615 struct soc_bytes_ext *sb;
616 struct skl_algo_data *bc;
617 struct skl_specific_cfg *sp_cfg;
618
619 /*
620 * Check that all out/in pins are in the bind state;
621 * only then set the module params.
622 */
623 for (i = 0; i < mcfg->max_out_queue; i++) {
624 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
625 return 0;
626 }
627
628 for (i = 0; i < mcfg->max_in_queue; i++) {
629 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
630 return 0;
631 }
632
633 if (mconfig->formats_config.caps_size > 0 &&
634 mconfig->formats_config.set_params == SKL_PARAM_BIND) {
635 sp_cfg = &mconfig->formats_config;
636 ret = skl_set_module_params(ctx, sp_cfg->caps,
637 sp_cfg->caps_size,
638 sp_cfg->param_id, mconfig);
639 if (ret < 0)
640 return ret;
641 }
642
643 for (i = 0; i < w->num_kcontrols; i++) {
644 k = &w->kcontrol_news[i];
645 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
646 sb = (void *) k->private_value;
647 bc = (struct skl_algo_data *)sb->dobj.private;
648
649 if (bc->set_params == SKL_PARAM_BIND) {
650 ret = skl_set_module_params(ctx,
651 (u32 *)bc->params, bc->max,
652 bc->param_id, mconfig);
653 if (ret < 0)
654 return ret;
655 }
656 }
657 }
658
659 return 0;
660 }
661
662 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
663 struct skl *skl,
664 struct snd_soc_dapm_widget *src_w,
665 struct skl_module_cfg *src_mconfig)
666 {
667 struct snd_soc_dapm_path *p;
668 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
669 struct skl_module_cfg *sink_mconfig;
670 struct skl_sst *ctx = skl->skl_sst;
671 int ret;
672
673 snd_soc_dapm_widget_for_each_sink_path(w, p) {
674 if (!p->connect)
675 continue;
676
677 dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
678 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
679
680 next_sink = p->sink;
681
682 if (!is_skl_dsp_widget_type(p->sink))
683 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
684
685 /*
686 * Here we check widgets in the sink pipeline; these can be of
687 * any widget type and we are only interested in the ones
688 * handled by the SKL driver, so check that first.
689 */
690 if ((p->sink->priv != NULL) &&
691 is_skl_dsp_widget_type(p->sink)) {
692
693 sink = p->sink;
694 sink_mconfig = sink->priv;
695
696 if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
697 sink_mconfig->m_state == SKL_MODULE_UNINIT)
698 continue;
699
700 /* Bind source to sink, mixin is always source */
701 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
702 if (ret)
703 return ret;
704
705 /* set module params after bind */
706 skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
707 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
708
709 /* Start sinks pipe first */
710 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
711 if (sink_mconfig->pipe->conn_type !=
712 SKL_PIPE_CONN_TYPE_FE)
713 ret = skl_run_pipe(ctx,
714 sink_mconfig->pipe);
715 if (ret)
716 return ret;
717 }
718 }
719 }
720
721 if (!sink)
722 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
723
724 return 0;
725 }
726
727 /*
728 * A PGA represents a module in a pipeline. So in the Pre-PMU event of a PGA
729 * we need to do the following:
730 * - Bind to the sink pipeline
731 * Since the sink pipes can already be running and we don't get a mixer
732 * event on connect for an already running mixer, we need to find the sink
733 * pipes here and bind to them. This way dynamic connect works.
734 * - Start the sink pipeline, if not running
735 * - Then run the current pipe
736 */
737 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
738 struct skl *skl)
739 {
740 struct skl_module_cfg *src_mconfig;
741 struct skl_sst *ctx = skl->skl_sst;
742 int ret = 0;
743
744 src_mconfig = w->priv;
745
746 /*
747 * find which sink it is connected to, bind with the sink,
748 * if sink is not started, start sink pipe first, then start
749 * this pipe
750 */
751 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
752 if (ret)
753 return ret;
754
755 /* Start source pipe last after starting all sinks */
756 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
757 return skl_run_pipe(ctx, src_mconfig->pipe);
758
759 return 0;
760 }
761
762 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
763 struct snd_soc_dapm_widget *w, struct skl *skl)
764 {
765 struct snd_soc_dapm_path *p;
766 struct snd_soc_dapm_widget *src_w = NULL;
767 struct skl_sst *ctx = skl->skl_sst;
768
769 snd_soc_dapm_widget_for_each_source_path(w, p) {
770 src_w = p->source;
771 if (!p->connect)
772 continue;
773
774 dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
775 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
776
777 /*
778 * Here we check widgets in the source pipeline; these can
779 * be of any widget type and we are only interested in the
780 * ones handled by the SKL driver, so check that first.
781 */
782 if ((p->source->priv != NULL) &&
783 is_skl_dsp_widget_type(p->source)) {
784 return p->source;
785 }
786 }
787
788 if (src_w != NULL)
789 return skl_get_src_dsp_widget(src_w, skl);
790
791 return NULL;
792 }
793
794 /*
795 * In the Post-PMU event of the mixer we need to do the following:
796 * - Check if this pipe is running
797 * - if not, then
798 * - bind this pipeline to its source pipeline
799 * if the source pipe is already running, this means it is a dynamic
800 * connection and we need to bind only to that pipe
801 * - start this pipeline
802 */
803 static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
804 struct skl *skl)
805 {
806 int ret = 0;
807 struct snd_soc_dapm_widget *source, *sink;
808 struct skl_module_cfg *src_mconfig, *sink_mconfig;
809 struct skl_sst *ctx = skl->skl_sst;
810 int src_pipe_started = 0;
811
812 sink = w;
813 sink_mconfig = sink->priv;
814
815 /*
816 * If the source pipe is already started, the source was driving
817 * another sink before this sink got connected. Since the source is
818 * started, bind this sink to the source and start this pipe.
819 */
820 source = skl_get_src_dsp_widget(w, skl);
821 if (source != NULL) {
822 src_mconfig = source->priv;
823 sink_mconfig = sink->priv;
824 src_pipe_started = 1;
825
826 /*
827 * Check the pipe state; if the source pipe is not started,
828 * there is no need to bind or start this pipe.
829 */
830 if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
831 src_pipe_started = 0;
832 }
833
834 if (src_pipe_started) {
835 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
836 if (ret)
837 return ret;
838
839 /* set module params after bind */
840 skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
841 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
842
843 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
844 ret = skl_run_pipe(ctx, sink_mconfig->pipe);
845 }
846
847 return ret;
848 }
849
850 /*
851 * In the Pre-PMD event of the mixer we need to do the following:
852 * - Stop the pipe
853 * - find the source connections and remove them from the dapm_path_list
854 * - unbind from the source pipelines if still connected
855 */
856 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
857 struct skl *skl)
858 {
859 struct skl_module_cfg *src_mconfig, *sink_mconfig;
860 int ret = 0, i;
861 struct skl_sst *ctx = skl->skl_sst;
862
863 sink_mconfig = w->priv;
864
865 /* Stop the pipe */
866 ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
867 if (ret)
868 return ret;
869
870 for (i = 0; i < sink_mconfig->max_in_queue; i++) {
871 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
872 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
873 if (!src_mconfig)
874 continue;
875 /*
876 * If the pin is still in the bind state, the PMD for the
877 * source pipe has not occurred yet and the source is connected
878 * to some other sink; so it is the sink's responsibility to
879 * unbind itself from the source.
880 */
881 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
882 if (ret < 0)
883 return ret;
884
885 ret = skl_unbind_modules(ctx,
886 src_mconfig, sink_mconfig);
887 }
888 }
889
890 return ret;
891 }
892
893 /*
894 * In the Post-PMD event of the mixer we need to do the following:
895 * - Free the MCPS used
896 * - Free the memory used
897 * - Unbind the modules within the pipeline
898 * - Delete the pipeline (modules are not required to be explicitly
899 * deleted, pipeline delete is enough here)
900 */
901 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
902 struct skl *skl)
903 {
904 struct skl_module_cfg *mconfig = w->priv;
905 struct skl_pipe_module *w_module;
906 struct skl_module_cfg *src_module = NULL, *dst_module;
907 struct skl_sst *ctx = skl->skl_sst;
908 struct skl_pipe *s_pipe = mconfig->pipe;
909 int ret = 0;
910
911 if (s_pipe->state == SKL_PIPE_INVALID)
912 return -EINVAL;
913
914 skl_tplg_free_pipe_mcps(skl, mconfig);
915 skl_tplg_free_pipe_mem(skl, mconfig);
916
917 list_for_each_entry(w_module, &s_pipe->w_list, node) {
918 dst_module = w_module->w->priv;
919
920 if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
921 skl_tplg_free_pipe_mcps(skl, dst_module);
922 if (src_module == NULL) {
923 src_module = dst_module;
924 continue;
925 }
926
927 skl_unbind_modules(ctx, src_module, dst_module);
928 src_module = dst_module;
929 }
930
931 ret = skl_delete_pipe(ctx, mconfig->pipe);
932
933 return skl_tplg_unload_pipe_modules(ctx, s_pipe);
934 }
935
936 /*
937 * In the Post-PMD event of a PGA we need to do the following:
938 * - Free the MCPS used
939 * - Stop the pipeline
940 * - If a sink pipe is still connected, unbind from it
941 */
942 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
943 struct skl *skl)
944 {
945 struct skl_module_cfg *src_mconfig, *sink_mconfig;
946 int ret = 0, i;
947 struct skl_sst *ctx = skl->skl_sst;
948
949 src_mconfig = w->priv;
950
951 /* Stop the pipe since this is a mixin module */
952 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
953 if (ret)
954 return ret;
955
956 for (i = 0; i < src_mconfig->max_out_queue; i++) {
957 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
958 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
959 if (!sink_mconfig)
960 continue;
961 /*
962 * This is a connector and if the pin is still bound, the
963 * unbind between source and sink has not happened yet.
964 */
965 ret = skl_unbind_modules(ctx, src_mconfig,
966 sink_mconfig);
967 }
968 }
969
970 return ret;
971 }
972
973 /*
974 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
975 * mixer is not required then it is treated as a static mixer, aka vmixer,
976 * with a hard path to the source module.
977 * So we don't need to check whether the source is started or not, as the
978 * hard path creates a dependency between them.
979 */
980 static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
981 struct snd_kcontrol *k, int event)
982 {
983 struct snd_soc_dapm_context *dapm = w->dapm;
984 struct skl *skl = get_skl_ctx(dapm->dev);
985
986 switch (event) {
987 case SND_SOC_DAPM_PRE_PMU:
988 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
989
990 case SND_SOC_DAPM_POST_PMU:
991 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
992
993 case SND_SOC_DAPM_PRE_PMD:
994 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
995
996 case SND_SOC_DAPM_POST_PMD:
997 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
998 }
999
1000 return 0;
1001 }
1002
1003 /*
1004 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1005 * second one is required, it is created as another pipe entity.
1006 * The mixer is responsible for pipe management and represents a pipeline
1007 * instance.
1008 */
1009 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1010 struct snd_kcontrol *k, int event)
1011 {
1012 struct snd_soc_dapm_context *dapm = w->dapm;
1013 struct skl *skl = get_skl_ctx(dapm->dev);
1014
1015 switch (event) {
1016 case SND_SOC_DAPM_PRE_PMU:
1017 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1018
1019 case SND_SOC_DAPM_POST_PMU:
1020 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1021
1022 case SND_SOC_DAPM_PRE_PMD:
1023 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1024
1025 case SND_SOC_DAPM_POST_PMD:
1026 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1027 }
1028
1029 return 0;
1030 }
1031
1032 /*
1033 * In modelling, we assume the rest of the modules in a pipeline are PGAs. But
1034 * we are only interested in the last PGA (leaf PGA) in a pipeline, to
1035 * disconnect from the sink while it is running (two FEs to one BE, or one FE
1036 * to two BEs scenarios).
1037 */
1038 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1039 struct snd_kcontrol *k, int event)
1040
1041 {
1042 struct snd_soc_dapm_context *dapm = w->dapm;
1043 struct skl *skl = get_skl_ctx(dapm->dev);
1044
1045 switch (event) {
1046 case SND_SOC_DAPM_PRE_PMU:
1047 return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1048
1049 case SND_SOC_DAPM_POST_PMD:
1050 return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1051 }
1052
1053 return 0;
1054 }
1055
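/*
 * A note on the bytes TLV layout used by the get/set handlers below, as
 * inferred from the copy offsets: a two-u32 header (param id, payload
 * size) followed by the payload itself.
 */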
1056 static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1057 unsigned int __user *data, unsigned int size)
1058 {
1059 struct soc_bytes_ext *sb =
1060 (struct soc_bytes_ext *)kcontrol->private_value;
1061 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1062 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1063 struct skl_module_cfg *mconfig = w->priv;
1064 struct skl *skl = get_skl_ctx(w->dapm->dev);
1065
1066 if (w->power)
1067 skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
1068 bc->size, bc->param_id, mconfig);
1069
1070 /* decrement size for TLV header */
1071 size -= 2 * sizeof(u32);
1072
1073 /* check size as we don't want to send kernel data */
1074 if (size > bc->max)
1075 size = bc->max;
1076
1077 if (bc->params) {
1078 if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1079 return -EFAULT;
1080 if (copy_to_user(data + 1, &size, sizeof(u32)))
1081 return -EFAULT;
1082 if (copy_to_user(data + 2, bc->params, size))
1083 return -EFAULT;
1084 }
1085
1086 return 0;
1087 }
1088
1089 #define SKL_PARAM_VENDOR_ID 0xff
1090
1091 static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1092 const unsigned int __user *data, unsigned int size)
1093 {
1094 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1095 struct skl_module_cfg *mconfig = w->priv;
1096 struct soc_bytes_ext *sb =
1097 (struct soc_bytes_ext *)kcontrol->private_value;
1098 struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1099 struct skl *skl = get_skl_ctx(w->dapm->dev);
1100
1101 if (ac->params) {
1102 if (size > ac->max)
1103 return -EINVAL;
1104
1105 ac->size = size;
1106 /*
1107 * if the param_id is of type Vendor, the firmware expects the actual
1108 * parameter id and size from the control.
1109 */
1110 if (ac->param_id == SKL_PARAM_VENDOR_ID) {
1111 if (copy_from_user(ac->params, data, size))
1112 return -EFAULT;
1113 } else {
1114 if (copy_from_user(ac->params,
1115 data + 2, size))
1116 return -EFAULT;
1117 }
1118
1119 if (w->power)
1120 return skl_set_module_params(skl->skl_sst,
1121 (u32 *)ac->params, ac->size,
1122 ac->param_id, mconfig);
1123 }
1124
1125 return 0;
1126 }
1127
1128 /*
1129 * Fill the DMA id for host and link. In the case of a passthrough
1130 * pipeline, both host and link are in the same pipeline, so we need
1131 * to copy the link or host DMA id based on the dev_type.
1132 */
1133 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1134 struct skl_pipe_params *params)
1135 {
1136 struct skl_pipe *pipe = mcfg->pipe;
1137
1138 if (pipe->passthru) {
1139 switch (mcfg->dev_type) {
1140 case SKL_DEVICE_HDALINK:
1141 pipe->p_params->link_dma_id = params->link_dma_id;
1142 break;
1143
1144 case SKL_DEVICE_HDAHOST:
1145 pipe->p_params->host_dma_id = params->host_dma_id;
1146 break;
1147
1148 default:
1149 break;
1150 }
1151 pipe->p_params->s_fmt = params->s_fmt;
1152 pipe->p_params->ch = params->ch;
1153 pipe->p_params->s_freq = params->s_freq;
1154 pipe->p_params->stream = params->stream;
1155
1156 } else {
1157 memcpy(pipe->p_params, params, sizeof(*params));
1158 }
1159 }
1160
1161 /*
1162 * The FE params are passed by the hw_params of the DAI.
1163 * On hw_params, the params are stored in the gateway module of the FE and
1164 * we need to calculate the format for the DSP module configuration; that
1165 * conversion is done here.
1166 */
1167 int skl_tplg_update_pipe_params(struct device *dev,
1168 struct skl_module_cfg *mconfig,
1169 struct skl_pipe_params *params)
1170 {
1171 struct skl_module_fmt *format = NULL;
1172
1173 skl_tplg_fill_dma_id(mconfig, params);
1174
1175 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
1176 format = &mconfig->in_fmt[0];
1177 else
1178 format = &mconfig->out_fmt[0];
1179
1180 /* set the hw_params */
1181 format->s_freq = params->s_freq;
1182 format->channels = params->ch;
1183 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1184
1185 /*
1186 * A 16 bit sample uses a 16 bit container, whereas 24 bit samples are
1187 * carried in a 32 bit container, so update the container bit depth accordingly.
1188 */
1189 switch (format->valid_bit_depth) {
1190 case SKL_DEPTH_16BIT:
1191 format->bit_depth = format->valid_bit_depth;
1192 break;
1193
1194 case SKL_DEPTH_24BIT:
1195 case SKL_DEPTH_32BIT:
1196 format->bit_depth = SKL_DEPTH_32BIT;
1197 break;
1198
1199 default:
1200 dev_err(dev, "Invalid bit depth %x for pipe\n",
1201 format->valid_bit_depth);
1202 return -EINVAL;
1203 }
1204
1205 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1206 mconfig->ibs = (format->s_freq / 1000) *
1207 (format->channels) *
1208 (format->bit_depth >> 3);
1209 } else {
1210 mconfig->obs = (format->s_freq / 1000) *
1211 (format->channels) *
1212 (format->bit_depth >> 3);
1213 }
1214
1215 return 0;
1216 }
1217
1218 /*
1219 * Query the module config for the FE DAI.
1220 * This is used to find the hw_params set for that DAI and apply them to the
1221 * FE pipeline.
1222 */
1223 struct skl_module_cfg *
1224 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1225 {
1226 struct snd_soc_dapm_widget *w;
1227 struct snd_soc_dapm_path *p = NULL;
1228
1229 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1230 w = dai->playback_widget;
1231 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1232 if (p->connect && p->sink->power &&
1233 !is_skl_dsp_widget_type(p->sink))
1234 continue;
1235
1236 if (p->sink->priv) {
1237 dev_dbg(dai->dev, "set params for %s\n",
1238 p->sink->name);
1239 return p->sink->priv;
1240 }
1241 }
1242 } else {
1243 w = dai->capture_widget;
1244 snd_soc_dapm_widget_for_each_source_path(w, p) {
1245 if (p->connect && p->source->power &&
1246 !is_skl_dsp_widget_type(p->source))
1247 continue;
1248
1249 if (p->source->priv) {
1250 dev_dbg(dai->dev, "set params for %s\n",
1251 p->source->name);
1252 return p->source->priv;
1253 }
1254 }
1255 }
1256
1257 return NULL;
1258 }
1259
1260 static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1261 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1262 {
1263 struct snd_soc_dapm_path *p;
1264 struct skl_module_cfg *mconfig = NULL;
1265
1266 snd_soc_dapm_widget_for_each_source_path(w, p) {
1267 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1268 if (p->connect &&
1269 (p->sink->id == snd_soc_dapm_aif_out) &&
1270 p->source->priv) {
1271 mconfig = p->source->priv;
1272 return mconfig;
1273 }
1274 mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1275 if (mconfig)
1276 return mconfig;
1277 }
1278 }
1279 return mconfig;
1280 }
1281
1282 static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1283 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1284 {
1285 struct snd_soc_dapm_path *p;
1286 struct skl_module_cfg *mconfig = NULL;
1287
1288 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1289 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1290 if (p->connect &&
1291 (p->source->id == snd_soc_dapm_aif_in) &&
1292 p->sink->priv) {
1293 mconfig = p->sink->priv;
1294 return mconfig;
1295 }
1296 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1297 if (mconfig)
1298 return mconfig;
1299 }
1300 }
1301 return mconfig;
1302 }
1303
1304 struct skl_module_cfg *
1305 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1306 {
1307 struct snd_soc_dapm_widget *w;
1308 struct skl_module_cfg *mconfig;
1309
1310 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1311 w = dai->playback_widget;
1312 mconfig = skl_get_mconfig_pb_cpr(dai, w);
1313 } else {
1314 w = dai->capture_widget;
1315 mconfig = skl_get_mconfig_cap_cpr(dai, w);
1316 }
1317 return mconfig;
1318 }
1319
1320 static u8 skl_tplg_be_link_type(int dev_type)
1321 {
1322 int ret;
1323
1324 switch (dev_type) {
1325 case SKL_DEVICE_BT:
1326 ret = NHLT_LINK_SSP;
1327 break;
1328
1329 case SKL_DEVICE_DMIC:
1330 ret = NHLT_LINK_DMIC;
1331 break;
1332
1333 case SKL_DEVICE_I2S:
1334 ret = NHLT_LINK_SSP;
1335 break;
1336
1337 case SKL_DEVICE_HDALINK:
1338 ret = NHLT_LINK_HDA;
1339 break;
1340
1341 default:
1342 ret = NHLT_LINK_INVALID;
1343 break;
1344 }
1345
1346 return ret;
1347 }
1348
1349 /*
1350 * Fill the BE gateway parameters.
1351 * The BE gateway expects a blob of parameters which is kept in the ACPI
1352 * NHLT table, so query the blob for the interface type (I2S/PDM) and instance.
1353 * The port can have multiple settings, so pick one based on the PCM
1354 * parameters.
1355 */
1356 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1357 struct skl_module_cfg *mconfig,
1358 struct skl_pipe_params *params)
1359 {
1360 struct nhlt_specific_cfg *cfg;
1361 struct skl *skl = get_skl_ctx(dai->dev);
1362 int link_type = skl_tplg_be_link_type(mconfig->dev_type);
1363
1364 skl_tplg_fill_dma_id(mconfig, params);
1365
1366 if (link_type == NHLT_LINK_HDA)
1367 return 0;
1368
1369 /* update the blob based on virtual bus_id */
1370 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1371 params->s_fmt, params->ch,
1372 params->s_freq, params->stream);
1373 if (cfg) {
1374 mconfig->formats_config.caps_size = cfg->size;
1375 mconfig->formats_config.caps = (u32 *) &cfg->caps;
1376 } else {
1377 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
1378 mconfig->vbus_id, link_type,
1379 params->stream);
1380 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
1381 params->ch, params->s_freq, params->s_fmt);
1382 return -EINVAL;
1383 }
1384
1385 return 0;
1386 }
1387
1388 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1389 struct snd_soc_dapm_widget *w,
1390 struct skl_pipe_params *params)
1391 {
1392 struct snd_soc_dapm_path *p;
1393 int ret = -EIO;
1394
1395 snd_soc_dapm_widget_for_each_source_path(w, p) {
1396 if (p->connect && is_skl_dsp_widget_type(p->source) &&
1397 p->source->priv) {
1398
1399 ret = skl_tplg_be_fill_pipe_params(dai,
1400 p->source->priv, params);
1401 if (ret < 0)
1402 return ret;
1403 } else {
1404 ret = skl_tplg_be_set_src_pipe_params(dai,
1405 p->source, params);
1406 if (ret < 0)
1407 return ret;
1408 }
1409 }
1410
1411 return ret;
1412 }
1413
1414 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1415 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1416 {
1417 struct snd_soc_dapm_path *p = NULL;
1418 int ret = -EIO;
1419
1420 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1421 if (p->connect && is_skl_dsp_widget_type(p->sink) &&
1422 p->sink->priv) {
1423
1424 ret = skl_tplg_be_fill_pipe_params(dai,
1425 p->sink->priv, params);
1426 if (ret < 0)
1427 return ret;
1428 } else {
1429 ret = skl_tplg_be_set_sink_pipe_params(
1430 dai, p->sink, params);
1431 if (ret < 0)
1432 return ret;
1433 }
1434 }
1435
1436 return ret;
1437 }
1438
1439 /*
1440 * BE hw_params can be source parameters (capture) or sink parameters
1441 * (playback). Based on the direction we need to walk either the source
1442 * list or the sink list and set the pipeline parameters.
1443 */
1444 int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1445 struct skl_pipe_params *params)
1446 {
1447 struct snd_soc_dapm_widget *w;
1448
1449 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1450 w = dai->playback_widget;
1451
1452 return skl_tplg_be_set_src_pipe_params(dai, w, params);
1453
1454 } else {
1455 w = dai->capture_widget;
1456
1457 return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1458 }
1459
1460 return 0;
1461 }
1462
1463 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1464 {SKL_MIXER_EVENT, skl_tplg_mixer_event},
1465 {SKL_VMIXER_EVENT, skl_tplg_vmixer_event},
1466 {SKL_PGA_EVENT, skl_tplg_pga_event},
1467 };
1468
1469 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1470 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1471 skl_tplg_tlv_control_set},
1472 };
1473
1474 static int skl_tplg_fill_pipe_tkn(struct device *dev,
1475 struct skl_pipe *pipe, u32 tkn,
1476 u32 tkn_val)
1477 {
1478
1479 switch (tkn) {
1480 case SKL_TKN_U32_PIPE_CONN_TYPE:
1481 pipe->conn_type = tkn_val;
1482 break;
1483
1484 case SKL_TKN_U32_PIPE_PRIORITY:
1485 pipe->pipe_priority = tkn_val;
1486 break;
1487
1488 case SKL_TKN_U32_PIPE_MEM_PGS:
1489 pipe->memory_pages = tkn_val;
1490 break;
1491
1492 default:
1493 dev_err(dev, "Token not handled %d\n", tkn);
1494 return -EINVAL;
1495 }
1496
1497 return 0;
1498 }
1499
1500 /*
1501 * Add pipeline by parsing the relevant tokens
1502 * Return an existing pipe if the pipe already exists.
1503 */
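/* Note: returns the positive value EEXIST (not -EEXIST) when an existing pipe is reused */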
1504 static int skl_tplg_add_pipe(struct device *dev,
1505 struct skl_module_cfg *mconfig, struct skl *skl,
1506 struct snd_soc_tplg_vendor_value_elem *tkn_elem)
1507 {
1508 struct skl_pipeline *ppl;
1509 struct skl_pipe *pipe;
1510 struct skl_pipe_params *params;
1511
1512 list_for_each_entry(ppl, &skl->ppl_list, node) {
1513 if (ppl->pipe->ppl_id == tkn_elem->value) {
1514 mconfig->pipe = ppl->pipe;
1515 return EEXIST;
1516 }
1517 }
1518
1519 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
1520 if (!ppl)
1521 return -ENOMEM;
1522
1523 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
1524 if (!pipe)
1525 return -ENOMEM;
1526
1527 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
1528 if (!params)
1529 return -ENOMEM;
1530
1531 pipe->p_params = params;
1532 pipe->ppl_id = tkn_elem->value;
1533 INIT_LIST_HEAD(&pipe->w_list);
1534
1535 ppl->pipe = pipe;
1536 list_add(&ppl->node, &skl->ppl_list);
1537
1538 mconfig->pipe = pipe;
1539 mconfig->pipe->state = SKL_PIPE_INVALID;
1540
1541 return 0;
1542 }
1543
1544 static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
1545 struct skl_module_pin *m_pin,
1546 int pin_index, u32 value)
1547 {
1548 switch (tkn) {
1549 case SKL_TKN_U32_PIN_MOD_ID:
1550 m_pin[pin_index].id.module_id = value;
1551 break;
1552
1553 case SKL_TKN_U32_PIN_INST_ID:
1554 m_pin[pin_index].id.instance_id = value;
1555 break;
1556
1557 default:
1558 dev_err(dev, "%d Not a pin token\n", value);
1559 return -EINVAL;
1560 }
1561
1562 return 0;
1563 }
1564
1565 /*
1566 * Parse for pin config specific tokens to fill up the
1567 * module private data
1568 */
1569 static int skl_tplg_fill_pins_info(struct device *dev,
1570 struct skl_module_cfg *mconfig,
1571 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1572 int dir, int pin_count)
1573 {
1574 int ret;
1575 struct skl_module_pin *m_pin;
1576
1577 switch (dir) {
1578 case SKL_DIR_IN:
1579 m_pin = mconfig->m_in_pin;
1580 break;
1581
1582 case SKL_DIR_OUT:
1583 m_pin = mconfig->m_out_pin;
1584 break;
1585
1586 default:
1587 dev_err(dev, "Invalid direction value");
1588 return -EINVAL;
1589 }
1590
1591 ret = skl_tplg_fill_pin(dev, tkn_elem->token,
1592 m_pin, pin_count, tkn_elem->value);
1593
1594 if (ret < 0)
1595 return ret;
1596
1597 m_pin[pin_count].in_use = false;
1598 m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
1599
1600 return 0;
1601 }
1602
1603 /*
1604 * Fill up input/output module config format based
1605 * on the direction
1606 */
1607 static int skl_tplg_fill_fmt(struct device *dev,
1608 struct skl_module_cfg *mconfig, u32 tkn,
1609 u32 value, u32 dir, u32 pin_count)
1610 {
1611 struct skl_module_fmt *dst_fmt;
1612
1613 switch (dir) {
1614 case SKL_DIR_IN:
1615 dst_fmt = mconfig->in_fmt;
1616 dst_fmt += pin_count;
1617 break;
1618
1619 case SKL_DIR_OUT:
1620 dst_fmt = mconfig->out_fmt;
1621 dst_fmt += pin_count;
1622 break;
1623
1624 default:
1625 dev_err(dev, "Invalid direction value");
1626 return -EINVAL;
1627 }
1628
1629 switch (tkn) {
1630 case SKL_TKN_U32_FMT_CH:
1631 dst_fmt->channels = value;
1632 break;
1633
1634 case SKL_TKN_U32_FMT_FREQ:
1635 dst_fmt->s_freq = value;
1636 break;
1637
1638 case SKL_TKN_U32_FMT_BIT_DEPTH:
1639 dst_fmt->bit_depth = value;
1640 break;
1641
1642 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1643 dst_fmt->valid_bit_depth = value;
1644 break;
1645
1646 case SKL_TKN_U32_FMT_CH_CONFIG:
1647 dst_fmt->ch_cfg = value;
1648 break;
1649
1650 case SKL_TKN_U32_FMT_INTERLEAVE:
1651 dst_fmt->interleaving_style = value;
1652 break;
1653
1654 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1655 dst_fmt->sample_type = value;
1656 break;
1657
1658 case SKL_TKN_U32_FMT_CH_MAP:
1659 dst_fmt->ch_map = value;
1660 break;
1661
1662 default:
1663 dev_err(dev, "Invalid token %d", tkn);
1664 return -EINVAL;
1665 }
1666
1667 return 0;
1668 }
1669
1670 static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
1671 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
1672 {
1673 if (uuid_tkn->token == SKL_TKN_UUID)
1674 memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
1675 else {
1676 dev_err(dev, "Not an UUID token tkn %d", uuid_tkn->token);
1677 return -EINVAL;
1678 }
1679
1680 return 0;
1681 }
1682
1683 static void skl_tplg_fill_pin_dynamic_val(
1684 struct skl_module_pin *mpin, u32 pin_count, u32 value)
1685 {
1686 int i;
1687
1688 for (i = 0; i < pin_count; i++)
1689 mpin[i].is_dynamic = value;
1690 }
1691
1692 /*
1693 * Parse tokens to fill up the module private data
1694 */
1695 static int skl_tplg_get_token(struct device *dev,
1696 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1697 struct skl *skl, struct skl_module_cfg *mconfig)
1698 {
1699 int tkn_count = 0;
1700 int ret;
1701 static int is_pipe_exists;
1702 static int pin_index, dir;
1703
1704 if (tkn_elem->token > SKL_TKN_MAX)
1705 return -EINVAL;
1706
1707 switch (tkn_elem->token) {
1708 case SKL_TKN_U8_IN_QUEUE_COUNT:
1709 mconfig->max_in_queue = tkn_elem->value;
1710 mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
1711 sizeof(*mconfig->m_in_pin),
1712 GFP_KERNEL);
1713 if (!mconfig->m_in_pin)
1714 return -ENOMEM;
1715
1716 break;
1717
1718 case SKL_TKN_U8_OUT_QUEUE_COUNT:
1719 mconfig->max_out_queue = tkn_elem->value;
1720 mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
1721 sizeof(*mconfig->m_out_pin),
1722 GFP_KERNEL);
1723
1724 if (!mconfig->m_out_pin)
1725 return -ENOMEM;
1726
1727 break;
1728
1729 case SKL_TKN_U8_DYN_IN_PIN:
1730 if (!mconfig->m_in_pin)
1731 return -ENOMEM;
1732
1733 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
1734 mconfig->max_in_queue, tkn_elem->value);
1735
1736 break;
1737
1738 case SKL_TKN_U8_DYN_OUT_PIN:
1739 if (!mconfig->m_out_pin)
1740 return -ENOMEM;
1741
1742 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
1743 mconfig->max_out_queue, tkn_elem->value);
1744
1745 break;
1746
1747 case SKL_TKN_U8_TIME_SLOT:
1748 mconfig->time_slot = tkn_elem->value;
1749 break;
1750
1751 case SKL_TKN_U8_CORE_ID:
1752 mconfig->core_id = tkn_elem->value;
1753 break;
1754 case SKL_TKN_U8_MOD_TYPE:
1755 mconfig->m_type = tkn_elem->value;
1756 break;
1757
1758 case SKL_TKN_U8_DEV_TYPE:
1759 mconfig->dev_type = tkn_elem->value;
1760 break;
1761
1762 case SKL_TKN_U8_HW_CONN_TYPE:
1763 mconfig->hw_conn_type = tkn_elem->value;
1764 break;
1765
1766 case SKL_TKN_U16_MOD_INST_ID:
1767 mconfig->id.instance_id =
1768 tkn_elem->value;
1769 break;
1770
1771 case SKL_TKN_U32_MEM_PAGES:
1772 mconfig->mem_pages = tkn_elem->value;
1773 break;
1774
1775 case SKL_TKN_U32_MAX_MCPS:
1776 mconfig->mcps = tkn_elem->value;
1777 break;
1778
1779 case SKL_TKN_U32_OBS:
1780 mconfig->obs = tkn_elem->value;
1781 break;
1782
1783 case SKL_TKN_U32_IBS:
1784 mconfig->ibs = tkn_elem->value;
1785 break;
1786
1787 case SKL_TKN_U32_VBUS_ID:
1788 mconfig->vbus_id = tkn_elem->value;
1789 break;
1790
1791 case SKL_TKN_U32_PARAMS_FIXUP:
1792 mconfig->params_fixup = tkn_elem->value;
1793 break;
1794
1795 case SKL_TKN_U32_CONVERTER:
1796 mconfig->converter = tkn_elem->value;
1797 break;
1798
1799 case SKL_TKN_U32_PIPE_ID:
1800 ret = skl_tplg_add_pipe(dev,
1801 mconfig, skl, tkn_elem);
1802
1803 if (ret < 0)
1804 return ret;
1805
1806 if (ret == EEXIST)
1807 is_pipe_exists = 1;
1808
1809 break;
1810
1811 case SKL_TKN_U32_PIPE_CONN_TYPE:
1812 case SKL_TKN_U32_PIPE_PRIORITY:
1813 case SKL_TKN_U32_PIPE_MEM_PGS:
1814 if (is_pipe_exists) {
1815 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
1816 tkn_elem->token, tkn_elem->value);
1817 if (ret < 0)
1818 return ret;
1819 }
1820
1821 break;
1822
1823 /*
1824 * The SKL_TKN_U32_DIR_PIN_COUNT token carries both the
1825 * direction and the pin count. The lower four bits carry the
1826 * direction (only bit 0 is used) and the next four the pin count.
1827 */
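/* e.g. a token value of 0x30 selects pin index 3, with the direction taken from bit 0 */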
1828 case SKL_TKN_U32_DIR_PIN_COUNT:
1829 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
1830 pin_index = (tkn_elem->value &
1831 SKL_PIN_COUNT_MASK) >> 4;
1832
1833 break;
1834
1835 case SKL_TKN_U32_FMT_CH:
1836 case SKL_TKN_U32_FMT_FREQ:
1837 case SKL_TKN_U32_FMT_BIT_DEPTH:
1838 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1839 case SKL_TKN_U32_FMT_CH_CONFIG:
1840 case SKL_TKN_U32_FMT_INTERLEAVE:
1841 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1842 case SKL_TKN_U32_FMT_CH_MAP:
1843 ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
1844 tkn_elem->value, dir, pin_index);
1845
1846 if (ret < 0)
1847 return ret;
1848
1849 break;
1850
1851 case SKL_TKN_U32_PIN_MOD_ID:
1852 case SKL_TKN_U32_PIN_INST_ID:
1853 ret = skl_tplg_fill_pins_info(dev,
1854 mconfig, tkn_elem, dir,
1855 pin_index);
1856 if (ret < 0)
1857 return ret;
1858
1859 break;
1860
1861 case SKL_TKN_U32_CAPS_SIZE:
1862 mconfig->formats_config.caps_size =
1863 tkn_elem->value;
1864
1865 break;
1866
1867 case SKL_TKN_U32_PROC_DOMAIN:
1868 mconfig->domain =
1869 tkn_elem->value;
1870
1871 break;
1872
1873 case SKL_TKN_U8_IN_PIN_TYPE:
1874 case SKL_TKN_U8_OUT_PIN_TYPE:
1875 case SKL_TKN_U8_CONN_TYPE:
1876 break;
1877
1878 default:
1879 dev_err(dev, "Token %d not handled\n",
1880 tkn_elem->token);
1881 return -EINVAL;
1882 }
1883
1884 tkn_count++;
1885
1886 return tkn_count;
1887 }
1888
1889 /*
1890 * Parse the vendor array for specific tokens to construct
1891 * module private data
1892 */
1893 static int skl_tplg_get_tokens(struct device *dev,
1894 char *pvt_data, struct skl *skl,
1895 struct skl_module_cfg *mconfig, int block_size)
1896 {
1897 struct snd_soc_tplg_vendor_array *array;
1898 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
1899 int tkn_count = 0, ret;
1900 int off = 0, tuple_size = 0;
1901
1902 if (block_size <= 0)
1903 return -EINVAL;
1904
1905 while (tuple_size < block_size) {
1906 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
1907
1908 off += array->size;
1909
1910 switch (array->type) {
1911 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
1912 dev_warn(dev, "no string tokens expected for skl tplg");
1913 continue;
1914
1915 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
1916 ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
1917 if (ret < 0)
1918 return ret;
1919
1920 tuple_size += sizeof(*array->uuid);
1921
1922 continue;
1923
1924 default:
1925 tkn_elem = array->value;
1926 tkn_count = 0;
1927 break;
1928 }
1929
1930 while (tkn_count <= (array->num_elems - 1)) {
1931 ret = skl_tplg_get_token(dev, tkn_elem,
1932 skl, mconfig);
1933
1934 if (ret < 0)
1935 return ret;
1936
1937 tkn_count = tkn_count + ret;
1938 tkn_elem++;
1939 }
1940
1941 tuple_size += tkn_count * sizeof(*tkn_elem);
1942 }
1943
1944 return 0;
1945 }
1946
1947 /*
1948 * Every data block is preceded by a descriptor to read the number
1949 * of data blocks, the type of the block and its size
1950 */
1951 static int skl_tplg_get_desc_blocks(struct device *dev,
1952 struct snd_soc_tplg_vendor_array *array)
1953 {
1954 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
1955
1956 tkn_elem = array->value;
1957
1958 switch (tkn_elem->token) {
1959 case SKL_TKN_U8_NUM_BLOCKS:
1960 case SKL_TKN_U8_BLOCK_TYPE:
1961 case SKL_TKN_U16_BLOCK_SIZE:
1962 return tkn_elem->value;
1963
1964 default:
1965 dev_err(dev, "Invalid descriptor token %d", tkn_elem->token);
1966 break;
1967 }
1968
1969 return -EINVAL;
1970 }
1971
1972 /*
1973 * Parse the private data for the tokens and corresponding values.
1974 * The private data can have multiple data blocks. So, a data block
1975 * is preceded by a descriptor for the number of blocks and descriptors
1976 * for the type and size of the succeeding data block.
1977 */
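/*
 * Rough sketch of the layout as parsed below: a NUM_DATA_BLOCKS
 * descriptor first, then for each block a BLOCK_TYPE descriptor,
 * a BLOCK_SIZE descriptor and finally the block payload itself.
 */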
1978 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
1979 struct skl *skl, struct device *dev,
1980 struct skl_module_cfg *mconfig)
1981 {
1982 struct snd_soc_tplg_vendor_array *array;
1983 int num_blocks, block_size = 0, block_type, off = 0;
1984 char *data;
1985 int ret;
1986
1987 /* Read the NUM_DATA_BLOCKS descriptor */
1988 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
1989 ret = skl_tplg_get_desc_blocks(dev, array);
1990 if (ret < 0)
1991 return ret;
1992 num_blocks = ret;
1993
1994 off += array->size;
1995 array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);
1996
1997 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
1998 while (num_blocks > 0) {
1999 ret = skl_tplg_get_desc_blocks(dev, array);
2000
2001 if (ret < 0)
2002 return ret;
2003 block_type = ret;
2004 off += array->size;
2005
2006 array = (struct snd_soc_tplg_vendor_array *)
2007 (tplg_w->priv.data + off);
2008
2009 ret = skl_tplg_get_desc_blocks(dev, array);
2010
2011 if (ret < 0)
2012 return ret;
2013 block_size = ret;
2014 off += array->size;
2015
2016 array = (struct snd_soc_tplg_vendor_array *)
2017 (tplg_w->priv.data + off);
2018
2019 data = (tplg_w->priv.data + off);
2020
2021 if (block_type == SKL_TYPE_TUPLE) {
2022 ret = skl_tplg_get_tokens(dev, data,
2023 skl, mconfig, block_size);
2024
2025 if (ret < 0)
2026 return ret;
2027
2028 --num_blocks;
2029 } else {
2030 if (mconfig->formats_config.caps_size > 0)
2031 memcpy(mconfig->formats_config.caps, data,
2032 mconfig->formats_config.caps_size);
2033 --num_blocks;
2034 }
2035 }
2036
2037 return 0;
2038 }
2039
2040 static void skl_clear_pin_config(struct snd_soc_platform *platform,
2041 struct snd_soc_dapm_widget *w)
2042 {
2043 int i;
2044 struct skl_module_cfg *mconfig;
2045 struct skl_pipe *pipe;
2046
2047 if (!strncmp(w->dapm->component->name, platform->component.name,
2048 strlen(platform->component.name))) {
2049 mconfig = w->priv;
2050 pipe = mconfig->pipe;
2051 for (i = 0; i < mconfig->max_in_queue; i++) {
2052 mconfig->m_in_pin[i].in_use = false;
2053 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2054 }
2055 for (i = 0; i < mconfig->max_out_queue; i++) {
2056 mconfig->m_out_pin[i].in_use = false;
2057 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2058 }
2059 pipe->state = SKL_PIPE_INVALID;
2060 mconfig->m_state = SKL_MODULE_UNINIT;
2061 }
2062 }
2063
2064 void skl_cleanup_resources(struct skl *skl)
2065 {
2066 struct skl_sst *ctx = skl->skl_sst;
2067 struct snd_soc_platform *soc_platform = skl->platform;
2068 struct snd_soc_dapm_widget *w;
2069 struct snd_soc_card *card;
2070
2071 if (soc_platform == NULL)
2072 return;
2073
2074 card = soc_platform->component.card;
2075 if (!card || !card->instantiated)
2076 return;
2077
2078 skl->resource.mem = 0;
2079 skl->resource.mcps = 0;
2080
2081 list_for_each_entry(w, &card->widgets, list) {
2082 if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
2083 skl_clear_pin_config(soc_platform, w);
2084 }
2085
2086 skl_clear_module_cnt(ctx->dsp);
2087 }
2088
2089 /*
2090 * Topology core widget load callback
2091 *
* This is used to save the private data for each widget, which gives the
* driver information about the module and pipeline parameters that the DSP
* FW expects, such as ids, resource values, formats etc
2095 */
2096 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
2097 struct snd_soc_dapm_widget *w,
2098 struct snd_soc_tplg_dapm_widget *tplg_w)
2099 {
2100 int ret;
2101 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2102 struct skl *skl = ebus_to_skl(ebus);
2103 struct hdac_bus *bus = ebus_to_hbus(ebus);
2104 struct skl_module_cfg *mconfig;
2105
2106 if (!tplg_w->priv.size)
2107 goto bind_event;
2108
2109 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2110
2111 if (!mconfig)
2112 return -ENOMEM;
2113
2114 w->priv = mconfig;
2115
2116 /*
* module binary can be loaded later, so set the module id to be queried
* when the module is loaded for a use case
2119 */
2120 mconfig->id.module_id = -1;
2121
2122 /* Parse private data for tuples */
2123 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2124 if (ret < 0)
2125 return ret;
2126 bind_event:
2127 if (tplg_w->event_type == 0) {
2128 dev_dbg(bus->dev, "ASoC: No event handler required\n");
2129 return 0;
2130 }
2131
2132 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
2133 ARRAY_SIZE(skl_tplg_widget_ops),
2134 tplg_w->event_type);
2135
2136 if (ret) {
2137 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2138 __func__, tplg_w->event_type);
2139 return -EINVAL;
2140 }
2141
2142 return 0;
2143 }
2144
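/*
 * Copy the topology-supplied algo (bytes control) data into a runtime
 * skl_algo_data structure and attach it to the bytes_ext control.
 */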
2145 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2146 struct snd_soc_tplg_bytes_control *bc)
2147 {
2148 struct skl_algo_data *ac;
2149 struct skl_dfw_algo_data *dfw_ac =
2150 (struct skl_dfw_algo_data *)bc->priv.data;
2151
2152 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2153 if (!ac)
2154 return -ENOMEM;
2155
2156 /* Fill private data */
2157 ac->max = dfw_ac->max;
2158 ac->param_id = dfw_ac->param_id;
2159 ac->set_params = dfw_ac->set_params;
2160 ac->size = dfw_ac->max;
2161
2162 if (ac->max) {
2163 ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2164 if (!ac->params)
2165 return -ENOMEM;
2166
2167 memcpy(ac->params, dfw_ac->params, ac->max);
2168 }
2169
2170 be->dobj.private = ac;
2171 return 0;
2172 }
2173
2174 static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2175 struct snd_kcontrol_new *kctl,
2176 struct snd_soc_tplg_ctl_hdr *hdr)
2177 {
2178 struct soc_bytes_ext *sb;
2179 struct snd_soc_tplg_bytes_control *tplg_bc;
2180 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2181 struct hdac_bus *bus = ebus_to_hbus(ebus);
2182
2183 switch (hdr->ops.info) {
2184 case SND_SOC_TPLG_CTL_BYTES:
2185 tplg_bc = container_of(hdr,
2186 struct snd_soc_tplg_bytes_control, hdr);
2187 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2188 sb = (struct soc_bytes_ext *)kctl->private_value;
2189 if (tplg_bc->priv.size)
2190 return skl_init_algo_data(
2191 bus->dev, sb, tplg_bc);
2192 }
2193 break;
2194
2195 default:
2196 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
2197 hdr->ops.get, hdr->ops.put, hdr->ops.info);
2198 break;
2199 }
2200
2201 return 0;
2202 }
2203
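/*
 * Fill a library name in the manifest from a string token. The library
 * count must have been parsed already, as it bounds the name array.
 */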
2204 static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
2205 struct snd_soc_tplg_vendor_string_elem *str_elem,
2206 struct skl_dfw_manifest *minfo)
2207 {
2208 int tkn_count = 0;
2209 static int ref_count;
2210
2211 switch (str_elem->token) {
2212 case SKL_TKN_STR_LIB_NAME:
2213 if (ref_count > minfo->lib_count - 1) {
2214 ref_count = 0;
2215 return -EINVAL;
2216 }
2217
2218 strncpy(minfo->lib[ref_count].name, str_elem->string,
2219 ARRAY_SIZE(minfo->lib[ref_count].name));
2220 ref_count++;
2221 tkn_count++;
2222 break;
2223
2224 default:
dev_err(dev, "Not a string token %d\n", str_elem->token);
2226 break;
2227 }
2228
2229 return tkn_count;
2230 }
2231
2232 static int skl_tplg_get_str_tkn(struct device *dev,
2233 struct snd_soc_tplg_vendor_array *array,
2234 struct skl_dfw_manifest *minfo)
2235 {
2236 int tkn_count = 0, ret;
2237 struct snd_soc_tplg_vendor_string_elem *str_elem;
2238
2239 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
2240 while (tkn_count < array->num_elems) {
2241 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, minfo);
2242 str_elem++;
2243
2244 if (ret < 0)
2245 return ret;
2246
2247 tkn_count = tkn_count + ret;
2248 }
2249
2250 return tkn_count;
2251 }
2252
2253 static int skl_tplg_get_int_tkn(struct device *dev,
2254 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2255 struct skl_dfw_manifest *minfo)
2256 {
2257 int tkn_count = 0;
2258
2259 switch (tkn_elem->token) {
2260 case SKL_TKN_U32_LIB_COUNT:
2261 minfo->lib_count = tkn_elem->value;
2262 tkn_count++;
2263 break;
2264
2265 default:
dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
2267 return -EINVAL;
2268 }
2269
2270 return tkn_count;
2271 }
2272
2273 /*
2274 * Fill the manifest structure by parsing the tokens based on the
2275 * type.
2276 */
2277 static int skl_tplg_get_manifest_tkn(struct device *dev,
2278 char *pvt_data, struct skl_dfw_manifest *minfo,
2279 int block_size)
2280 {
2281 int tkn_count = 0, ret;
2282 int off = 0, tuple_size = 0;
2283 struct snd_soc_tplg_vendor_array *array;
2284 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2285
2286 if (block_size <= 0)
2287 return -EINVAL;
2288
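/* walk the vendor arrays until block_size bytes of tokens are parsed */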
2289 while (tuple_size < block_size) {
2290 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2291 off += array->size;
2292 switch (array->type) {
2293 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2294 ret = skl_tplg_get_str_tkn(dev, array, minfo);
2295
2296 if (ret < 0)
2297 return ret;
2298 tkn_count += ret;
2299
2300 tuple_size += tkn_count *
2301 sizeof(struct snd_soc_tplg_vendor_string_elem);
2302 continue;
2303
2304 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
2306 continue;
2307
2308 default:
2309 tkn_elem = array->value;
2310 tkn_count = 0;
2311 break;
2312 }
2313
2314 while (tkn_count <= array->num_elems - 1) {
2315 ret = skl_tplg_get_int_tkn(dev,
2316 tkn_elem, minfo);
2317 if (ret < 0)
2318 return ret;
2319
2320 tkn_count = tkn_count + ret;
2321 tkn_elem++;
2322 tuple_size += tkn_count *
2323 sizeof(struct snd_soc_tplg_vendor_value_elem);
2324 break;
2325 }
2326 tkn_count = 0;
2327 }
2328
2329 return 0;
2330 }
2331
2332 /*
2333 * Parse manifest private data for tokens. The private data block is
2334 * preceded by descriptors for type and size of data block.
2335 */
2336 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
2337 struct device *dev, struct skl_dfw_manifest *minfo)
2338 {
2339 struct snd_soc_tplg_vendor_array *array;
2340 int num_blocks, block_size = 0, block_type, off = 0;
2341 char *data;
2342 int ret;
2343
2344 /* Read the NUM_DATA_BLOCKS descriptor */
2345 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
2346 ret = skl_tplg_get_desc_blocks(dev, array);
2347 if (ret < 0)
2348 return ret;
2349 num_blocks = ret;
2350
2351 off += array->size;
2352 array = (struct snd_soc_tplg_vendor_array *)
2353 (manifest->priv.data + off);
2354
2355 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2356 while (num_blocks > 0) {
2357 ret = skl_tplg_get_desc_blocks(dev, array);
2358
2359 if (ret < 0)
2360 return ret;
2361 block_type = ret;
2362 off += array->size;
2363
2364 array = (struct snd_soc_tplg_vendor_array *)
2365 (manifest->priv.data + off);
2366
2367 ret = skl_tplg_get_desc_blocks(dev, array);
2368
2369 if (ret < 0)
2370 return ret;
2371 block_size = ret;
2372 off += array->size;
2373
2374 array = (struct snd_soc_tplg_vendor_array *)
2375 (manifest->priv.data + off);
2376
2377 data = (manifest->priv.data + off);
2378
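/* only tuple blocks are expected in the manifest private data */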
2379 if (block_type == SKL_TYPE_TUPLE) {
2380 ret = skl_tplg_get_manifest_tkn(dev, data, minfo,
2381 block_size);
2382
2383 if (ret < 0)
2384 return ret;
2385
2386 --num_blocks;
2387 } else {
2388 return -EINVAL;
2389 }
2390 }
2391
2392 return 0;
2393 }
2394
2395 static int skl_manifest_load(struct snd_soc_component *cmpnt,
2396 struct snd_soc_tplg_manifest *manifest)
2397 {
2398 struct skl_dfw_manifest *minfo;
2399 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2400 struct hdac_bus *bus = ebus_to_hbus(ebus);
2401 struct skl *skl = ebus_to_skl(ebus);
2402 int ret = 0;
2403
2404 /* proceed only if we have private data defined */
2405 if (manifest->priv.size == 0)
2406 return 0;
2407
2408 minfo = &skl->skl_sst->manifest;
2409
ret = skl_tplg_get_manifest_data(manifest, bus->dev, minfo);
if (ret < 0)
return ret;

if (minfo->lib_count > HDA_MAX_LIB) {
2413 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
2414 minfo->lib_count);
2415 ret = -EINVAL;
2416 }
2417
2418 return ret;
2419 }
2420
2421 static struct snd_soc_tplg_ops skl_tplg_ops = {
2422 .widget_load = skl_tplg_widget_load,
2423 .control_load = skl_tplg_control_load,
2424 .bytes_ext_ops = skl_tlv_ops,
2425 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
2426 .manifest = skl_manifest_load,
2427 };
2428
2429 /*
2430 * A pipe can have multiple modules, each of them will be a DAPM widget as
2431 * well. While managing a pipeline we need to get the list of all the
* widgets in a pipeline, so this helper - skl_tplg_create_pipe_widget_list() -
* builds the list of SKL type widgets in that pipeline
2434 */
2435 static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
2436 {
2437 struct snd_soc_dapm_widget *w;
2438 struct skl_module_cfg *mcfg = NULL;
2439 struct skl_pipe_module *p_module = NULL;
2440 struct skl_pipe *pipe;
2441
2442 list_for_each_entry(w, &platform->component.card->widgets, list) {
2443 if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
2444 mcfg = w->priv;
2445 pipe = mcfg->pipe;
2446
2447 p_module = devm_kzalloc(platform->dev,
2448 sizeof(*p_module), GFP_KERNEL);
2449 if (!p_module)
2450 return -ENOMEM;
2451
2452 p_module->w = w;
2453 list_add_tail(&p_module->node, &pipe->w_list);
2454 }
2455 }
2456
2457 return 0;
2458 }
2459
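/*
 * Mark a pipe as passthrough when it contains both a host (HDA host
 * DMA) module and a link-side device module.
 */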
2460 static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
2461 {
2462 struct skl_pipe_module *w_module;
2463 struct snd_soc_dapm_widget *w;
2464 struct skl_module_cfg *mconfig;
2465 bool host_found = false, link_found = false;
2466
2467 list_for_each_entry(w_module, &pipe->w_list, node) {
2468 w = w_module->w;
2469 mconfig = w->priv;
2470
2471 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
2472 host_found = true;
2473 else if (mconfig->dev_type != SKL_DEVICE_NONE)
2474 link_found = true;
2475 }
2476
2477 if (host_found && link_found)
2478 pipe->passthru = true;
2479 else
2480 pipe->passthru = false;
2481 }
2482
2483 /* This will be read from topology manifest, currently defined here */
2484 #define SKL_MAX_MCPS 30000000
2485 #define SKL_FW_MAX_MEM 1000000
2486
2487 /*
2488 * SKL topology init routine
2489 */
2490 int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
2491 {
2492 int ret;
2493 const struct firmware *fw;
2494 struct hdac_bus *bus = ebus_to_hbus(ebus);
2495 struct skl *skl = ebus_to_skl(ebus);
2496 struct skl_pipeline *ppl;
2497
2498 ret = request_firmware(&fw, skl->tplg_name, bus->dev);
2499 if (ret < 0) {
2500 dev_err(bus->dev, "tplg fw %s load failed with %d\n",
2501 skl->tplg_name, ret);
2502 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
2503 if (ret < 0) {
2504 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
2505 "dfw_sst.bin", ret);
2506 return ret;
2507 }
2508 }
2509
2510 /*
2511 * The complete tplg for SKL is loaded as index 0, we don't use
2512 * any other index
2513 */
2514 ret = snd_soc_tplg_component_load(&platform->component,
2515 &skl_tplg_ops, fw, 0);
2516 if (ret < 0) {
dev_err(bus->dev, "tplg component load failed %d\n", ret);
2518 release_firmware(fw);
2519 return -EINVAL;
2520 }
2521
2522 skl->resource.max_mcps = SKL_MAX_MCPS;
2523 skl->resource.max_mem = SKL_FW_MAX_MEM;
2524
2525 skl->tplg = fw;
2526 ret = skl_tplg_create_pipe_widget_list(platform);
2527 if (ret < 0)
2528 return ret;
2529
2530 list_for_each_entry(ppl, &skl->ppl_list, node)
2531 skl_tplg_set_pipe_type(skl, ppl->pipe);
2532
2533 return 0;
2534 }