1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * skl-topology.c - Implements Platform component ALSA controls/widget
4 * handlers.
5 *
6 * Copyright (C) 2014-2015 Intel Corp
7 * Author: Jeeja KP <jeeja.kp@intel.com>
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 */
10
11 #include <linux/slab.h>
12 #include <linux/types.h>
13 #include <linux/firmware.h>
14 #include <linux/uuid.h>
15 #include <sound/intel-nhlt.h>
16 #include <sound/soc.h>
17 #include <sound/soc-acpi.h>
18 #include <sound/soc-topology.h>
19 #include <uapi/sound/snd_sst_tokens.h>
20 #include <uapi/sound/skl-tplg-interface.h>
21 #include "skl-sst-dsp.h"
22 #include "skl-sst-ipc.h"
23 #include "skl-topology.h"
24 #include "skl.h"
25 #include "../common/sst-dsp.h"
26 #include "../common/sst-dsp-priv.h"
27
28 #define SKL_CH_FIXUP_MASK (1 << 0)
29 #define SKL_RATE_FIXUP_MASK (1 << 1)
30 #define SKL_FMT_FIXUP_MASK (1 << 2)
31 #define SKL_IN_DIR_BIT_MASK BIT(0)
32 #define SKL_PIN_COUNT_MASK GENMASK(7, 4)
33
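/*
 * DMIC channel-selection tables: each row lists which physical mic inputs
 * (0-3) feed the selected mono/stereo/trio/quad configuration. The enum
 * control index minus one picks the row (see skl_tplg_mic_control_set()).
 */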
34 static const int mic_mono_list[] = {
35 0, 1, 2, 3,
36 };
37 static const int mic_stereo_list[][SKL_CH_STEREO] = {
38 {0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
39 };
40 static const int mic_trio_list[][SKL_CH_TRIO] = {
41 {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
42 };
43 static const int mic_quatro_list[][SKL_CH_QUATRO] = {
44 {0, 1, 2, 3},
45 };
46
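/*
 * Evaluates to true only when the runtime channel count, sample rate and
 * bits per sample all exactly match the compared pipe configuration.
 */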
47 #define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
48 ((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))
49
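/*
 * Per-class D0i3 stream counters: skl_tplg_d0i3_get()/put() bump these as
 * streams with the given capability are opened and closed, so the rest of
 * the driver can decide whether low-power D0i3 entry is allowed.
 */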
50 void skl_tplg_d0i3_get(struct skl_dev *skl, enum d0i3_capability caps)
51 {
52 struct skl_d0i3_data *d0i3 = &skl->d0i3;
53
54 switch (caps) {
55 case SKL_D0I3_NONE:
56 d0i3->non_d0i3++;
57 break;
58
59 case SKL_D0I3_STREAMING:
60 d0i3->streaming++;
61 break;
62
63 case SKL_D0I3_NON_STREAMING:
64 d0i3->non_streaming++;
65 break;
66 }
67 }
68
69 void skl_tplg_d0i3_put(struct skl_dev *skl, enum d0i3_capability caps)
70 {
71 struct skl_d0i3_data *d0i3 = &skl->d0i3;
72
73 switch (caps) {
74 case SKL_D0I3_NONE:
75 d0i3->non_d0i3--;
76 break;
77
78 case SKL_D0I3_STREAMING:
79 d0i3->streaming--;
80 break;
81
82 case SKL_D0I3_NON_STREAMING:
83 d0i3->non_streaming--;
84 break;
85 }
86 }
87
88 /*
89 * The SKL DSP driver models only a few DAPM widget types and ignores the
90 * rest. This helper checks whether the SKL driver handles this widget type
91 */
92 static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
93 struct device *dev)
94 {
95 if (w->dapm->dev != dev)
96 return false;
97
98 switch (w->id) {
99 case snd_soc_dapm_dai_link:
100 case snd_soc_dapm_dai_in:
101 case snd_soc_dapm_aif_in:
102 case snd_soc_dapm_aif_out:
103 case snd_soc_dapm_dai_out:
104 case snd_soc_dapm_switch:
105 case snd_soc_dapm_output:
106 case snd_soc_dapm_mux:
107
108 return false;
109 default:
110 return true;
111 }
112 }
113
114 static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
115 {
116 struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];
117
118 dev_dbg(skl->dev, "Dumping config\n");
119 dev_dbg(skl->dev, "Input Format:\n");
120 dev_dbg(skl->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
121 dev_dbg(skl->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
122 dev_dbg(skl->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
123 dev_dbg(skl->dev, "valid bit depth = %d\n",
124 iface->inputs[0].fmt.valid_bit_depth);
125 dev_dbg(skl->dev, "Output Format:\n");
126 dev_dbg(skl->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
127 dev_dbg(skl->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
128 dev_dbg(skl->dev, "valid bit depth = %d\n",
129 iface->outputs[0].fmt.valid_bit_depth);
130 dev_dbg(skl->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
131 }
132
133 static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
134 {
135 int slot_map = 0xFFFFFFFF;
136 int start_slot = 0;
137 int i;
138
139 for (i = 0; i < chs; i++) {
140 /*
141 * For 2 channels with starting slot as 0, slot map will
142 * look like 0xFFFFFF10.
143 */
144 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
145 start_slot++;
146 }
147 fmt->ch_map = slot_map;
148 }
149
150 static void skl_tplg_update_params(struct skl_module_fmt *fmt,
151 struct skl_pipe_params *params, int fixup)
152 {
153 if (fixup & SKL_RATE_FIXUP_MASK)
154 fmt->s_freq = params->s_freq;
155 if (fixup & SKL_CH_FIXUP_MASK) {
156 fmt->channels = params->ch;
157 skl_tplg_update_chmap(fmt, fmt->channels);
158 }
159 if (fixup & SKL_FMT_FIXUP_MASK) {
160 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
161
162 /*
163 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
164 * container so update bit depth accordingly
165 */
166 switch (fmt->valid_bit_depth) {
167 case SKL_DEPTH_16BIT:
168 fmt->bit_depth = fmt->valid_bit_depth;
169 break;
170
171 default:
172 fmt->bit_depth = SKL_DEPTH_32BIT;
173 break;
174 }
175 }
176
177 }
178
179 /*
180 * A pipeline may have modules which impact the pcm parameters, like SRC,
181 * channel converter, format converter.
182 * We need to calculate the output params by applying the 'fixup'.
183 * Topology tells the driver which type of fixup is to be applied by
184 * supplying the fixup mask, and based on that we calculate the output.
185 *
186 * For an FE the pcm hw_params is the source/target format; the same is
187 * applicable for a BE when its hw_params is invoked.
188 * Based on the FE/BE pipeline and direction we calculate the input and
189 * output fixups and then apply them to the module formats.
190 */
191 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
192 struct skl_pipe_params *params, bool is_fe)
193 {
194 int in_fixup, out_fixup;
195 struct skl_module_fmt *in_fmt, *out_fmt;
196
197 /* Fixups will be applied to pin 0 only */
198 in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
199 out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;
200
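	/*
	 * Bits set in m_cfg->converter mark attributes this module converts
	 * itself (SRC, channel/format converter): one side gets the full
	 * params_fixup while the opposite side masks those bits out, so the
	 * converted attribute is not propagated across the module.
	 */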
201 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
202 if (is_fe) {
203 in_fixup = m_cfg->params_fixup;
204 out_fixup = (~m_cfg->converter) &
205 m_cfg->params_fixup;
206 } else {
207 out_fixup = m_cfg->params_fixup;
208 in_fixup = (~m_cfg->converter) &
209 m_cfg->params_fixup;
210 }
211 } else {
212 if (is_fe) {
213 out_fixup = m_cfg->params_fixup;
214 in_fixup = (~m_cfg->converter) &
215 m_cfg->params_fixup;
216 } else {
217 in_fixup = m_cfg->params_fixup;
218 out_fixup = (~m_cfg->converter) &
219 m_cfg->params_fixup;
220 }
221 }
222
223 skl_tplg_update_params(in_fmt, params, in_fixup);
224 skl_tplg_update_params(out_fmt, params, out_fixup);
225 }
226
227 /*
228 * A module needs input and output buffers, which are dependent upon pcm
229 * params, so once we have calculated the params, we need to calculate the
230 * buffer sizes as well.
231 */
232 static void skl_tplg_update_buffer_size(struct skl_dev *skl,
233 struct skl_module_cfg *mcfg)
234 {
235 int multiplier = 1;
236 struct skl_module_fmt *in_fmt, *out_fmt;
237 struct skl_module_res *res;
238
239 /* Since fixups are applied to pin 0 only, ibs and obs need to
240 * change for pin 0 only
241 */
242 res = &mcfg->module->resources[mcfg->res_idx];
243 in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
244 out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;
245
246 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
247 multiplier = 5;
248
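	/*
	 * ibs/obs are the bytes consumed/produced per 1 ms processing period,
	 * e.g. 48 kHz x 2 channels x 4 bytes (32-bit container) = 384 bytes.
	 * SRC gets a 5x multiplier, presumably as headroom for the rate
	 * conversion.
	 */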
249 res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
250 in_fmt->channels * (in_fmt->bit_depth >> 3) *
251 multiplier;
252
253 res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
254 out_fmt->channels * (out_fmt->bit_depth >> 3) *
255 multiplier;
256 }
257
258 static u8 skl_tplg_be_dev_type(int dev_type)
259 {
260 int ret;
261
262 switch (dev_type) {
263 case SKL_DEVICE_BT:
264 ret = NHLT_DEVICE_BT;
265 break;
266
267 case SKL_DEVICE_DMIC:
268 ret = NHLT_DEVICE_DMIC;
269 break;
270
271 case SKL_DEVICE_I2S:
272 ret = NHLT_DEVICE_I2S;
273 break;
274
275 default:
276 ret = NHLT_DEVICE_INVALID;
277 break;
278 }
279
280 return ret;
281 }
282
283 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
284 struct skl_dev *skl)
285 {
286 struct skl_module_cfg *m_cfg = w->priv;
287 int link_type, dir;
288 u32 ch, s_freq, s_fmt;
289 struct nhlt_specific_cfg *cfg;
290 u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
291 int fmt_idx = m_cfg->fmt_idx;
292 struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];
293
294 /* check if we already have blob */
295 if (m_cfg->formats_config[SKL_PARAM_INIT].caps_size > 0)
296 return 0;
297
298 dev_dbg(skl->dev, "Applying default cfg blob\n");
299 switch (m_cfg->dev_type) {
300 case SKL_DEVICE_DMIC:
301 link_type = NHLT_LINK_DMIC;
302 dir = SNDRV_PCM_STREAM_CAPTURE;
303 s_freq = m_iface->inputs[0].fmt.s_freq;
304 s_fmt = m_iface->inputs[0].fmt.bit_depth;
305 ch = m_iface->inputs[0].fmt.channels;
306 break;
307
308 case SKL_DEVICE_I2S:
309 link_type = NHLT_LINK_SSP;
310 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
311 dir = SNDRV_PCM_STREAM_PLAYBACK;
312 s_freq = m_iface->outputs[0].fmt.s_freq;
313 s_fmt = m_iface->outputs[0].fmt.bit_depth;
314 ch = m_iface->outputs[0].fmt.channels;
315 } else {
316 dir = SNDRV_PCM_STREAM_CAPTURE;
317 s_freq = m_iface->inputs[0].fmt.s_freq;
318 s_fmt = m_iface->inputs[0].fmt.bit_depth;
319 ch = m_iface->inputs[0].fmt.channels;
320 }
321 break;
322
323 default:
324 return -EINVAL;
325 }
326
327 /* update the blob based on virtual bus_id and default params */
328 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
329 s_fmt, ch, s_freq, dir, dev_type);
330 if (cfg) {
331 m_cfg->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
332 m_cfg->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
333 } else {
334 dev_err(skl->dev, "Blob NULL for id %x type %d dirn %d\n",
335 m_cfg->vbus_id, link_type, dir);
336 dev_err(skl->dev, "PCM: ch %d, freq %d, fmt %d\n",
337 ch, s_freq, s_fmt);
338 return -EIO;
339 }
340
341 return 0;
342 }
343
344 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
345 struct skl_dev *skl)
346 {
347 struct skl_module_cfg *m_cfg = w->priv;
348 struct skl_pipe_params *params = m_cfg->pipe->p_params;
349 int p_conn_type = m_cfg->pipe->conn_type;
350 bool is_fe;
351
352 if (!m_cfg->params_fixup)
353 return;
354
355 dev_dbg(skl->dev, "Mconfig for widget=%s BEFORE updation\n",
356 w->name);
357
358 skl_dump_mconfig(skl, m_cfg);
359
360 if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
361 is_fe = true;
362 else
363 is_fe = false;
364
365 skl_tplg_update_params_fixup(m_cfg, params, is_fe);
366 skl_tplg_update_buffer_size(skl, m_cfg);
367
368 dev_dbg(skl->dev, "Mconfig for widget=%s AFTER updation\n",
369 w->name);
370
371 skl_dump_mconfig(skl, m_cfg);
372 }
373
374 /*
375 * Some modules can have multiple params set from user controls which
376 * need to be set after the module is initialized. If the set_param flag
377 * is set, module params will be sent after the module is initialized.
378 */
379 static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
380 struct skl_dev *skl)
381 {
382 int i, ret;
383 struct skl_module_cfg *mconfig = w->priv;
384 const struct snd_kcontrol_new *k;
385 struct soc_bytes_ext *sb;
386 struct skl_algo_data *bc;
387 struct skl_specific_cfg *sp_cfg;
388
389 if (mconfig->formats_config[SKL_PARAM_SET].caps_size > 0 &&
390 mconfig->formats_config[SKL_PARAM_SET].set_params == SKL_PARAM_SET) {
391 sp_cfg = &mconfig->formats_config[SKL_PARAM_SET];
392 ret = skl_set_module_params(skl, sp_cfg->caps,
393 sp_cfg->caps_size,
394 sp_cfg->param_id, mconfig);
395 if (ret < 0)
396 return ret;
397 }
398
399 for (i = 0; i < w->num_kcontrols; i++) {
400 k = &w->kcontrol_news[i];
401 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
402 sb = (void *) k->private_value;
403 bc = (struct skl_algo_data *)sb->dobj.private;
404
405 if (bc->set_params == SKL_PARAM_SET) {
406 ret = skl_set_module_params(skl,
407 (u32 *)bc->params, bc->size,
408 bc->param_id, mconfig);
409 if (ret < 0)
410 return ret;
411 }
412 }
413 }
414
415 return 0;
416 }
417
418 /*
419 * Some module params can be set from user controls and are required at
420 * module init time. Such a param is identified by its set_param flag:
421 * when the flag is SKL_PARAM_INIT, the parameter is sent as part of the
422 * module init data.
423 */
424 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
425 {
426 const struct snd_kcontrol_new *k;
427 struct soc_bytes_ext *sb;
428 struct skl_algo_data *bc;
429 struct skl_module_cfg *mconfig = w->priv;
430 int i;
431
432 for (i = 0; i < w->num_kcontrols; i++) {
433 k = &w->kcontrol_news[i];
434 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
435 sb = (struct soc_bytes_ext *)k->private_value;
436 bc = (struct skl_algo_data *)sb->dobj.private;
437
438 if (bc->set_params != SKL_PARAM_INIT)
439 continue;
440
441 mconfig->formats_config[SKL_PARAM_INIT].caps =
442 (u32 *)bc->params;
443 mconfig->formats_config[SKL_PARAM_INIT].caps_size =
444 bc->size;
445
446 break;
447 }
448 }
449
450 return 0;
451 }
452
453 static int skl_tplg_module_prepare(struct skl_dev *skl, struct skl_pipe *pipe,
454 struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
455 {
456 switch (mcfg->dev_type) {
457 case SKL_DEVICE_HDAHOST:
458 return skl_pcm_host_dma_prepare(skl->dev, pipe->p_params);
459
460 case SKL_DEVICE_HDALINK:
461 return skl_pcm_link_dma_prepare(skl->dev, pipe->p_params);
462 }
463
464 return 0;
465 }
466
467 /*
468 * Inside a pipe instance, we can have various modules. These modules need
469 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
470 * done by skl_init_module(), so invoke that routine for all modules in a pipeline
471 */
472 static int
473 skl_tplg_init_pipe_modules(struct skl_dev *skl, struct skl_pipe *pipe)
474 {
475 struct skl_pipe_module *w_module;
476 struct snd_soc_dapm_widget *w;
477 struct skl_module_cfg *mconfig;
478 u8 cfg_idx;
479 int ret = 0;
480
481 list_for_each_entry(w_module, &pipe->w_list, node) {
482 guid_t *uuid_mod;
483 w = w_module->w;
484 mconfig = w->priv;
485
486 /* check if module ids are populated */
487 if (mconfig->id.module_id < 0) {
488 dev_err(skl->dev,
489 "module %pUL id not populated\n",
490 (guid_t *)mconfig->guid);
491 return -EIO;
492 }
493
494 cfg_idx = mconfig->pipe->cur_config_idx;
495 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
496 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
497
498 if (mconfig->module->loadable && skl->dsp->fw_ops.load_mod) {
499 ret = skl->dsp->fw_ops.load_mod(skl->dsp,
500 mconfig->id.module_id, mconfig->guid);
501 if (ret < 0)
502 return ret;
503 }
504
505 /* prepare the DMA if the module is gateway cpr */
506 ret = skl_tplg_module_prepare(skl, pipe, w, mconfig);
507 if (ret < 0)
508 return ret;
509
510 /* update blob if blob is null for be with default value */
511 skl_tplg_update_be_blob(w, skl);
512
513 /*
514 * apply fix/conversion to module params based on
515 * FE/BE params
516 */
517 skl_tplg_update_module_params(w, skl);
518 uuid_mod = (guid_t *)mconfig->guid;
519 mconfig->id.pvt_id = skl_get_pvt_id(skl, uuid_mod,
520 mconfig->id.instance_id);
521 if (mconfig->id.pvt_id < 0)
522 return mconfig->id.pvt_id;
523 skl_tplg_set_module_init_data(w);
524
525 ret = skl_dsp_get_core(skl->dsp, mconfig->core_id);
526 if (ret < 0) {
527 dev_err(skl->dev, "Failed to wake up core %d ret=%d\n",
528 mconfig->core_id, ret);
529 return ret;
530 }
531
532 ret = skl_init_module(skl, mconfig);
533 if (ret < 0) {
534 skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
535 goto err;
536 }
537
538 ret = skl_tplg_set_module_params(w, skl);
539 if (ret < 0)
540 goto err;
541 }
542
543 return 0;
544 err:
545 skl_dsp_put_core(skl->dsp, mconfig->core_id);
546 return ret;
547 }
548
549 static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
550 struct skl_pipe *pipe)
551 {
552 int ret = 0;
553 struct skl_pipe_module *w_module;
554 struct skl_module_cfg *mconfig;
555
556 list_for_each_entry(w_module, &pipe->w_list, node) {
557 guid_t *uuid_mod;
558 mconfig = w_module->w->priv;
559 uuid_mod = (guid_t *)mconfig->guid;
560
561 if (mconfig->module->loadable && skl->dsp->fw_ops.unload_mod) {
562 ret = skl->dsp->fw_ops.unload_mod(skl->dsp,
563 mconfig->id.module_id);
564 if (ret < 0)
565 return -EIO;
566 }
567 skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
568
569 ret = skl_dsp_put_core(skl->dsp, mconfig->core_id);
570 if (ret < 0) {
571 /* don't return; continue with other modules */
572 dev_err(skl->dev, "Failed to sleep core %d ret=%d\n",
573 mconfig->core_id, ret);
574 }
575 }
576
577 /* no modules to unload in this path, so return */
578 return ret;
579 }
580
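/*
 * A pipe is treated as multi-format when it has more than one config and
 * is not an FE pipe, or when it is an FE pipe whose per-config formats
 * (out_fmt for playback, in_fmt for capture) actually differ in channels,
 * rate or bits per sample.
 */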
581 static bool skl_tplg_is_multi_fmt(struct skl_dev *skl, struct skl_pipe *pipe)
582 {
583 struct skl_pipe_fmt *cur_fmt;
584 struct skl_pipe_fmt *next_fmt;
585 int i;
586
587 if (pipe->nr_cfgs <= 1)
588 return false;
589
590 if (pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
591 return true;
592
593 for (i = 0; i < pipe->nr_cfgs - 1; i++) {
594 if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) {
595 cur_fmt = &pipe->configs[i].out_fmt;
596 next_fmt = &pipe->configs[i + 1].out_fmt;
597 } else {
598 cur_fmt = &pipe->configs[i].in_fmt;
599 next_fmt = &pipe->configs[i + 1].in_fmt;
600 }
601
602 if (!CHECK_HW_PARAMS(cur_fmt->channels, cur_fmt->freq,
603 cur_fmt->bps,
604 next_fmt->channels,
605 next_fmt->freq,
606 next_fmt->bps))
607 return true;
608 }
609
610 return false;
611 }
612
613 /*
614 * Here, we select pipe format based on the pipe type and pipe
615 * direction to determine the current config index for the pipeline.
616 * The config index is then used to select proper module resources.
617 * Intermediate pipes currently have a fixed format, hence we select the
618 * 0th configuration by default for such pipes.
619 */
620 static int
621 skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
622 {
623 struct skl_pipe *pipe = mconfig->pipe;
624 struct skl_pipe_params *params = pipe->p_params;
625 struct skl_path_config *pconfig = &pipe->configs[0];
626 struct skl_pipe_fmt *fmt = NULL;
627 bool in_fmt = false;
628 int i;
629
630 if (pipe->nr_cfgs == 0) {
631 pipe->cur_config_idx = 0;
632 return 0;
633 }
634
635 if (skl_tplg_is_multi_fmt(skl, pipe)) {
636 pipe->cur_config_idx = pipe->pipe_config_idx;
637 pipe->memory_pages = pconfig->mem_pages;
638 dev_dbg(skl->dev, "found pipe config idx:%d\n",
639 pipe->cur_config_idx);
640 return 0;
641 }
642
643 if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE || pipe->nr_cfgs == 1) {
644 dev_dbg(skl->dev, "No conn_type or just 1 pathcfg, taking 0th for %d\n",
645 pipe->ppl_id);
646 pipe->cur_config_idx = 0;
647 pipe->memory_pages = pconfig->mem_pages;
648
649 return 0;
650 }
651
652 if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
653 pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
654 (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
655 pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
656 in_fmt = true;
657
658 for (i = 0; i < pipe->nr_cfgs; i++) {
659 pconfig = &pipe->configs[i];
660 if (in_fmt)
661 fmt = &pconfig->in_fmt;
662 else
663 fmt = &pconfig->out_fmt;
664
665 if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
666 fmt->channels, fmt->freq, fmt->bps)) {
667 pipe->cur_config_idx = i;
668 pipe->memory_pages = pconfig->mem_pages;
669 dev_dbg(skl->dev, "Using pipe config: %d\n", i);
670
671 return 0;
672 }
673 }
674
675 dev_err(skl->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
676 params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
677 return -EINVAL;
678 }
679
680 /*
681 * A mixer module represents a pipeline. So in the Pre-PMU event of the
682 * mixer we need to create the pipeline. We do the following:
683 * - Create the pipeline
684 * - Initialize the modules in the pipeline
685 * - Finally bind all modules together
686 */
687 static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
688 struct skl_dev *skl)
689 {
690 int ret;
691 struct skl_module_cfg *mconfig = w->priv;
692 struct skl_pipe_module *w_module;
693 struct skl_pipe *s_pipe = mconfig->pipe;
694 struct skl_module_cfg *src_module = NULL, *dst_module, *module;
695 struct skl_module_deferred_bind *modules;
696
697 ret = skl_tplg_get_pipe_config(skl, mconfig);
698 if (ret < 0)
699 return ret;
700
701 /*
702 * Create a list of modules for pipe.
703 * This list contains modules from source to sink
704 */
705 ret = skl_create_pipeline(skl, mconfig->pipe);
706 if (ret < 0)
707 return ret;
708
709 /* Init all pipe modules from source to sink */
710 ret = skl_tplg_init_pipe_modules(skl, s_pipe);
711 if (ret < 0)
712 return ret;
713
714 /* Bind modules from source to sink */
715 list_for_each_entry(w_module, &s_pipe->w_list, node) {
716 dst_module = w_module->w->priv;
717
718 if (src_module == NULL) {
719 src_module = dst_module;
720 continue;
721 }
722
723 ret = skl_bind_modules(skl, src_module, dst_module);
724 if (ret < 0)
725 return ret;
726
727 src_module = dst_module;
728 }
729
730 /*
731 * When the destination module is initialized, check for these modules
732 * in deferred bind list. If found, bind them.
733 */
734 list_for_each_entry(w_module, &s_pipe->w_list, node) {
735 if (list_empty(&skl->bind_list))
736 break;
737
738 list_for_each_entry(modules, &skl->bind_list, node) {
739 module = w_module->w->priv;
740 if (modules->dst == module)
741 skl_bind_modules(skl, modules->src,
742 modules->dst);
743 }
744 }
745
746 return 0;
747 }
748
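/*
 * For KPB bind params the topology encodes module/instance ids; translate
 * each instance id to the driver-assigned private id used towards the
 * firmware.
 */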
749 static int skl_fill_sink_instance_id(struct skl_dev *skl, u32 *params,
750 int size, struct skl_module_cfg *mcfg)
751 {
752 int i, pvt_id;
753
754 if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
755 struct skl_kpb_params *kpb_params =
756 (struct skl_kpb_params *)params;
757 struct skl_mod_inst_map *inst = kpb_params->u.map;
758
759 for (i = 0; i < kpb_params->num_modules; i++) {
760 pvt_id = skl_get_pvt_instance_id_map(skl, inst->mod_id,
761 inst->inst_id);
762 if (pvt_id < 0)
763 return -EINVAL;
764
765 inst->inst_id = pvt_id;
766 inst++;
767 }
768 }
769
770 return 0;
771 }
772 /*
773 * Some modules require params to be set after the module is bound on
774 * all connected pins.
775 *
776 * The module provider initializes the set_param flag for such modules and
777 * we send the params after binding.
778 */
779 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
780 struct skl_module_cfg *mcfg, struct skl_dev *skl)
781 {
782 int i, ret;
783 struct skl_module_cfg *mconfig = w->priv;
784 const struct snd_kcontrol_new *k;
785 struct soc_bytes_ext *sb;
786 struct skl_algo_data *bc;
787 struct skl_specific_cfg *sp_cfg;
788 u32 *params;
789
790 /*
791 * check all out/in pins are in bind state.
792 * if so set the module param
793 */
794 for (i = 0; i < mcfg->module->max_output_pins; i++) {
795 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
796 return 0;
797 }
798
799 for (i = 0; i < mcfg->module->max_input_pins; i++) {
800 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
801 return 0;
802 }
803
804 if (mconfig->formats_config[SKL_PARAM_BIND].caps_size > 0 &&
805 mconfig->formats_config[SKL_PARAM_BIND].set_params ==
806 SKL_PARAM_BIND) {
807 sp_cfg = &mconfig->formats_config[SKL_PARAM_BIND];
808 ret = skl_set_module_params(skl, sp_cfg->caps,
809 sp_cfg->caps_size,
810 sp_cfg->param_id, mconfig);
811 if (ret < 0)
812 return ret;
813 }
814
815 for (i = 0; i < w->num_kcontrols; i++) {
816 k = &w->kcontrol_news[i];
817 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
818 sb = (void *) k->private_value;
819 bc = (struct skl_algo_data *)sb->dobj.private;
820
821 if (bc->set_params == SKL_PARAM_BIND) {
822 params = kmemdup(bc->params, bc->max, GFP_KERNEL);
823 if (!params)
824 return -ENOMEM;
825
826 skl_fill_sink_instance_id(skl, params, bc->max,
827 mconfig);
828
829 ret = skl_set_module_params(skl, params,
830 bc->max, bc->param_id, mconfig);
831 kfree(params);
832
833 if (ret < 0)
834 return ret;
835 }
836 }
837 }
838
839 return 0;
840 }
841
842 static int skl_get_module_id(struct skl_dev *skl, guid_t *uuid)
843 {
844 struct uuid_module *module;
845
846 list_for_each_entry(module, &skl->uuid_list, list) {
847 if (guid_equal(uuid, &module->uuid))
848 return module->id;
849 }
850
851 return -EINVAL;
852 }
853
854 static int skl_tplg_find_moduleid_from_uuid(struct skl_dev *skl,
855 const struct snd_kcontrol_new *k)
856 {
857 struct soc_bytes_ext *sb = (void *) k->private_value;
858 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
859 struct skl_kpb_params *uuid_params, *params;
860 struct hdac_bus *bus = skl_to_bus(skl);
861 int i, size, module_id;
862
863 if (bc->set_params == SKL_PARAM_BIND && bc->max) {
864 uuid_params = (struct skl_kpb_params *)bc->params;
865 size = struct_size(params, u.map, uuid_params->num_modules);
866
867 params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
868 if (!params)
869 return -ENOMEM;
870
871 params->num_modules = uuid_params->num_modules;
872
873 for (i = 0; i < uuid_params->num_modules; i++) {
874 module_id = skl_get_module_id(skl,
875 &uuid_params->u.map_uuid[i].mod_uuid);
876 if (module_id < 0) {
877 devm_kfree(bus->dev, params);
878 return -EINVAL;
879 }
880
881 params->u.map[i].mod_id = module_id;
882 params->u.map[i].inst_id =
883 uuid_params->u.map_uuid[i].inst_id;
884 }
885
886 devm_kfree(bus->dev, bc->params);
887 bc->params = (char *)params;
888 bc->max = size;
889 }
890
891 return 0;
892 }
893
894 /*
895 * Retrieve the module id from UUID mentioned in the
896 * post bind params
897 */
898 void skl_tplg_add_moduleid_in_bind_params(struct skl_dev *skl,
899 struct snd_soc_dapm_widget *w)
900 {
901 struct skl_module_cfg *mconfig = w->priv;
902 int i;
903
904 /*
905 * Post bind params are used only for KPB
906 * to set copier instances to drain the data
907 * in fast mode
908 */
909 if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
910 return;
911
912 for (i = 0; i < w->num_kcontrols; i++)
913 if ((w->kcontrol_news[i].access &
914 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
915 (skl_tplg_find_moduleid_from_uuid(skl,
916 &w->kcontrol_news[i]) < 0))
917 dev_err(skl->dev,
918 "%s: invalid kpb post bind params\n",
919 __func__);
920 }
921
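/*
 * Record a src->dst pair on skl->bind_list when the destination is not yet
 * initialised; the actual bind happens later from
 * skl_tplg_mixer_dapm_pre_pmu_event() once the destination pipeline is up.
 */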
922 static int skl_tplg_module_add_deferred_bind(struct skl_dev *skl,
923 struct skl_module_cfg *src, struct skl_module_cfg *dst)
924 {
925 struct skl_module_deferred_bind *m_list, *modules;
926 int i;
927
928 /* only supported for module with static pin connection */
929 for (i = 0; i < dst->module->max_input_pins; i++) {
930 struct skl_module_pin *pin = &dst->m_in_pin[i];
931
932 if (pin->is_dynamic)
933 continue;
934
935 if ((pin->id.module_id == src->id.module_id) &&
936 (pin->id.instance_id == src->id.instance_id)) {
937
938 if (!list_empty(&skl->bind_list)) {
939 list_for_each_entry(modules, &skl->bind_list, node) {
940 if (modules->src == src && modules->dst == dst)
941 return 0;
942 }
943 }
944
945 m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
946 if (!m_list)
947 return -ENOMEM;
948
949 m_list->src = src;
950 m_list->dst = dst;
951
952 list_add(&m_list->node, &skl->bind_list);
953 }
954 }
955
956 return 0;
957 }
958
959 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
960 struct skl_dev *skl,
961 struct snd_soc_dapm_widget *src_w,
962 struct skl_module_cfg *src_mconfig)
963 {
964 struct snd_soc_dapm_path *p;
965 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
966 struct skl_module_cfg *sink_mconfig;
967 int ret;
968
969 snd_soc_dapm_widget_for_each_sink_path(w, p) {
970 if (!p->connect)
971 continue;
972
973 dev_dbg(skl->dev,
974 "%s: src widget=%s\n", __func__, w->name);
975 dev_dbg(skl->dev,
976 "%s: sink widget=%s\n", __func__, p->sink->name);
977
978 next_sink = p->sink;
979
980 if (!is_skl_dsp_widget_type(p->sink, skl->dev))
981 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
982
983 /*
984 * Here we check widgets in sink pipelines; these can be of
985 * any widget type and we are only interested in the ones
986 * used by SKL, so check that first.
987 */
988 if ((p->sink->priv != NULL) &&
989 is_skl_dsp_widget_type(p->sink, skl->dev)) {
990
991 sink = p->sink;
992 sink_mconfig = sink->priv;
993
994 /*
995 * Modules other than the PGA leaf can be connected
996 * directly or via a switch to a module in another
997 * pipeline, e.g. a reference path.
998 * When the path is enabled, the dst module that needs
999 * to be bound may not be initialized yet. If it is not
1000 * initialized, add the pair to the deferred bind list;
1001 * when the dst module is initialized later, this module
1002 * is bound to it from the deferred list.
1003 */
1004 if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
1005 && (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
1006
1007 ret = skl_tplg_module_add_deferred_bind(skl,
1008 src_mconfig, sink_mconfig);
1009
1010 if (ret < 0)
1011 return ret;
1012
1013 }
1014
1015
1016 if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
1017 sink_mconfig->m_state == SKL_MODULE_UNINIT)
1018 continue;
1019
1020 /* Bind source to sink, mixin is always source */
1021 ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
1022 if (ret)
1023 return ret;
1024
1025 /* set module params after bind */
1026 skl_tplg_set_module_bind_params(src_w,
1027 src_mconfig, skl);
1028 skl_tplg_set_module_bind_params(sink,
1029 sink_mconfig, skl);
1030
1031 /* Start sinks pipe first */
1032 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
1033 if (sink_mconfig->pipe->conn_type !=
1034 SKL_PIPE_CONN_TYPE_FE)
1035 ret = skl_run_pipe(skl,
1036 sink_mconfig->pipe);
1037 if (ret)
1038 return ret;
1039 }
1040 }
1041 }
1042
1043 if (!sink && next_sink)
1044 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
1045
1046 return 0;
1047 }
1048
1049 /*
1050 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
1051 * we need to do the following:
1052 * - Bind to sink pipeline
1053 * Since the sink pipes can be running and we don't get mixer event on
1054 * connect for already running mixer, we need to find the sink pipes
1055 * here and bind to them. This way dynamic connect works.
1056 * - Start sink pipeline, if not running
1057 * - Then run current pipe
1058 */
1059 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
1060 struct skl_dev *skl)
1061 {
1062 struct skl_module_cfg *src_mconfig;
1063 int ret = 0;
1064
1065 src_mconfig = w->priv;
1066
1067 /*
1068 * find which sink it is connected to, bind with the sink,
1069 * if sink is not started, start sink pipe first, then start
1070 * this pipe
1071 */
1072 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
1073 if (ret)
1074 return ret;
1075
1076 /* Start source pipe last after starting all sinks */
1077 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1078 return skl_run_pipe(skl, src_mconfig->pipe);
1079
1080 return 0;
1081 }
1082
1083 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
1084 struct snd_soc_dapm_widget *w, struct skl_dev *skl)
1085 {
1086 struct snd_soc_dapm_path *p;
1087 struct snd_soc_dapm_widget *src_w = NULL;
1088
1089 snd_soc_dapm_widget_for_each_source_path(w, p) {
1090 src_w = p->source;
1091 if (!p->connect)
1092 continue;
1093
1094 dev_dbg(skl->dev, "sink widget=%s\n", w->name);
1095 dev_dbg(skl->dev, "src widget=%s\n", p->source->name);
1096
1097 /*
1098 * Here we check widgets in source pipelines; these can be of any
1099 * widget type and we are only interested in the ones used by
1100 * SKL, so check that first.
1101 */
1102 if ((p->source->priv != NULL) &&
1103 is_skl_dsp_widget_type(p->source, skl->dev)) {
1104 return p->source;
1105 }
1106 }
1107
1108 if (src_w != NULL)
1109 return skl_get_src_dsp_widget(src_w, skl);
1110
1111 return NULL;
1112 }
1113
1114 /*
1115 * In the Post-PMU event of the mixer we need to do the following:
1116 * - Check if this pipe is running
1117 * - If not, then
1118 * - bind this pipeline to its source pipeline
1119 * if the source pipe is already running, this means it is a dynamic
1120 * connection and we need to bind only to that pipe
1121 * - start this pipeline
1122 */
1123 static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
1124 struct skl_dev *skl)
1125 {
1126 int ret = 0;
1127 struct snd_soc_dapm_widget *source, *sink;
1128 struct skl_module_cfg *src_mconfig, *sink_mconfig;
1129 int src_pipe_started = 0;
1130
1131 sink = w;
1132 sink_mconfig = sink->priv;
1133
1134 /*
1135 * If the source pipe is already started, the source is driving
1136 * another sink before this sink got connected. Since the source is
1137 * started, bind this sink to the source and start this pipe.
1138 */
1139 source = skl_get_src_dsp_widget(w, skl);
1140 if (source != NULL) {
1141 src_mconfig = source->priv;
1142 sink_mconfig = sink->priv;
1143 src_pipe_started = 1;
1144
1145 /*
1146 * if the source pipe is not started, there is no need to
1147 * bind or start the pipe here
1148 */
1149 if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
1150 src_pipe_started = 0;
1151 }
1152
1153 if (src_pipe_started) {
1154 ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
1155 if (ret)
1156 return ret;
1157
1158 /* set module params after bind */
1159 skl_tplg_set_module_bind_params(source, src_mconfig, skl);
1160 skl_tplg_set_module_bind_params(sink, sink_mconfig, skl);
1161
1162 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1163 ret = skl_run_pipe(skl, sink_mconfig->pipe);
1164 }
1165
1166 return ret;
1167 }
1168
1169 /*
1170 * In the Pre-PMD event of the mixer we need to do the following:
1171 * - Stop the pipe
1172 * - Find the source connections and remove them from the dapm_path_list
1173 * - Unbind from the source pipelines if still connected
1174 */
1175 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
1176 struct skl_dev *skl)
1177 {
1178 struct skl_module_cfg *src_mconfig, *sink_mconfig;
1179 int ret = 0, i;
1180
1181 sink_mconfig = w->priv;
1182
1183 /* Stop the pipe */
1184 ret = skl_stop_pipe(skl, sink_mconfig->pipe);
1185 if (ret)
1186 return ret;
1187
1188 for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
1189 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1190 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
1191 if (!src_mconfig)
1192 continue;
1193
1194 ret = skl_unbind_modules(skl,
1195 src_mconfig, sink_mconfig);
1196 }
1197 }
1198
1199 return ret;
1200 }
1201
1202 /*
1203 * In the Post-PMD event of the mixer we need to do the following:
1204 * - Unbind the modules within the pipeline
1205 * - Delete the pipeline (modules are not required to be explicitly
1206 * deleted, pipeline delete is enough here)
1207 */
1208 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1209 struct skl_dev *skl)
1210 {
1211 struct skl_module_cfg *mconfig = w->priv;
1212 struct skl_pipe_module *w_module;
1213 struct skl_module_cfg *src_module = NULL, *dst_module;
1214 struct skl_pipe *s_pipe = mconfig->pipe;
1215 struct skl_module_deferred_bind *modules, *tmp;
1216
1217 if (s_pipe->state == SKL_PIPE_INVALID)
1218 return -EINVAL;
1219
1220 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1221 if (list_empty(&skl->bind_list))
1222 break;
1223
1224 src_module = w_module->w->priv;
1225
1226 list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
1227 /*
1228 * When the destination module is deleted, Unbind the
1229 * modules from deferred bind list.
1230 */
1231 if (modules->dst == src_module) {
1232 skl_unbind_modules(skl, modules->src,
1233 modules->dst);
1234 }
1235
1236 /*
1237 * When the source module is deleted, remove this entry
1238 * from the deferred bind list.
1239 */
1240 if (modules->src == src_module) {
1241 list_del(&modules->node);
1242 modules->src = NULL;
1243 modules->dst = NULL;
1244 kfree(modules);
1245 }
1246 }
1247 }
1248
1249 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1250 dst_module = w_module->w->priv;
1251
1252 if (src_module == NULL) {
1253 src_module = dst_module;
1254 continue;
1255 }
1256
1257 skl_unbind_modules(skl, src_module, dst_module);
1258 src_module = dst_module;
1259 }
1260
1261 skl_delete_pipe(skl, mconfig->pipe);
1262
1263 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1264 src_module = w_module->w->priv;
1265 src_module->m_state = SKL_MODULE_UNINIT;
1266 }
1267
1268 return skl_tplg_unload_pipe_modules(skl, s_pipe);
1269 }
1270
1271 /*
1272 * In the Post-PMD event of the PGA we need to do the following:
1273 * - Stop the pipeline
1274 * - If a sink pipe is still connected, unbind from it
1275 */
1276 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1277 struct skl_dev *skl)
1278 {
1279 struct skl_module_cfg *src_mconfig, *sink_mconfig;
1280 int ret = 0, i;
1281
1282 src_mconfig = w->priv;
1283
1284 /* Stop the pipe since this is a mixin module */
1285 ret = skl_stop_pipe(skl, src_mconfig->pipe);
1286 if (ret)
1287 return ret;
1288
1289 for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
1290 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1291 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1292 if (!sink_mconfig)
1293 continue;
1294 /*
1295 * This is a connector and if a path is found it means the
1296 * unbind between source and sink has not happened yet
1297 */
1298 ret = skl_unbind_modules(skl, src_mconfig,
1299 sink_mconfig);
1300 }
1301 }
1302
1303 return ret;
1304 }
1305
1306 /*
1307 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1308 * second one is required, it is created as another pipe entity.
1309 * The mixer is responsible for pipe management and represents a pipeline
1310 * instance.
1311 */
1312 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1313 struct snd_kcontrol *k, int event)
1314 {
1315 struct snd_soc_dapm_context *dapm = w->dapm;
1316 struct skl_dev *skl = get_skl_ctx(dapm->dev);
1317
1318 switch (event) {
1319 case SND_SOC_DAPM_PRE_PMU:
1320 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1321
1322 case SND_SOC_DAPM_POST_PMU:
1323 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1324
1325 case SND_SOC_DAPM_PRE_PMD:
1326 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1327
1328 case SND_SOC_DAPM_POST_PMD:
1329 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1330 }
1331
1332 return 0;
1333 }
1334
1335 /*
1336 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
1337 * We are interested in the last PGA (leaf PGA) in a pipeline, in order to
1338 * disconnect from the sink while it is running (two FEs to one BE or one
1339 * FE to two BEs scenarios).
1340 */
1341 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1342 struct snd_kcontrol *k, int event)
1343
1344 {
1345 struct snd_soc_dapm_context *dapm = w->dapm;
1346 struct skl_dev *skl = get_skl_ctx(dapm->dev);
1347
1348 switch (event) {
1349 case SND_SOC_DAPM_PRE_PMU:
1350 return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1351
1352 case SND_SOC_DAPM_POST_PMD:
1353 return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1354 }
1355
1356 return 0;
1357 }
1358
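/*
 * Backing handler for the pipeline multi-config enum controls: the
 * control's private data carries a pipe id which is resolved against
 * skl->ppl_list so pipe->pipe_config_idx can be read or written.
 */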
1359 static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
1360 struct snd_ctl_elem_value *ucontrol,
1361 bool is_set)
1362 {
1363 struct snd_soc_component *component =
1364 snd_soc_kcontrol_component(kcontrol);
1365 struct hdac_bus *bus = snd_soc_component_get_drvdata(component);
1366 struct skl_dev *skl = bus_to_skl(bus);
1367 struct skl_pipeline *ppl;
1368 struct skl_pipe *pipe = NULL;
1369 struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1370 u32 *pipe_id;
1371
1372 if (!ec)
1373 return -EINVAL;
1374
1375 if (is_set && ucontrol->value.enumerated.item[0] > ec->items)
1376 return -EINVAL;
1377
1378 pipe_id = ec->dobj.private;
1379
1380 list_for_each_entry(ppl, &skl->ppl_list, node) {
1381 if (ppl->pipe->ppl_id == *pipe_id) {
1382 pipe = ppl->pipe;
1383 break;
1384 }
1385 }
1386 if (!pipe)
1387 return -EIO;
1388
1389 if (is_set)
1390 pipe->pipe_config_idx = ucontrol->value.enumerated.item[0];
1391 else
1392 ucontrol->value.enumerated.item[0] = pipe->pipe_config_idx;
1393
1394 return 0;
1395 }
1396
1397 static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol,
1398 struct snd_ctl_elem_value *ucontrol)
1399 {
1400 return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
1401 }
1402
1403 static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol,
1404 struct snd_ctl_elem_value *ucontrol)
1405 {
1406 return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
1407 }
1408
1409 static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol,
1410 struct snd_ctl_elem_value *ucontrol)
1411 {
1412 return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
1413 }
1414
1415 static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol,
1416 struct snd_ctl_elem_value *ucontrol)
1417 {
1418 return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
1419 }
1420
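/*
 * TLV byte-control read: the buffer returned to userspace is laid out as
 * { param_id, size, params[] }; the parameters are refreshed from the DSP
 * only while the widget is powered.
 */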
1421 static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1422 unsigned int __user *data, unsigned int size)
1423 {
1424 struct soc_bytes_ext *sb =
1425 (struct soc_bytes_ext *)kcontrol->private_value;
1426 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1427 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1428 struct skl_module_cfg *mconfig = w->priv;
1429 struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
1430
1431 if (w->power)
1432 skl_get_module_params(skl, (u32 *)bc->params,
1433 bc->size, bc->param_id, mconfig);
1434
1435 /* decrement size for TLV header */
1436 size -= 2 * sizeof(u32);
1437
1438 /* check size as we don't want to send kernel data */
1439 if (size > bc->max)
1440 size = bc->max;
1441
1442 if (bc->params) {
1443 if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1444 return -EFAULT;
1445 if (copy_to_user(data + 1, &size, sizeof(u32)))
1446 return -EFAULT;
1447 if (copy_to_user(data + 2, bc->params, size))
1448 return -EFAULT;
1449 }
1450
1451 return 0;
1452 }
1453
1454 #define SKL_PARAM_VENDOR_ID 0xff
1455
1456 static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1457 const unsigned int __user *data, unsigned int size)
1458 {
1459 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1460 struct skl_module_cfg *mconfig = w->priv;
1461 struct soc_bytes_ext *sb =
1462 (struct soc_bytes_ext *)kcontrol->private_value;
1463 struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1464 struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
1465
1466 if (ac->params) {
1467 if (size > ac->max)
1468 return -EINVAL;
1469 ac->size = size;
1470
1471 if (copy_from_user(ac->params, data, size))
1472 return -EFAULT;
1473
1474 if (w->power)
1475 return skl_set_module_params(skl,
1476 (u32 *)ac->params, ac->size,
1477 ac->param_id, mconfig);
1478 }
1479
1480 return 0;
1481 }
1482
1483 static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
1484 struct snd_ctl_elem_value *ucontrol)
1485 {
1486 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1487 struct skl_module_cfg *mconfig = w->priv;
1488 struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1489 u32 ch_type = *((u32 *)ec->dobj.private);
1490
1491 if (mconfig->dmic_ch_type == ch_type)
1492 ucontrol->value.enumerated.item[0] =
1493 mconfig->dmic_ch_combo_index;
1494 else
1495 ucontrol->value.enumerated.item[0] = 0;
1496
1497 return 0;
1498 }
1499
1500 static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
1501 struct skl_mic_sel_config *mic_cfg, struct device *dev)
1502 {
1503 struct skl_specific_cfg *sp_cfg =
1504 &mconfig->formats_config[SKL_PARAM_INIT];
1505
1506 sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
1507 sp_cfg->set_params = SKL_PARAM_SET;
1508 sp_cfg->param_id = 0x00;
1509 if (!sp_cfg->caps) {
1510 sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
1511 if (!sp_cfg->caps)
1512 return -ENOMEM;
1513 }
1514
1515 mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
1516 mic_cfg->flags = 0;
1517 memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);
1518
1519 return 0;
1520 }
1521
1522 static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
1523 struct snd_ctl_elem_value *ucontrol)
1524 {
1525 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1526 struct skl_module_cfg *mconfig = w->priv;
1527 struct skl_mic_sel_config mic_cfg = {0};
1528 struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1529 u32 ch_type = *((u32 *)ec->dobj.private);
1530 const int *list;
1531 u8 in_ch, out_ch, index;
1532
1533 mconfig->dmic_ch_type = ch_type;
1534 mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];
1535
1536 /* enum control index 0 is INVALID, so no channels to be set */
1537 if (mconfig->dmic_ch_combo_index == 0)
1538 return 0;
1539
1540 /* No valid channel selection map for index 0, so offset by 1 */
1541 index = mconfig->dmic_ch_combo_index - 1;
1542
1543 switch (ch_type) {
1544 case SKL_CH_MONO:
1545 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
1546 return -EINVAL;
1547
1548 list = &mic_mono_list[index];
1549 break;
1550
1551 case SKL_CH_STEREO:
1552 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
1553 return -EINVAL;
1554
1555 list = mic_stereo_list[index];
1556 break;
1557
1558 case SKL_CH_TRIO:
1559 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
1560 return -EINVAL;
1561
1562 list = mic_trio_list[index];
1563 break;
1564
1565 case SKL_CH_QUATRO:
1566 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
1567 return -EINVAL;
1568
1569 list = mic_quatro_list[index];
1570 break;
1571
1572 default:
1573 dev_err(w->dapm->dev,
1574 "Invalid channel %d for mic_select module\n",
1575 ch_type);
1576 return -EINVAL;
1577
1578 }
1579
1580 /* the channel type enum maps to the number of channels for that type */
1581 for (out_ch = 0; out_ch < ch_type; out_ch++) {
1582 in_ch = list[out_ch];
1583 mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
1584 }
1585
1586 return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
1587 }
1588
1589 /*
1590 * Fill the dma id for host and link. In case of a passthrough
1591 * pipeline, both host and link are in the same pipeline, so
1592 * the link and host params are copied based on dev_type.
1593 */
1594 static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1595 struct skl_pipe_params *params)
1596 {
1597 struct skl_pipe *pipe = mcfg->pipe;
1598
1599 if (pipe->passthru) {
1600 switch (mcfg->dev_type) {
1601 case SKL_DEVICE_HDALINK:
1602 pipe->p_params->link_dma_id = params->link_dma_id;
1603 pipe->p_params->link_index = params->link_index;
1604 pipe->p_params->link_bps = params->link_bps;
1605 break;
1606
1607 case SKL_DEVICE_HDAHOST:
1608 pipe->p_params->host_dma_id = params->host_dma_id;
1609 pipe->p_params->host_bps = params->host_bps;
1610 break;
1611
1612 default:
1613 break;
1614 }
1615 pipe->p_params->s_fmt = params->s_fmt;
1616 pipe->p_params->ch = params->ch;
1617 pipe->p_params->s_freq = params->s_freq;
1618 pipe->p_params->stream = params->stream;
1619 pipe->p_params->format = params->format;
1620
1621 } else {
1622 memcpy(pipe->p_params, params, sizeof(*params));
1623 }
1624 }
1625
1626 /*
1627 * The FE params are passed by the hw_params of the DAI.
1628 * On hw_params, the params are stored in the gateway module of the FE and
1629 * we need to calculate the format for the DSP module configuration; that
1630 * conversion is done here.
1631 */
1632 int skl_tplg_update_pipe_params(struct device *dev,
1633 struct skl_module_cfg *mconfig,
1634 struct skl_pipe_params *params)
1635 {
1636 struct skl_module_res *res;
1637 struct skl_dev *skl = get_skl_ctx(dev);
1638 struct skl_module_fmt *format = NULL;
1639 u8 cfg_idx = mconfig->pipe->cur_config_idx;
1640
1641 res = &mconfig->module->resources[mconfig->res_idx];
1642 skl_tplg_fill_dma_id(mconfig, params);
1643 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
1644 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
1645
1646 if (skl->nr_modules)
1647 return 0;
1648
1649 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
1650 format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
1651 else
1652 format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;
1653
1654 /* set the hw_params */
1655 format->s_freq = params->s_freq;
1656 format->channels = params->ch;
1657 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1658
1659 /*
1660 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
1661 * container so update bit depth accordingly
1662 */
1663 switch (format->valid_bit_depth) {
1664 case SKL_DEPTH_16BIT:
1665 format->bit_depth = format->valid_bit_depth;
1666 break;
1667
1668 case SKL_DEPTH_24BIT:
1669 case SKL_DEPTH_32BIT:
1670 format->bit_depth = SKL_DEPTH_32BIT;
1671 break;
1672
1673 default:
1674 dev_err(dev, "Invalid bit depth %x for pipe\n",
1675 format->valid_bit_depth);
1676 return -EINVAL;
1677 }
1678
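	/*
	 * Gateway buffer size in bytes per 1 ms, e.g. 48 kHz x 2 channels x
	 * 2 bytes (16-bit container) = 192 bytes.
	 */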
1679 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1680 res->ibs = (format->s_freq / 1000) *
1681 (format->channels) *
1682 (format->bit_depth >> 3);
1683 } else {
1684 res->obs = (format->s_freq / 1000) *
1685 (format->channels) *
1686 (format->bit_depth >> 3);
1687 }
1688
1689 return 0;
1690 }
1691
1692 /*
1693 * Query the module config for the FE DAI
1694 * This is used to find the hw_params set for that DAI and apply to FE
1695 * pipeline
1696 */
1697 struct skl_module_cfg *
1698 skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1699 {
1700 struct snd_soc_dapm_widget *w;
1701 struct snd_soc_dapm_path *p = NULL;
1702
1703 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1704 w = dai->playback_widget;
1705 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1706 if (p->connect && p->sink->power &&
1707 !is_skl_dsp_widget_type(p->sink, dai->dev))
1708 continue;
1709
1710 if (p->sink->priv) {
1711 dev_dbg(dai->dev, "set params for %s\n",
1712 p->sink->name);
1713 return p->sink->priv;
1714 }
1715 }
1716 } else {
1717 w = dai->capture_widget;
1718 snd_soc_dapm_widget_for_each_source_path(w, p) {
1719 if (p->connect && p->source->power &&
1720 !is_skl_dsp_widget_type(p->source, dai->dev))
1721 continue;
1722
1723 if (p->source->priv) {
1724 dev_dbg(dai->dev, "set params for %s\n",
1725 p->source->name);
1726 return p->source->priv;
1727 }
1728 }
1729 }
1730
1731 return NULL;
1732 }
1733
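/*
 * Recursively walk the source paths of a BE playback widget to find the
 * copier module that feeds the AIF-out widget.
 */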
1734 static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1735 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1736 {
1737 struct snd_soc_dapm_path *p;
1738 struct skl_module_cfg *mconfig = NULL;
1739
1740 snd_soc_dapm_widget_for_each_source_path(w, p) {
1741 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1742 if (p->connect &&
1743 (p->sink->id == snd_soc_dapm_aif_out) &&
1744 p->source->priv) {
1745 mconfig = p->source->priv;
1746 return mconfig;
1747 }
1748 mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1749 if (mconfig)
1750 return mconfig;
1751 }
1752 }
1753 return mconfig;
1754 }
1755
1756 static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1757 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1758 {
1759 struct snd_soc_dapm_path *p;
1760 struct skl_module_cfg *mconfig = NULL;
1761
1762 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1763 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1764 if (p->connect &&
1765 (p->source->id == snd_soc_dapm_aif_in) &&
1766 p->sink->priv) {
1767 mconfig = p->sink->priv;
1768 return mconfig;
1769 }
1770 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1771 if (mconfig)
1772 return mconfig;
1773 }
1774 }
1775 return mconfig;
1776 }
1777
1778 struct skl_module_cfg *
1779 skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1780 {
1781 struct snd_soc_dapm_widget *w;
1782 struct skl_module_cfg *mconfig;
1783
1784 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1785 w = dai->playback_widget;
1786 mconfig = skl_get_mconfig_pb_cpr(dai, w);
1787 } else {
1788 w = dai->capture_widget;
1789 mconfig = skl_get_mconfig_cap_cpr(dai, w);
1790 }
1791 return mconfig;
1792 }
1793
1794 static u8 skl_tplg_be_link_type(int dev_type)
1795 {
1796 int ret;
1797
1798 switch (dev_type) {
1799 case SKL_DEVICE_BT:
1800 ret = NHLT_LINK_SSP;
1801 break;
1802
1803 case SKL_DEVICE_DMIC:
1804 ret = NHLT_LINK_DMIC;
1805 break;
1806
1807 case SKL_DEVICE_I2S:
1808 ret = NHLT_LINK_SSP;
1809 break;
1810
1811 case SKL_DEVICE_HDALINK:
1812 ret = NHLT_LINK_HDA;
1813 break;
1814
1815 default:
1816 ret = NHLT_LINK_INVALID;
1817 break;
1818 }
1819
1820 return ret;
1821 }
1822
1823 /*
1824 * Fill the BE gateway parameters
1825 * The BE gateway expects a blob of parameters which are kept in the ACPI
1826 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1827 * The port can have multiple settings so pick based on the pipeline
1828 * parameters
1829 */
1830 static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1831 struct skl_module_cfg *mconfig,
1832 struct skl_pipe_params *params)
1833 {
1834 struct nhlt_specific_cfg *cfg;
1835 struct skl_pipe *pipe = mconfig->pipe;
1836 struct skl_pipe_fmt *pipe_fmt;
1837 struct skl_dev *skl = get_skl_ctx(dai->dev);
1838 int link_type = skl_tplg_be_link_type(mconfig->dev_type);
1839 u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
1840
1841 skl_tplg_fill_dma_id(mconfig, params);
1842
1843 if (link_type == NHLT_LINK_HDA)
1844 return 0;
1845
1846 if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK)
1847 pipe_fmt = &pipe->configs[pipe->pipe_config_idx].out_fmt;
1848 else
1849 pipe_fmt = &pipe->configs[pipe->pipe_config_idx].in_fmt;
1850
1851 /* update the blob based on virtual bus_id */
1852 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1853 pipe_fmt->bps, pipe_fmt->channels,
1854 pipe_fmt->freq, pipe->direction,
1855 dev_type);
1856 if (cfg) {
1857 mconfig->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
1858 mconfig->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
1859 } else {
1860 dev_err(dai->dev, "Blob NULL for id:%d type:%d dirn:%d ch:%d, freq:%d, fmt:%d\n",
1861 mconfig->vbus_id, link_type, params->stream,
1862 params->ch, params->s_freq, params->s_fmt);
1863 return -EINVAL;
1864 }
1865
1866 return 0;
1867 }
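
/*
 * Illustrative sketch (not driver code, values are hypothetical): for an
 * SSP playback pipe configured as 48 kHz / 16 bit / 2 ch on virtual bus 0,
 * the lookup above reduces to roughly
 *
 *	cfg = skl_get_ep_blob(skl, 0, NHLT_LINK_SSP, 16, 2, 48000,
 *			      SNDRV_PCM_STREAM_PLAYBACK, dev_type);
 *
 * and the returned NHLT endpoint blob is handed to the firmware unchanged
 * as the copier module's init-time parameter payload.
 */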
1868
1869 static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1870 struct snd_soc_dapm_widget *w,
1871 struct skl_pipe_params *params)
1872 {
1873 struct snd_soc_dapm_path *p;
1874 int ret = -EIO;
1875
1876 snd_soc_dapm_widget_for_each_source_path(w, p) {
1877 if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
1878 p->source->priv) {
1879
1880 ret = skl_tplg_be_fill_pipe_params(dai,
1881 p->source->priv, params);
1882 if (ret < 0)
1883 return ret;
1884 } else {
1885 ret = skl_tplg_be_set_src_pipe_params(dai,
1886 p->source, params);
1887 if (ret < 0)
1888 return ret;
1889 }
1890 }
1891
1892 return ret;
1893 }
1894
1895 static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1896 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1897 {
1898 struct snd_soc_dapm_path *p;
1899 int ret = -EIO;
1900
1901 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1902 if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
1903 p->sink->priv) {
1904
1905 ret = skl_tplg_be_fill_pipe_params(dai,
1906 p->sink->priv, params);
1907 if (ret < 0)
1908 return ret;
1909 } else {
1910 ret = skl_tplg_be_set_sink_pipe_params(
1911 dai, p->sink, params);
1912 if (ret < 0)
1913 return ret;
1914 }
1915 }
1916
1917 return ret;
1918 }
1919
1920 /*
1921 * BE hw_params can carry source parameters (capture) or sink parameters
1922 * (playback). Based on the stream direction, walk either the source list
1923 * or the sink list and set the pipeline parameters.
1924 */
1925 int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1926 struct skl_pipe_params *params)
1927 {
1928 struct snd_soc_dapm_widget *w;
1929
1930 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1931 w = dai->playback_widget;
1932
1933 return skl_tplg_be_set_src_pipe_params(dai, w, params);
1934
1935 } else {
1936 w = dai->capture_widget;
1937
1938 return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1939 }
1942 }
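
/*
 * Illustrative caller sketch (hypothetical, not driver code): a BE DAI
 * hw_params handler typically fills struct skl_pipe_params from the ALSA
 * hw_params and hands it over:
 *
 *	struct skl_pipe_params p = {0};
 *
 *	p.s_freq = params_rate(params);
 *	p.ch = params_channels(params);
 *	p.s_fmt = snd_pcm_format_width(params_format(params));
 *	p.stream = substream->stream;
 *
 *	return skl_tplg_be_update_params(dai, &p);
 */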
1943
1944 static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1945 {SKL_MIXER_EVENT, skl_tplg_mixer_event},
1946 {SKL_VMIXER_EVENT, skl_tplg_mixer_event},
1947 {SKL_PGA_EVENT, skl_tplg_pga_event},
1948 };
1949
1950 static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1951 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1952 skl_tplg_tlv_control_set},
1953 };
1954
1955 static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
1956 {
1957 .id = SKL_CONTROL_TYPE_MIC_SELECT,
1958 .get = skl_tplg_mic_control_get,
1959 .put = skl_tplg_mic_control_set,
1960 },
1961 {
1962 .id = SKL_CONTROL_TYPE_MULTI_IO_SELECT,
1963 .get = skl_tplg_multi_config_get,
1964 .put = skl_tplg_multi_config_set,
1965 },
1966 {
1967 .id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC,
1968 .get = skl_tplg_multi_config_get_dmic,
1969 .put = skl_tplg_multi_config_set_dmic,
1970 }
1971 };
1972
1973 static int skl_tplg_fill_pipe_cfg(struct device *dev,
1974 struct skl_pipe *pipe, u32 tkn,
1975 u32 tkn_val, int conf_idx, int dir)
1976 {
1977 struct skl_pipe_fmt *fmt;
1978 struct skl_path_config *config;
1979
1980 switch (dir) {
1981 case SKL_DIR_IN:
1982 fmt = &pipe->configs[conf_idx].in_fmt;
1983 break;
1984
1985 case SKL_DIR_OUT:
1986 fmt = &pipe->configs[conf_idx].out_fmt;
1987 break;
1988
1989 default:
1990 dev_err(dev, "Invalid direction: %d\n", dir);
1991 return -EINVAL;
1992 }
1993
1994 config = &pipe->configs[conf_idx];
1995
1996 switch (tkn) {
1997 case SKL_TKN_U32_CFG_FREQ:
1998 fmt->freq = tkn_val;
1999 break;
2000
2001 case SKL_TKN_U8_CFG_CHAN:
2002 fmt->channels = tkn_val;
2003 break;
2004
2005 case SKL_TKN_U8_CFG_BPS:
2006 fmt->bps = tkn_val;
2007 break;
2008
2009 case SKL_TKN_U32_PATH_MEM_PGS:
2010 config->mem_pages = tkn_val;
2011 break;
2012
2013 default:
2014 dev_err(dev, "Invalid token config: %d\n", tkn);
2015 return -EINVAL;
2016 }
2017
2018 return 0;
2019 }
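
/*
 * Illustrative token stream (hypothetical): a pipe with two configurations
 * would typically emit, per configuration,
 *
 *	SKL_TKN_U32_PIPE_CONFIG_ID = n
 *	SKL_TKN_U32_CFG_FREQ / SKL_TKN_U8_CFG_CHAN / SKL_TKN_U8_CFG_BPS
 *	(once per direction, selected via SKL_TKN_U32_DIR_PIN_COUNT)
 *	SKL_TKN_U32_PATH_MEM_PGS
 *
 * which skl_tplg_get_token() routes here via skl_tplg_fill_pipe_cfg().
 */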
2020
2021 static int skl_tplg_fill_pipe_tkn(struct device *dev,
2022 struct skl_pipe *pipe, u32 tkn,
2023 u32 tkn_val)
2024 {
2025
2026 switch (tkn) {
2027 case SKL_TKN_U32_PIPE_CONN_TYPE:
2028 pipe->conn_type = tkn_val;
2029 break;
2030
2031 case SKL_TKN_U32_PIPE_PRIORITY:
2032 pipe->pipe_priority = tkn_val;
2033 break;
2034
2035 case SKL_TKN_U32_PIPE_MEM_PGS:
2036 pipe->memory_pages = tkn_val;
2037 break;
2038
2039 case SKL_TKN_U32_PMODE:
2040 pipe->lp_mode = tkn_val;
2041 break;
2042
2043 case SKL_TKN_U32_PIPE_DIRECTION:
2044 pipe->direction = tkn_val;
2045 break;
2046
2047 case SKL_TKN_U32_NUM_CONFIGS:
2048 pipe->nr_cfgs = tkn_val;
2049 break;
2050
2051 default:
2052 dev_err(dev, "Token not handled %d\n", tkn);
2053 return -EINVAL;
2054 }
2055
2056 return 0;
2057 }
2058
2059 /*
2060 * Add a pipeline by parsing the relevant tokens. If the pipe already
2061 * exists, point mconfig at it and return -EEXIST (see the note below).
2062 */
2063 static int skl_tplg_add_pipe(struct device *dev,
2064 struct skl_module_cfg *mconfig, struct skl_dev *skl,
2065 struct snd_soc_tplg_vendor_value_elem *tkn_elem)
2066 {
2067 struct skl_pipeline *ppl;
2068 struct skl_pipe *pipe;
2069 struct skl_pipe_params *params;
2070
2071 list_for_each_entry(ppl, &skl->ppl_list, node) {
2072 if (ppl->pipe->ppl_id == tkn_elem->value) {
2073 mconfig->pipe = ppl->pipe;
2074 return -EEXIST;
2075 }
2076 }
2077
2078 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2079 if (!ppl)
2080 return -ENOMEM;
2081
2082 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2083 if (!pipe)
2084 return -ENOMEM;
2085
2086 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2087 if (!params)
2088 return -ENOMEM;
2089
2090 pipe->p_params = params;
2091 pipe->ppl_id = tkn_elem->value;
2092 INIT_LIST_HEAD(&pipe->w_list);
2093
2094 ppl->pipe = pipe;
2095 list_add(&ppl->node, &skl->ppl_list);
2096
2097 mconfig->pipe = pipe;
2098 mconfig->pipe->state = SKL_PIPE_INVALID;
2099
2100 return 0;
2101 }
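
/*
 * Note on the return contract (illustrative): callers treat -EEXIST as
 * "pipe already known" rather than as a failure, e.g.
 *
 *	ret = skl_tplg_add_pipe(dev, mconfig, skl, tkn_elem);
 *	if (ret == -EEXIST)
 *		is_pipe_exists = 1;
 *	else if (ret < 0)
 *		return ret;
 */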
2102
2103 static int skl_tplg_get_uuid(struct device *dev, guid_t *guid,
2104 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
2105 {
2106 if (uuid_tkn->token == SKL_TKN_UUID) {
2107 import_guid(guid, uuid_tkn->uuid);
2108 return 0;
2109 }
2110
2111 dev_err(dev, "Not a UUID token %d\n", uuid_tkn->token);
2112
2113 return -EINVAL;
2114 }
2115
2116 static int skl_tplg_fill_pin(struct device *dev,
2117 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2118 struct skl_module_pin *m_pin,
2119 int pin_index)
2120 {
2121 int ret;
2122
2123 switch (tkn_elem->token) {
2124 case SKL_TKN_U32_PIN_MOD_ID:
2125 m_pin[pin_index].id.module_id = tkn_elem->value;
2126 break;
2127
2128 case SKL_TKN_U32_PIN_INST_ID:
2129 m_pin[pin_index].id.instance_id = tkn_elem->value;
2130 break;
2131
2132 case SKL_TKN_UUID:
2133 ret = skl_tplg_get_uuid(dev, &m_pin[pin_index].id.mod_uuid,
2134 (struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
2135 if (ret < 0)
2136 return ret;
2137
2138 break;
2139
2140 default:
2141 dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
2142 return -EINVAL;
2143 }
2144
2145 return 0;
2146 }
2147
2148 /*
2149 * Parse for pin config specific tokens to fill up the
2150 * module private data
2151 */
2152 static int skl_tplg_fill_pins_info(struct device *dev,
2153 struct skl_module_cfg *mconfig,
2154 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2155 int dir, int pin_count)
2156 {
2157 int ret;
2158 struct skl_module_pin *m_pin;
2159
2160 switch (dir) {
2161 case SKL_DIR_IN:
2162 m_pin = mconfig->m_in_pin;
2163 break;
2164
2165 case SKL_DIR_OUT:
2166 m_pin = mconfig->m_out_pin;
2167 break;
2168
2169 default:
2170 dev_err(dev, "Invalid direction value\n");
2171 return -EINVAL;
2172 }
2173
2174 ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
2175 if (ret < 0)
2176 return ret;
2177
2178 m_pin[pin_count].in_use = false;
2179 m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
2180
2181 return 0;
2182 }
2183
2184 /*
2185 * Fill up input/output module config format based
2186 * on the direction
2187 */
2188 static int skl_tplg_fill_fmt(struct device *dev,
2189 struct skl_module_fmt *dst_fmt,
2190 u32 tkn, u32 value)
2191 {
2192 switch (tkn) {
2193 case SKL_TKN_U32_FMT_CH:
2194 dst_fmt->channels = value;
2195 break;
2196
2197 case SKL_TKN_U32_FMT_FREQ:
2198 dst_fmt->s_freq = value;
2199 break;
2200
2201 case SKL_TKN_U32_FMT_BIT_DEPTH:
2202 dst_fmt->bit_depth = value;
2203 break;
2204
2205 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2206 dst_fmt->valid_bit_depth = value;
2207 break;
2208
2209 case SKL_TKN_U32_FMT_CH_CONFIG:
2210 dst_fmt->ch_cfg = value;
2211 break;
2212
2213 case SKL_TKN_U32_FMT_INTERLEAVE:
2214 dst_fmt->interleaving_style = value;
2215 break;
2216
2217 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2218 dst_fmt->sample_type = value;
2219 break;
2220
2221 case SKL_TKN_U32_FMT_CH_MAP:
2222 dst_fmt->ch_map = value;
2223 break;
2224
2225 default:
2226 dev_err(dev, "Invalid token %d\n", tkn);
2227 return -EINVAL;
2228 }
2229
2230 return 0;
2231 }
2232
2233 static int skl_tplg_widget_fill_fmt(struct device *dev,
2234 struct skl_module_iface *fmt,
2235 u32 tkn, u32 val, u32 dir, int fmt_idx)
2236 {
2237 struct skl_module_fmt *dst_fmt;
2238
2239 if (!fmt)
2240 return -EINVAL;
2241
2242 switch (dir) {
2243 case SKL_DIR_IN:
2244 dst_fmt = &fmt->inputs[fmt_idx].fmt;
2245 break;
2246
2247 case SKL_DIR_OUT:
2248 dst_fmt = &fmt->outputs[fmt_idx].fmt;
2249 break;
2250
2251 default:
2252 dev_err(dev, "Invalid direction: %d\n", dir);
2253 return -EINVAL;
2254 }
2255
2256 return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
2257 }
2258
2259 static void skl_tplg_fill_pin_dynamic_val(
2260 struct skl_module_pin *mpin, u32 pin_count, u32 value)
2261 {
2262 int i;
2263
2264 for (i = 0; i < pin_count; i++)
2265 mpin[i].is_dynamic = value;
2266 }
2267
2268 /*
2269 * The resource table in the manifest has pin-specific resources
2270 * such as the pin index and pin buffer size.
2271 */
2272 static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
2273 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2274 struct skl_module_res *res, int pin_idx, int dir)
2275 {
2276 struct skl_module_pin_resources *m_pin;
2277
2278 switch (dir) {
2279 case SKL_DIR_IN:
2280 m_pin = &res->input[pin_idx];
2281 break;
2282
2283 case SKL_DIR_OUT:
2284 m_pin = &res->output[pin_idx];
2285 break;
2286
2287 default:
2288 dev_err(dev, "Invalid pin direction: %d\n", dir);
2289 return -EINVAL;
2290 }
2291
2292 switch (tkn_elem->token) {
2293 case SKL_TKN_MM_U32_RES_PIN_ID:
2294 m_pin->pin_index = tkn_elem->value;
2295 break;
2296
2297 case SKL_TKN_MM_U32_PIN_BUF:
2298 m_pin->buf_size = tkn_elem->value;
2299 break;
2300
2301 default:
2302 dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
2303 return -EINVAL;
2304 }
2305
2306 return 0;
2307 }
2308
2309 /*
2310 * Fill module specific resources from the manifest's resource
2311 * table like CPS, DMA size, mem_pages.
2312 */
2313 static int skl_tplg_fill_res_tkn(struct device *dev,
2314 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2315 struct skl_module_res *res,
2316 int pin_idx, int dir)
2317 {
2318 int ret, tkn_count = 0;
2319
2320 if (!res)
2321 return -EINVAL;
2322
2323 switch (tkn_elem->token) {
2324 case SKL_TKN_MM_U32_DMA_SIZE:
2325 res->dma_buffer_size = tkn_elem->value;
2326 break;
2327
2328 case SKL_TKN_MM_U32_CPC:
2329 res->cpc = tkn_elem->value;
2330 break;
2331
2332 case SKL_TKN_U32_MEM_PAGES:
2333 res->is_pages = tkn_elem->value;
2334 break;
2335
2336 case SKL_TKN_U32_OBS:
2337 res->obs = tkn_elem->value;
2338 break;
2339
2340 case SKL_TKN_U32_IBS:
2341 res->ibs = tkn_elem->value;
2342 break;
2343
2344 case SKL_TKN_MM_U32_RES_PIN_ID:
2345 case SKL_TKN_MM_U32_PIN_BUF:
2346 ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
2347 pin_idx, dir);
2348 if (ret < 0)
2349 return ret;
2350 break;
2351
2352 case SKL_TKN_MM_U32_CPS:
2353 case SKL_TKN_U32_MAX_MCPS:
2354 /* ignore unused tokens */
2355 break;
2356
2357 default:
2358 dev_err(dev, "Not a res type token: %d", tkn_elem->token);
2359 return -EINVAL;
2360
2361 }
2362 tkn_count++;
2363
2364 return tkn_count;
2365 }
2366
2367 /*
2368 * Parse tokens to fill up the module private data
2369 */
2370 static int skl_tplg_get_token(struct device *dev,
2371 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2372 struct skl_dev *skl, struct skl_module_cfg *mconfig)
2373 {
2374 int tkn_count = 0;
2375 int ret;
2376 static int is_pipe_exists;
2377 static int pin_index, dir, conf_idx;
2378 struct skl_module_iface *iface = NULL;
2379 struct skl_module_res *res = NULL;
2380 int res_idx = mconfig->res_idx;
2381 int fmt_idx = mconfig->fmt_idx;
2382
2383 /*
2384 * If the manifest structure contains no modules, fill all
2385 * the module data at index 0.
2386 * res_idx and fmt_idx default to 0.
2387 */
2388 if (skl->nr_modules == 0) {
2389 res = &mconfig->module->resources[res_idx];
2390 iface = &mconfig->module->formats[fmt_idx];
2391 }
2392
2393 if (tkn_elem->token > SKL_TKN_MAX)
2394 return -EINVAL;
2395
2396 switch (tkn_elem->token) {
2397 case SKL_TKN_U8_IN_QUEUE_COUNT:
2398 mconfig->module->max_input_pins = tkn_elem->value;
2399 break;
2400
2401 case SKL_TKN_U8_OUT_QUEUE_COUNT:
2402 mconfig->module->max_output_pins = tkn_elem->value;
2403 break;
2404
2405 case SKL_TKN_U8_DYN_IN_PIN:
2406 if (!mconfig->m_in_pin)
2407 mconfig->m_in_pin =
2408 devm_kcalloc(dev, MAX_IN_QUEUE,
2409 sizeof(*mconfig->m_in_pin),
2410 GFP_KERNEL);
2411 if (!mconfig->m_in_pin)
2412 return -ENOMEM;
2413
2414 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
2415 tkn_elem->value);
2416 break;
2417
2418 case SKL_TKN_U8_DYN_OUT_PIN:
2419 if (!mconfig->m_out_pin)
2420 mconfig->m_out_pin =
2421 devm_kcalloc(dev, MAX_OUT_QUEUE,
2422 sizeof(*mconfig->m_out_pin),
2423 GFP_KERNEL);
2424 if (!mconfig->m_out_pin)
2425 return -ENOMEM;
2426
2427 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
2428 tkn_elem->value);
2429 break;
2430
2431 case SKL_TKN_U8_TIME_SLOT:
2432 mconfig->time_slot = tkn_elem->value;
2433 break;
2434
2435 case SKL_TKN_U8_CORE_ID:
2436 mconfig->core_id = tkn_elem->value;
2437 break;
2438
2439 case SKL_TKN_U8_MOD_TYPE:
2440 mconfig->m_type = tkn_elem->value;
2441 break;
2442
2443 case SKL_TKN_U8_DEV_TYPE:
2444 mconfig->dev_type = tkn_elem->value;
2445 break;
2446
2447 case SKL_TKN_U8_HW_CONN_TYPE:
2448 mconfig->hw_conn_type = tkn_elem->value;
2449 break;
2450
2451 case SKL_TKN_U16_MOD_INST_ID:
2452 mconfig->id.instance_id =
2453 tkn_elem->value;
2454 break;
2455
2456 case SKL_TKN_U32_MEM_PAGES:
2457 case SKL_TKN_U32_MAX_MCPS:
2458 case SKL_TKN_U32_OBS:
2459 case SKL_TKN_U32_IBS:
2460 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
2461 if (ret < 0)
2462 return ret;
2463
2464 break;
2465
2466 case SKL_TKN_U32_VBUS_ID:
2467 mconfig->vbus_id = tkn_elem->value;
2468 break;
2469
2470 case SKL_TKN_U32_PARAMS_FIXUP:
2471 mconfig->params_fixup = tkn_elem->value;
2472 break;
2473
2474 case SKL_TKN_U32_CONVERTER:
2475 mconfig->converter = tkn_elem->value;
2476 break;
2477
2478 case SKL_TKN_U32_D0I3_CAPS:
2479 mconfig->d0i3_caps = tkn_elem->value;
2480 break;
2481
2482 case SKL_TKN_U32_PIPE_ID:
2483 ret = skl_tplg_add_pipe(dev,
2484 mconfig, skl, tkn_elem);
2485
2486 if (ret < 0) {
2487 if (ret == -EEXIST) {
2488 is_pipe_exists = 1;
2489 break;
2490 }
2491 return ret;
2492 }
2493
2494 break;
2495
2496 case SKL_TKN_U32_PIPE_CONFIG_ID:
2497 conf_idx = tkn_elem->value;
2498 break;
2499
2500 case SKL_TKN_U32_PIPE_CONN_TYPE:
2501 case SKL_TKN_U32_PIPE_PRIORITY:
2502 case SKL_TKN_U32_PIPE_MEM_PGS:
2503 case SKL_TKN_U32_PMODE:
2504 case SKL_TKN_U32_PIPE_DIRECTION:
2505 case SKL_TKN_U32_NUM_CONFIGS:
2506 if (is_pipe_exists) {
2507 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
2508 tkn_elem->token, tkn_elem->value);
2509 if (ret < 0)
2510 return ret;
2511 }
2512
2513 break;
2514
2515 case SKL_TKN_U32_PATH_MEM_PGS:
2516 case SKL_TKN_U32_CFG_FREQ:
2517 case SKL_TKN_U8_CFG_CHAN:
2518 case SKL_TKN_U8_CFG_BPS:
2519 if (mconfig->pipe->nr_cfgs) {
2520 ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
2521 tkn_elem->token, tkn_elem->value,
2522 conf_idx, dir);
2523 if (ret < 0)
2524 return ret;
2525 }
2526 break;
2527
2528 case SKL_TKN_CFG_MOD_RES_ID:
2529 mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
2530 break;
2531
2532 case SKL_TKN_CFG_MOD_FMT_ID:
2533 mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value;
2534 break;
2535
2536 /*
2537 * The SKL_TKN_U32_DIR_PIN_COUNT token carries both the direction
2538 * and the pin count: the lower four bits hold the direction and
2539 * the next four bits the pin count.
2540 */
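/*
 * Worked example (hypothetical token value): 0x31 decodes to
 * dir = 0x31 & SKL_IN_DIR_BIT_MASK = 1 and
 * pin_index = (0x31 & SKL_PIN_COUNT_MASK) >> 4 = 3.
 */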
2541 case SKL_TKN_U32_DIR_PIN_COUNT:
2542 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
2543 pin_index = (tkn_elem->value &
2544 SKL_PIN_COUNT_MASK) >> 4;
2545
2546 break;
2547
2548 case SKL_TKN_U32_FMT_CH:
2549 case SKL_TKN_U32_FMT_FREQ:
2550 case SKL_TKN_U32_FMT_BIT_DEPTH:
2551 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2552 case SKL_TKN_U32_FMT_CH_CONFIG:
2553 case SKL_TKN_U32_FMT_INTERLEAVE:
2554 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2555 case SKL_TKN_U32_FMT_CH_MAP:
2556 ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
2557 tkn_elem->value, dir, pin_index);
2558
2559 if (ret < 0)
2560 return ret;
2561
2562 break;
2563
2564 case SKL_TKN_U32_PIN_MOD_ID:
2565 case SKL_TKN_U32_PIN_INST_ID:
2566 case SKL_TKN_UUID:
2567 ret = skl_tplg_fill_pins_info(dev,
2568 mconfig, tkn_elem, dir,
2569 pin_index);
2570 if (ret < 0)
2571 return ret;
2572
2573 break;
2574
2575 case SKL_TKN_U32_FMT_CFG_IDX:
2576 if (tkn_elem->value >= SKL_MAX_PARAMS_TYPES)
2577 return -EINVAL;
2578
2579 mconfig->fmt_cfg_idx = tkn_elem->value;
2580 break;
2581
2582 case SKL_TKN_U32_CAPS_SIZE:
2583 mconfig->formats_config[mconfig->fmt_cfg_idx].caps_size =
2584 tkn_elem->value;
2585
2586 break;
2587
2588 case SKL_TKN_U32_CAPS_SET_PARAMS:
2589 mconfig->formats_config[mconfig->fmt_cfg_idx].set_params =
2590 tkn_elem->value;
2591 break;
2592
2593 case SKL_TKN_U32_CAPS_PARAMS_ID:
2594 mconfig->formats_config[mconfig->fmt_cfg_idx].param_id =
2595 tkn_elem->value;
2596 break;
2597
2598 case SKL_TKN_U32_PROC_DOMAIN:
2599 mconfig->domain =
2600 tkn_elem->value;
2601
2602 break;
2603
2604 case SKL_TKN_U32_DMA_BUF_SIZE:
2605 mconfig->dma_buffer_size = tkn_elem->value;
2606 break;
2607
2608 case SKL_TKN_U8_IN_PIN_TYPE:
2609 case SKL_TKN_U8_OUT_PIN_TYPE:
2610 case SKL_TKN_U8_CONN_TYPE:
2611 break;
2612
2613 default:
2614 dev_err(dev, "Token %d not handled\n",
2615 tkn_elem->token);
2616 return -EINVAL;
2617 }
2618
2619 tkn_count++;
2620
2621 return tkn_count;
2622 }
2623
2624 /*
2625 * Parse the vendor array for specific tokens to construct
2626 * module private data
2627 */
2628 static int skl_tplg_get_tokens(struct device *dev,
2629 char *pvt_data, struct skl_dev *skl,
2630 struct skl_module_cfg *mconfig, int block_size)
2631 {
2632 struct snd_soc_tplg_vendor_array *array;
2633 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2634 int tkn_count = 0, ret;
2635 int off = 0, tuple_size = 0;
2636 bool is_module_guid = true;
2637
2638 if (block_size <= 0)
2639 return -EINVAL;
2640
2641 while (tuple_size < block_size) {
2642 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2643
2644 off += array->size;
2645
2646 switch (array->type) {
2647 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2648 dev_warn(dev, "no string tokens expected for skl tplg\n");
2649 continue;
2650
2651 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2652 if (is_module_guid) {
2653 ret = skl_tplg_get_uuid(dev, (guid_t *)mconfig->guid,
2654 array->uuid);
2655 is_module_guid = false;
2656 } else {
2657 ret = skl_tplg_get_token(dev, array->value, skl,
2658 mconfig);
2659 }
2660
2661 if (ret < 0)
2662 return ret;
2663
2664 tuple_size += sizeof(*array->uuid);
2665
2666 continue;
2667
2668 default:
2669 tkn_elem = array->value;
2670 tkn_count = 0;
2671 break;
2672 }
2673
2674 while (tkn_count <= (array->num_elems - 1)) {
2675 ret = skl_tplg_get_token(dev, tkn_elem,
2676 skl, mconfig);
2677
2678 if (ret < 0)
2679 return ret;
2680
2681 tkn_count = tkn_count + ret;
2682 tkn_elem++;
2683 }
2684
2685 tuple_size += tkn_count * sizeof(*tkn_elem);
2686 }
2687
2688 return off;
2689 }
2690
2691 /*
2692 * Every data block is preceded by a descriptor giving the number
2693 * of data blocks, the type of the block and its size
2694 */
2695 static int skl_tplg_get_desc_blocks(struct device *dev,
2696 struct snd_soc_tplg_vendor_array *array)
2697 {
2698 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2699
2700 tkn_elem = array->value;
2701
2702 switch (tkn_elem->token) {
2703 case SKL_TKN_U8_NUM_BLOCKS:
2704 case SKL_TKN_U8_BLOCK_TYPE:
2705 case SKL_TKN_U16_BLOCK_SIZE:
2706 return tkn_elem->value;
2707
2708 default:
2709 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
2710 break;
2711 }
2712
2713 return -EINVAL;
2714 }
2715
2716 /* Functions to parse private data from configuration file format v4 */
2717
2718 /*
2719 * Add pipeline from topology binary into driver pipeline list
2720 *
2721 * If it has already been added, return that instance;
2722 * otherwise create a new instance and add it to the driver list.
2723 */
2724 static int skl_tplg_add_pipe_v4(struct device *dev,
2725 struct skl_module_cfg *mconfig, struct skl_dev *skl,
2726 struct skl_dfw_v4_pipe *dfw_pipe)
2727 {
2728 struct skl_pipeline *ppl;
2729 struct skl_pipe *pipe;
2730 struct skl_pipe_params *params;
2731
2732 list_for_each_entry(ppl, &skl->ppl_list, node) {
2733 if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) {
2734 mconfig->pipe = ppl->pipe;
2735 return 0;
2736 }
2737 }
2738
2739 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
2740 if (!ppl)
2741 return -ENOMEM;
2742
2743 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
2744 if (!pipe)
2745 return -ENOMEM;
2746
2747 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
2748 if (!params)
2749 return -ENOMEM;
2750
2751 pipe->ppl_id = dfw_pipe->pipe_id;
2752 pipe->memory_pages = dfw_pipe->memory_pages;
2753 pipe->pipe_priority = dfw_pipe->pipe_priority;
2754 pipe->conn_type = dfw_pipe->conn_type;
2755 pipe->state = SKL_PIPE_INVALID;
2756 pipe->p_params = params;
2757 INIT_LIST_HEAD(&pipe->w_list);
2758
2759 ppl->pipe = pipe;
2760 list_add(&ppl->node, &skl->ppl_list);
2761
2762 mconfig->pipe = pipe;
2763
2764 return 0;
2765 }
2766
2767 static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin,
2768 struct skl_module_pin *m_pin,
2769 bool is_dynamic, int max_pin)
2770 {
2771 int i;
2772
2773 for (i = 0; i < max_pin; i++) {
2774 m_pin[i].id.module_id = dfw_pin[i].module_id;
2775 m_pin[i].id.instance_id = dfw_pin[i].instance_id;
2776 m_pin[i].in_use = false;
2777 m_pin[i].is_dynamic = is_dynamic;
2778 m_pin[i].pin_state = SKL_PIN_UNBIND;
2779 }
2780 }
2781
2782 static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt,
2783 struct skl_dfw_v4_module_fmt *src_fmt,
2784 int pins)
2785 {
2786 int i;
2787
2788 for (i = 0; i < pins; i++) {
2789 dst_fmt[i].fmt.channels = src_fmt[i].channels;
2790 dst_fmt[i].fmt.s_freq = src_fmt[i].freq;
2791 dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth;
2792 dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth;
2793 dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg;
2794 dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map;
2795 dst_fmt[i].fmt.interleaving_style =
2796 src_fmt[i].interleaving_style;
2797 dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type;
2798 }
2799 }
2800
2801 static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w,
2802 struct skl_dev *skl, struct device *dev,
2803 struct skl_module_cfg *mconfig)
2804 {
2805 struct skl_dfw_v4_module *dfw =
2806 (struct skl_dfw_v4_module *)tplg_w->priv.data;
2807 int ret;
2808 int idx = mconfig->fmt_cfg_idx;
2809
2810 dev_dbg(dev, "Parsing Skylake v4 widget topology data\n");
2811
2812 ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid);
2813 if (ret)
2814 return ret;
2815 mconfig->id.module_id = -1;
2816 mconfig->id.instance_id = dfw->instance_id;
2817 mconfig->module->resources[0].cpc = dfw->max_mcps / 1000;
2818 mconfig->module->resources[0].ibs = dfw->ibs;
2819 mconfig->module->resources[0].obs = dfw->obs;
2820 mconfig->core_id = dfw->core_id;
2821 mconfig->module->max_input_pins = dfw->max_in_queue;
2822 mconfig->module->max_output_pins = dfw->max_out_queue;
2823 mconfig->module->loadable = dfw->is_loadable;
2824 skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt,
2825 MAX_IN_QUEUE);
2826 skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt,
2827 MAX_OUT_QUEUE);
2828
2829 mconfig->params_fixup = dfw->params_fixup;
2830 mconfig->converter = dfw->converter;
2831 mconfig->m_type = dfw->module_type;
2832 mconfig->vbus_id = dfw->vbus_id;
2833 mconfig->module->resources[0].is_pages = dfw->mem_pages;
2834
2835 ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe);
2836 if (ret)
2837 return ret;
2838
2839 mconfig->dev_type = dfw->dev_type;
2840 mconfig->hw_conn_type = dfw->hw_conn_type;
2841 mconfig->time_slot = dfw->time_slot;
2842 mconfig->formats_config[idx].caps_size = dfw->caps.caps_size;
2843
2844 mconfig->m_in_pin = devm_kcalloc(dev,
2845 MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin),
2846 GFP_KERNEL);
2847 if (!mconfig->m_in_pin)
2848 return -ENOMEM;
2849
2850 mconfig->m_out_pin = devm_kcalloc(dev,
2851 MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin),
2852 GFP_KERNEL);
2853 if (!mconfig->m_out_pin)
2854 return -ENOMEM;
2855
2856 skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin,
2857 dfw->is_dynamic_in_pin,
2858 mconfig->module->max_input_pins);
2859 skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin,
2860 dfw->is_dynamic_out_pin,
2861 mconfig->module->max_output_pins);
2862
2863 if (mconfig->formats_config[idx].caps_size) {
2864 mconfig->formats_config[idx].set_params = dfw->caps.set_params;
2865 mconfig->formats_config[idx].param_id = dfw->caps.param_id;
2866 mconfig->formats_config[idx].caps =
2867 devm_kzalloc(dev, mconfig->formats_config[idx].caps_size,
2868 GFP_KERNEL);
2869 if (!mconfig->formats_config[idx].caps)
2870 return -ENOMEM;
2871 memcpy(mconfig->formats_config[idx].caps, dfw->caps.caps,
2872 dfw->caps.caps_size);
2873 }
2874
2875 return 0;
2876 }
2877
2878 static int skl_tplg_get_caps_data(struct device *dev, char *data,
2879 struct skl_module_cfg *mconfig)
2880 {
2881 int idx = mconfig->fmt_cfg_idx;
2882
2883 if (mconfig->formats_config[idx].caps_size > 0) {
2884 mconfig->formats_config[idx].caps =
2885 devm_kzalloc(dev, mconfig->formats_config[idx].caps_size,
2886 GFP_KERNEL);
2887 if (!mconfig->formats_config[idx].caps)
2888 return -ENOMEM;
2889 memcpy(mconfig->formats_config[idx].caps, data,
2890 mconfig->formats_config[idx].caps_size);
2891 }
2892
2893 return mconfig->formats_config[idx].caps_size;
2894 }
2895
2896 /*
2897 * Parse the private data for each token and its corresponding value.
2898 * The private data can contain multiple data blocks: the blocks are
2899 * preceded by a descriptor for the number of blocks, and each block by
2900 * descriptors for the type and size of the succeeding data block.
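 *
 * A minimal sketch of the expected layout (illustrative only):
 *
 *   [NUM_DATA_BLOCKS descriptor]
 *   [BLOCK_TYPE descriptor][BLOCK_SIZE descriptor][block 0 payload]
 *   [BLOCK_TYPE descriptor][BLOCK_SIZE descriptor][block 1 payload]
 *   ...
 *
 * where each payload is either a vendor tuple array (SKL_TYPE_TUPLE,
 * handled by skl_tplg_get_tokens()) or a raw caps binary (handled by
 * skl_tplg_get_caps_data()).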
2901 */
2902 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2903 struct skl_dev *skl, struct device *dev,
2904 struct skl_module_cfg *mconfig)
2905 {
2906 struct snd_soc_tplg_vendor_array *array;
2907 int num_blocks, block_size, block_type, off = 0;
2908 char *data;
2909 int ret;
2910
2911 /*
2912 * v4 configuration files have a valid UUID at the start of
2913 * the widget's private data.
2914 */
2915 if (uuid_is_valid((char *)tplg_w->priv.data))
2916 return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig);
2917
2918 /* Read the NUM_DATA_BLOCKS descriptor */
2919 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2920 ret = skl_tplg_get_desc_blocks(dev, array);
2921 if (ret < 0)
2922 return ret;
2923 num_blocks = ret;
2924
2925 off += array->size;
2926 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2927 while (num_blocks > 0) {
2928 array = (struct snd_soc_tplg_vendor_array *)
2929 (tplg_w->priv.data + off);
2930
2931 ret = skl_tplg_get_desc_blocks(dev, array);
2932
2933 if (ret < 0)
2934 return ret;
2935 block_type = ret;
2936 off += array->size;
2937
2938 array = (struct snd_soc_tplg_vendor_array *)
2939 (tplg_w->priv.data + off);
2940
2941 ret = skl_tplg_get_desc_blocks(dev, array);
2942
2943 if (ret < 0)
2944 return ret;
2945 block_size = ret;
2946 off += array->size;
2947
2948 array = (struct snd_soc_tplg_vendor_array *)
2949 (tplg_w->priv.data + off);
2950
2951 data = (tplg_w->priv.data + off);
2952
2953 if (block_type == SKL_TYPE_TUPLE) {
2954 ret = skl_tplg_get_tokens(dev, data,
2955 skl, mconfig, block_size);
2956 } else {
2957 ret = skl_tplg_get_caps_data(dev, data, mconfig);
2958 }
2959
2960 if (ret < 0)
2961 return ret;
2962
2963 --num_blocks;
2964 off += ret;
2965 }
2966
2967 return 0;
2968 }
2969
2970 static void skl_clear_pin_config(struct snd_soc_component *component,
2971 struct snd_soc_dapm_widget *w)
2972 {
2973 int i;
2974 struct skl_module_cfg *mconfig;
2975 struct skl_pipe *pipe;
2976
2977 if (!strncmp(w->dapm->component->name, component->name,
2978 strlen(component->name))) {
2979 mconfig = w->priv;
2980 pipe = mconfig->pipe;
2981 for (i = 0; i < mconfig->module->max_input_pins; i++) {
2982 mconfig->m_in_pin[i].in_use = false;
2983 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2984 }
2985 for (i = 0; i < mconfig->module->max_output_pins; i++) {
2986 mconfig->m_out_pin[i].in_use = false;
2987 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2988 }
2989 pipe->state = SKL_PIPE_INVALID;
2990 mconfig->m_state = SKL_MODULE_UNINIT;
2991 }
2992 }
2993
2994 void skl_cleanup_resources(struct skl_dev *skl)
2995 {
2996 struct snd_soc_component *soc_component = skl->component;
2997 struct snd_soc_dapm_widget *w;
2998 struct snd_soc_card *card;
2999
3000 if (soc_component == NULL)
3001 return;
3002
3003 card = soc_component->card;
3004 if (!card || !card->instantiated)
3005 return;
3006
3007 list_for_each_entry(w, &card->widgets, list) {
3008 if (is_skl_dsp_widget_type(w, skl->dev) && w->priv != NULL)
3009 skl_clear_pin_config(soc_component, w);
3010 }
3011
3012 skl_clear_module_cnt(skl->dsp);
3013 }
3014
3015 /*
3016 * Topology core widget load callback
3017 *
3018 * This is used to save the private data of each widget, which tells the
3019 * driver about the module and pipeline parameters the DSP FW expects,
3020 * such as ids, resource values and formats.
3021 */
3022 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
3023 struct snd_soc_dapm_widget *w,
3024 struct snd_soc_tplg_dapm_widget *tplg_w)
3025 {
3026 int ret;
3027 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
3028 struct skl_dev *skl = bus_to_skl(bus);
3029 struct skl_module_cfg *mconfig;
3030
3031 if (!tplg_w->priv.size)
3032 goto bind_event;
3033
3034 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
3035
3036 if (!mconfig)
3037 return -ENOMEM;
3038
3039 if (skl->nr_modules == 0) {
3040 mconfig->module = devm_kzalloc(bus->dev,
3041 sizeof(*mconfig->module), GFP_KERNEL);
3042 if (!mconfig->module)
3043 return -ENOMEM;
3044 }
3045
3046 w->priv = mconfig;
3047
3048 /*
3049 * the module binary can be loaded later, so set it to be queried when
3050 * the module is loaded for a use case
3051 */
3052 mconfig->id.module_id = -1;
3053
3054 /* To provide backward compatibility, set default as SKL_PARAM_INIT */
3055 mconfig->fmt_cfg_idx = SKL_PARAM_INIT;
3056
3057 /* Parse private data for tuples */
3058 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
3059 if (ret < 0)
3060 return ret;
3061
3062 skl_debug_init_module(skl->debugfs, w, mconfig);
3063
3064 bind_event:
3065 if (tplg_w->event_type == 0) {
3066 dev_dbg(bus->dev, "ASoC: No event handler required\n");
3067 return 0;
3068 }
3069
3070 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
3071 ARRAY_SIZE(skl_tplg_widget_ops),
3072 tplg_w->event_type);
3073
3074 if (ret) {
3075 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
3076 __func__, tplg_w->event_type);
3077 return -EINVAL;
3078 }
3079
3080 return 0;
3081 }
3082
3083 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
3084 struct snd_soc_tplg_bytes_control *bc)
3085 {
3086 struct skl_algo_data *ac;
3087 struct skl_dfw_algo_data *dfw_ac =
3088 (struct skl_dfw_algo_data *)bc->priv.data;
3089
3090 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
3091 if (!ac)
3092 return -ENOMEM;
3093
3094 /* Fill private data */
3095 ac->max = dfw_ac->max;
3096 ac->param_id = dfw_ac->param_id;
3097 ac->set_params = dfw_ac->set_params;
3098 ac->size = dfw_ac->max;
3099
3100 if (ac->max) {
3101 ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL);
3102 if (!ac->params)
3103 return -ENOMEM;
3104
3105 memcpy(ac->params, dfw_ac->params, ac->max);
3106 }
3107
3108 be->dobj.private = ac;
3109 return 0;
3110 }
3111
3112 static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
3113 struct snd_soc_tplg_enum_control *ec)
3114 {
3115
3116 void *data;
3117
3118 if (ec->priv.size) {
3119 data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
3120 if (!data)
3121 return -ENOMEM;
3122 memcpy(data, ec->priv.data, ec->priv.size);
3123 se->dobj.private = data;
3124 }
3125
3126 return 0;
3127
3128 }
3129
3130 static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
3131 int index,
3132 struct snd_kcontrol_new *kctl,
3133 struct snd_soc_tplg_ctl_hdr *hdr)
3134 {
3135 struct soc_bytes_ext *sb;
3136 struct snd_soc_tplg_bytes_control *tplg_bc;
3137 struct snd_soc_tplg_enum_control *tplg_ec;
3138 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
3139 struct soc_enum *se;
3140
3141 switch (hdr->ops.info) {
3142 case SND_SOC_TPLG_CTL_BYTES:
3143 tplg_bc = container_of(hdr,
3144 struct snd_soc_tplg_bytes_control, hdr);
3145 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
3146 sb = (struct soc_bytes_ext *)kctl->private_value;
3147 if (tplg_bc->priv.size)
3148 return skl_init_algo_data(
3149 bus->dev, sb, tplg_bc);
3150 }
3151 break;
3152
3153 case SND_SOC_TPLG_CTL_ENUM:
3154 tplg_ec = container_of(hdr,
3155 struct snd_soc_tplg_enum_control, hdr);
3156 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) {
3157 se = (struct soc_enum *)kctl->private_value;
3158 if (tplg_ec->priv.size)
3159 skl_init_enum_data(bus->dev, se, tplg_ec);
3160 }
3161
3162 /*
3163 * now that the control initializations are done, remove
3164 * write permission for the DMIC configuration enums to
3165 * avoid conflicts between NHLT settings and user interaction
3166 */
3167
3168 if (hdr->ops.get == SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC)
3169 kctl->access = SNDRV_CTL_ELEM_ACCESS_READ;
3170
3171 break;
3172
3173 default:
3174 dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
3175 hdr->ops.get, hdr->ops.put, hdr->ops.info);
3176 break;
3177 }
3178
3179 return 0;
3180 }
3181
3182 static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
3183 struct snd_soc_tplg_vendor_string_elem *str_elem,
3184 struct skl_dev *skl)
3185 {
3186 int tkn_count = 0;
3187 static int ref_count;
3188
3189 switch (str_elem->token) {
3190 case SKL_TKN_STR_LIB_NAME:
3191 if (ref_count > skl->lib_count - 1) {
3192 ref_count = 0;
3193 return -EINVAL;
3194 }
3195
3196 strncpy(skl->lib_info[ref_count].name,
3197 str_elem->string,
3198 ARRAY_SIZE(skl->lib_info[ref_count].name));
3199 ref_count++;
3200 break;
3201
3202 default:
3203 dev_err(dev, "Not a string token %d\n", str_elem->token);
3204 break;
3205 }
3206 tkn_count++;
3207
3208 return tkn_count;
3209 }
3210
3211 static int skl_tplg_get_str_tkn(struct device *dev,
3212 struct snd_soc_tplg_vendor_array *array,
3213 struct skl_dev *skl)
3214 {
3215 int tkn_count = 0, ret;
3216 struct snd_soc_tplg_vendor_string_elem *str_elem;
3217
3218 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
3219 while (tkn_count < array->num_elems) {
3220 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
3221 str_elem++;
3222
3223 if (ret < 0)
3224 return ret;
3225
3226 tkn_count = tkn_count + ret;
3227 }
3228
3229 return tkn_count;
3230 }
3231
3232 static int skl_tplg_manifest_fill_fmt(struct device *dev,
3233 struct skl_module_iface *fmt,
3234 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3235 u32 dir, int fmt_idx)
3236 {
3237 struct skl_module_pin_fmt *dst_fmt;
3238 struct skl_module_fmt *mod_fmt;
3239 int ret;
3240
3241 if (!fmt)
3242 return -EINVAL;
3243
3244 switch (dir) {
3245 case SKL_DIR_IN:
3246 dst_fmt = &fmt->inputs[fmt_idx];
3247 break;
3248
3249 case SKL_DIR_OUT:
3250 dst_fmt = &fmt->outputs[fmt_idx];
3251 break;
3252
3253 default:
3254 dev_err(dev, "Invalid direction: %d\n", dir);
3255 return -EINVAL;
3256 }
3257
3258 mod_fmt = &dst_fmt->fmt;
3259
3260 switch (tkn_elem->token) {
3261 case SKL_TKN_MM_U32_INTF_PIN_ID:
3262 dst_fmt->id = tkn_elem->value;
3263 break;
3264
3265 default:
3266 ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
3267 tkn_elem->value);
3268 if (ret < 0)
3269 return ret;
3270 break;
3271 }
3272
3273 return 0;
3274 }
3275
3276 static int skl_tplg_fill_mod_info(struct device *dev,
3277 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3278 struct skl_module *mod)
3279 {
3280
3281 if (!mod)
3282 return -EINVAL;
3283
3284 switch (tkn_elem->token) {
3285 case SKL_TKN_U8_IN_PIN_TYPE:
3286 mod->input_pin_type = tkn_elem->value;
3287 break;
3288
3289 case SKL_TKN_U8_OUT_PIN_TYPE:
3290 mod->output_pin_type = tkn_elem->value;
3291 break;
3292
3293 case SKL_TKN_U8_IN_QUEUE_COUNT:
3294 mod->max_input_pins = tkn_elem->value;
3295 break;
3296
3297 case SKL_TKN_U8_OUT_QUEUE_COUNT:
3298 mod->max_output_pins = tkn_elem->value;
3299 break;
3300
3301 case SKL_TKN_MM_U8_NUM_RES:
3302 mod->nr_resources = tkn_elem->value;
3303 break;
3304
3305 case SKL_TKN_MM_U8_NUM_INTF:
3306 mod->nr_interfaces = tkn_elem->value;
3307 break;
3308
3309 default:
3310 dev_err(dev, "Invalid mod info token %d", tkn_elem->token);
3311 return -EINVAL;
3312 }
3313
3314 return 0;
3315 }
3316
3317
3318 static int skl_tplg_get_int_tkn(struct device *dev,
3319 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
3320 struct skl_dev *skl)
3321 {
3322 int tkn_count = 0, ret;
3323 static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
3324 struct skl_module_res *res = NULL;
3325 struct skl_module_iface *fmt = NULL;
3326 struct skl_module *mod = NULL;
3327 static struct skl_astate_param *astate_table;
3328 static int astate_cfg_idx, count;
3329 int i;
3330 size_t size;
3331
3332 if (skl->modules) {
3333 mod = skl->modules[mod_idx];
3334 res = &mod->resources[res_val_idx];
3335 fmt = &mod->formats[intf_val_idx];
3336 }
3337
3338 switch (tkn_elem->token) {
3339 case SKL_TKN_U32_LIB_COUNT:
3340 skl->lib_count = tkn_elem->value;
3341 break;
3342
3343 case SKL_TKN_U8_NUM_MOD:
3344 skl->nr_modules = tkn_elem->value;
3345 skl->modules = devm_kcalloc(dev, skl->nr_modules,
3346 sizeof(*skl->modules), GFP_KERNEL);
3347 if (!skl->modules)
3348 return -ENOMEM;
3349
3350 for (i = 0; i < skl->nr_modules; i++) {
3351 skl->modules[i] = devm_kzalloc(dev,
3352 sizeof(struct skl_module), GFP_KERNEL);
3353 if (!skl->modules[i])
3354 return -ENOMEM;
3355 }
3356 break;
3357
3358 case SKL_TKN_MM_U8_MOD_IDX:
3359 mod_idx = tkn_elem->value;
3360 break;
3361
3362 case SKL_TKN_U32_ASTATE_COUNT:
3363 if (astate_table != NULL) {
3364 dev_err(dev, "More than one entry for A-State count");
3365 return -EINVAL;
3366 }
3367
3368 if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
3369 dev_err(dev, "Invalid A-State count %d\n",
3370 tkn_elem->value);
3371 return -EINVAL;
3372 }
3373
3374 size = struct_size(skl->cfg.astate_cfg, astate_table,
3375 tkn_elem->value);
3376 skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
3377 if (!skl->cfg.astate_cfg)
3378 return -ENOMEM;
3379
3380 astate_table = skl->cfg.astate_cfg->astate_table;
3381 count = skl->cfg.astate_cfg->count = tkn_elem->value;
3382 break;
3383
3384 case SKL_TKN_U32_ASTATE_IDX:
3385 if (tkn_elem->value >= count) {
3386 dev_err(dev, "Invalid A-State index %d\n",
3387 tkn_elem->value);
3388 return -EINVAL;
3389 }
3390
3391 astate_cfg_idx = tkn_elem->value;
3392 break;
3393
3394 case SKL_TKN_U32_ASTATE_KCPS:
3395 astate_table[astate_cfg_idx].kcps = tkn_elem->value;
3396 break;
3397
3398 case SKL_TKN_U32_ASTATE_CLK_SRC:
3399 astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
3400 break;
3401
3402 case SKL_TKN_U8_IN_PIN_TYPE:
3403 case SKL_TKN_U8_OUT_PIN_TYPE:
3404 case SKL_TKN_U8_IN_QUEUE_COUNT:
3405 case SKL_TKN_U8_OUT_QUEUE_COUNT:
3406 case SKL_TKN_MM_U8_NUM_RES:
3407 case SKL_TKN_MM_U8_NUM_INTF:
3408 ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod);
3409 if (ret < 0)
3410 return ret;
3411 break;
3412
3413 case SKL_TKN_U32_DIR_PIN_COUNT:
3414 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
3415 pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4;
3416 break;
3417
3418 case SKL_TKN_MM_U32_RES_ID:
3419 if (!res)
3420 return -EINVAL;
3421
3422 res->id = tkn_elem->value;
3423 res_val_idx = tkn_elem->value;
3424 break;
3425
3426 case SKL_TKN_MM_U32_FMT_ID:
3427 if (!fmt)
3428 return -EINVAL;
3429
3430 fmt->fmt_idx = tkn_elem->value;
3431 intf_val_idx = tkn_elem->value;
3432 break;
3433
3434 case SKL_TKN_MM_U32_CPS:
3435 case SKL_TKN_MM_U32_DMA_SIZE:
3436 case SKL_TKN_MM_U32_CPC:
3437 case SKL_TKN_U32_MEM_PAGES:
3438 case SKL_TKN_U32_OBS:
3439 case SKL_TKN_U32_IBS:
3440 case SKL_TKN_MM_U32_RES_PIN_ID:
3441 case SKL_TKN_MM_U32_PIN_BUF:
3442 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir);
3443 if (ret < 0)
3444 return ret;
3445
3446 break;
3447
3448 case SKL_TKN_MM_U32_NUM_IN_FMT:
3449 if (!res)
3450 return -EINVAL;
3451
3452 res->nr_input_pins = tkn_elem->value;
3453 break;
3454
3455 case SKL_TKN_MM_U32_NUM_OUT_FMT:
3456 if (!res)
3457 return -EINVAL;
3458
3459 res->nr_output_pins = tkn_elem->value;
3460 break;
3461
3462 case SKL_TKN_U32_FMT_CH:
3463 case SKL_TKN_U32_FMT_FREQ:
3464 case SKL_TKN_U32_FMT_BIT_DEPTH:
3465 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
3466 case SKL_TKN_U32_FMT_CH_CONFIG:
3467 case SKL_TKN_U32_FMT_INTERLEAVE:
3468 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
3469 case SKL_TKN_U32_FMT_CH_MAP:
3470 case SKL_TKN_MM_U32_INTF_PIN_ID:
3471 ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem,
3472 dir, pin_idx);
3473 if (ret < 0)
3474 return ret;
3475 break;
3476
3477 default:
3478 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
3479 return -EINVAL;
3480 }
3481 tkn_count++;
3482
3483 return tkn_count;
3484 }
3485
3486 /*
3487 * Fill the manifest structure by parsing the tokens based on the
3488 * type.
3489 */
3490 static int skl_tplg_get_manifest_tkn(struct device *dev,
3491 char *pvt_data, struct skl_dev *skl,
3492 int block_size)
3493 {
3494 int tkn_count = 0, ret;
3495 int off = 0, tuple_size = 0;
3496 u8 uuid_index = 0;
3497 struct snd_soc_tplg_vendor_array *array;
3498 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
3499
3500 if (block_size <= 0)
3501 return -EINVAL;
3502
3503 while (tuple_size < block_size) {
3504 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
3505 off += array->size;
3506 switch (array->type) {
3507 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
3508 ret = skl_tplg_get_str_tkn(dev, array, skl);
3509
3510 if (ret < 0)
3511 return ret;
3512 tkn_count = ret;
3513
3514 tuple_size += tkn_count *
3515 sizeof(struct snd_soc_tplg_vendor_string_elem);
3516 continue;
3517
3518 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
3519 if (array->uuid->token != SKL_TKN_UUID) {
3520 dev_err(dev, "Not a UUID token: %d\n",
3521 array->uuid->token);
3522 return -EINVAL;
3523 }
3524 if (uuid_index >= skl->nr_modules) {
3525 dev_err(dev, "Too many UUID tokens\n");
3526 return -EINVAL;
3527 }
3528 import_guid(&skl->modules[uuid_index++]->uuid,
3529 array->uuid->uuid);
3530
3531 tuple_size += sizeof(*array->uuid);
3532 continue;
3533
3534 default:
3535 tkn_elem = array->value;
3536 tkn_count = 0;
3537 break;
3538 }
3539
3540 while (tkn_count <= array->num_elems - 1) {
3541 ret = skl_tplg_get_int_tkn(dev,
3542 tkn_elem, skl);
3543 if (ret < 0)
3544 return ret;
3545
3546 tkn_count = tkn_count + ret;
3547 tkn_elem++;
3548 }
3549 tuple_size += (tkn_count * sizeof(*tkn_elem));
3550 tkn_count = 0;
3551 }
3552
3553 return off;
3554 }
3555
3556 /*
3557 * Parse manifest private data for tokens. The private data block is
3558 * preceded by descriptors for type and size of data block.
3559 */
3560 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
3561 struct device *dev, struct skl_dev *skl)
3562 {
3563 struct snd_soc_tplg_vendor_array *array;
3564 int num_blocks, block_size = 0, block_type, off = 0;
3565 char *data;
3566 int ret;
3567
3568 /* Read the NUM_DATA_BLOCKS descriptor */
3569 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
3570 ret = skl_tplg_get_desc_blocks(dev, array);
3571 if (ret < 0)
3572 return ret;
3573 num_blocks = ret;
3574
3575 off += array->size;
3576 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
3577 while (num_blocks > 0) {
3578 array = (struct snd_soc_tplg_vendor_array *)
3579 (manifest->priv.data + off);
3580 ret = skl_tplg_get_desc_blocks(dev, array);
3581
3582 if (ret < 0)
3583 return ret;
3584 block_type = ret;
3585 off += array->size;
3586
3587 array = (struct snd_soc_tplg_vendor_array *)
3588 (manifest->priv.data + off);
3589
3590 ret = skl_tplg_get_desc_blocks(dev, array);
3591
3592 if (ret < 0)
3593 return ret;
3594 block_size = ret;
3595 off += array->size;
3596
3597 array = (struct snd_soc_tplg_vendor_array *)
3598 (manifest->priv.data + off);
3599
3600 data = (manifest->priv.data + off);
3601
3602 if (block_type == SKL_TYPE_TUPLE) {
3603 ret = skl_tplg_get_manifest_tkn(dev, data, skl,
3604 block_size);
3605
3606 if (ret < 0)
3607 return ret;
3608
3609 --num_blocks;
3610 } else {
3611 return -EINVAL;
3612 }
3613 off += ret;
3614 }
3615
3616 return 0;
3617 }
3618
3619 static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
3620 struct snd_soc_tplg_manifest *manifest)
3621 {
3622 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
3623 struct skl_dev *skl = bus_to_skl(bus);
3624
3625 /* proceed only if we have private data defined */
3626 if (manifest->priv.size == 0)
3627 return 0;
3628
3629 skl_tplg_get_manifest_data(manifest, bus->dev, skl);
3630
3631 if (skl->lib_count > SKL_MAX_LIB) {
3632 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
3633 skl->lib_count);
3634 return -EINVAL;
3635 }
3636
3637 return 0;
3638 }
3639
3640 static void skl_tplg_complete(struct snd_soc_component *component)
3641 {
3642 struct snd_soc_dobj *dobj;
3643 struct snd_soc_acpi_mach *mach;
3644 struct snd_ctl_elem_value *val;
3645 int i;
3646
3647 val = kmalloc(sizeof(*val), GFP_KERNEL);
3648 if (!val)
3649 return;
3650
3651 mach = dev_get_platdata(component->card->dev);
3652 list_for_each_entry(dobj, &component->dobj_list, list) {
3653 struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
3654 struct soc_enum *se;
3655 char **texts;
3656 char chan_text[4];
3657
3658 if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol ||
3659 kcontrol->put != skl_tplg_multi_config_set_dmic)
3660 continue;
3661
3662 se = (struct soc_enum *)kcontrol->private_value;
3663 texts = dobj->control.dtexts;
3664 sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
3665
3666 for (i = 0; i < se->items; i++) {
3667 if (strstr(texts[i], chan_text)) {
3668 memset(val, 0, sizeof(*val));
3669 val->value.enumerated.item[0] = i;
3670 kcontrol->put(kcontrol, val);
3671 }
3672 }
3673 }
3674 kfree(val);
3675 }
3676
3677 static struct snd_soc_tplg_ops skl_tplg_ops = {
3678 .widget_load = skl_tplg_widget_load,
3679 .control_load = skl_tplg_control_load,
3680 .bytes_ext_ops = skl_tlv_ops,
3681 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
3682 .io_ops = skl_tplg_kcontrol_ops,
3683 .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
3684 .manifest = skl_manifest_load,
3685 .dai_load = skl_dai_load,
3686 .complete = skl_tplg_complete,
3687 };
3688
3689 /*
3690 * A pipe can have multiple modules, each of which is also a DAPM widget.
3691 * While managing a pipeline we need the list of all the widgets in that
3692 * pipeline, so this helper, skl_tplg_create_pipe_widget_list(), collects
3693 * the SKL type widgets belonging to each pipeline.
3694 */
3695 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component)
3696 {
3697 struct snd_soc_dapm_widget *w;
3698 struct skl_module_cfg *mcfg = NULL;
3699 struct skl_pipe_module *p_module = NULL;
3700 struct skl_pipe *pipe;
3701
3702 list_for_each_entry(w, &component->card->widgets, list) {
3703 if (is_skl_dsp_widget_type(w, component->dev) && w->priv) {
3704 mcfg = w->priv;
3705 pipe = mcfg->pipe;
3706
3707 p_module = devm_kzalloc(component->dev,
3708 sizeof(*p_module), GFP_KERNEL);
3709 if (!p_module)
3710 return -ENOMEM;
3711
3712 p_module->w = w;
3713 list_add_tail(&p_module->node, &pipe->w_list);
3714 }
3715 }
3716
3717 return 0;
3718 }
3719
3720 static void skl_tplg_set_pipe_type(struct skl_dev *skl, struct skl_pipe *pipe)
3721 {
3722 struct skl_pipe_module *w_module;
3723 struct snd_soc_dapm_widget *w;
3724 struct skl_module_cfg *mconfig;
3725 bool host_found = false, link_found = false;
3726
3727 list_for_each_entry(w_module, &pipe->w_list, node) {
3728 w = w_module->w;
3729 mconfig = w->priv;
3730
3731 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
3732 host_found = true;
3733 else if (mconfig->dev_type != SKL_DEVICE_NONE)
3734 link_found = true;
3735 }
3736
3737 pipe->passthru = host_found && link_found;
3741 }
3742
3743 /*
3744 * SKL topology init routine
3745 */
3746 int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
3747 {
3748 int ret;
3749 const struct firmware *fw;
3750 struct skl_dev *skl = bus_to_skl(bus);
3751 struct skl_pipeline *ppl;
3752
3753 ret = request_firmware(&fw, skl->tplg_name, bus->dev);
3754 if (ret < 0) {
3755 char alt_tplg_name[64];
3756
3757 snprintf(alt_tplg_name, sizeof(alt_tplg_name), "%s-tplg.bin",
3758 skl->mach->drv_name);
3759 dev_info(bus->dev, "tplg fw %s load failed with %d, trying alternative tplg name %s",
3760 skl->tplg_name, ret, alt_tplg_name);
3761
3762 ret = request_firmware(&fw, alt_tplg_name, bus->dev);
3763 if (!ret)
3764 goto component_load;
3765
3766 dev_info(bus->dev, "tplg %s failed with %d, falling back to dfw_sst.bin",
3767 alt_tplg_name, ret);
3768
3769 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
3770 if (ret < 0) {
3771 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
3772 "dfw_sst.bin", ret);
3773 return ret;
3774 }
3775 }
3776
3777 component_load:
3778 ret = snd_soc_tplg_component_load(component, &skl_tplg_ops, fw);
3779 if (ret < 0) {
3780 dev_err(bus->dev, "tplg component load failed %d\n", ret);
3781 goto err;
3782 }
3783
3784 ret = skl_tplg_create_pipe_widget_list(component);
3785 if (ret < 0) {
3786 dev_err(bus->dev, "tplg create pipe widget list failed %d\n",
3787 ret);
3788 goto err;
3789 }
3790
3791 list_for_each_entry(ppl, &skl->ppl_list, node)
3792 skl_tplg_set_pipe_type(skl, ppl->pipe);
3793
3794 err:
3795 release_firmware(fw);
3796 return ret;
3797 }
3798
3799 void skl_tplg_exit(struct snd_soc_component *component, struct hdac_bus *bus)
3800 {
3801 struct skl_dev *skl = bus_to_skl(bus);
3802 struct skl_pipeline *ppl, *tmp;
3803
3804 list_for_each_entry_safe(ppl, tmp, &skl->ppl_list, node)
3805 list_del(&ppl->node);
3806
3807 /* clean up topology */
3808 snd_soc_tplg_component_remove(component);
3809 }
3810