1/*
2 * skl-topology.c - Implements Platform component ALSA controls/widget
3 * handlers.
4 *
5 * Copyright (C) 2014-2015 Intel Corp
6 * Author: Jeeja KP <jeeja.kp@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19#include <linux/slab.h>
20#include <linux/types.h>
21#include <linux/firmware.h>
22#include <sound/soc.h>
23#include <sound/soc-topology.h>
24#include <uapi/sound/snd_sst_tokens.h>
25#include "skl-sst-dsp.h"
26#include "skl-sst-ipc.h"
27#include "skl-topology.h"
28#include "skl.h"
29#include "skl-tplg-interface.h"
30#include "../common/sst-dsp.h"
31#include "../common/sst-dsp-priv.h"
32
33#define SKL_CH_FIXUP_MASK (1 << 0)
34#define SKL_RATE_FIXUP_MASK (1 << 1)
35#define SKL_FMT_FIXUP_MASK (1 << 2)
36#define SKL_IN_DIR_BIT_MASK BIT(0)
37#define SKL_PIN_COUNT_MASK GENMASK(7, 4)
38
39void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
40{
41 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
42
43 switch (caps) {
44 case SKL_D0I3_NONE:
45 d0i3->non_d0i3++;
46 break;
47
48 case SKL_D0I3_STREAMING:
49 d0i3->streaming++;
50 break;
51
52 case SKL_D0I3_NON_STREAMING:
53 d0i3->non_streaming++;
54 break;
55 }
56}
57
58void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
59{
60 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
61
62 switch (caps) {
63 case SKL_D0I3_NONE:
64 d0i3->non_d0i3--;
65 break;
66
67 case SKL_D0I3_STREAMING:
68 d0i3->streaming--;
69 break;
70
71 case SKL_D0I3_NON_STREAMING:
72 d0i3->non_streaming--;
73 break;
74 }
75}
76
77/*
78 * The SKL DSP driver models only a few DAPM widget types, so the rest are
79 * ignored. This helper checks if the SKL driver handles this widget type
80 */
81static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
82{
83 switch (w->id) {
84 case snd_soc_dapm_dai_link:
85 case snd_soc_dapm_dai_in:
86 case snd_soc_dapm_aif_in:
87 case snd_soc_dapm_aif_out:
88 case snd_soc_dapm_dai_out:
89 case snd_soc_dapm_switch:
90 return false;
91 default:
92 return true;
93 }
94}
95
96/*
97 * Each pipeline needs memory to be allocated. Check if we have free memory
98 * from the available pool.
99 */
100static bool skl_is_pipe_mem_avail(struct skl *skl,
101 struct skl_module_cfg *mconfig)
102{
103 struct skl_sst *ctx = skl->skl_sst;
104
105 if (skl->resource.mem + mconfig->pipe->memory_pages >
106 skl->resource.max_mem) {
107 dev_err(ctx->dev,
108 "%s: module_id %d instance %d\n", __func__,
109 mconfig->id.module_id,
110 mconfig->id.instance_id);
111 dev_err(ctx->dev,
112 "exceeds ppl memory available %d mem %d\n",
113 skl->resource.max_mem, skl->resource.mem);
114 return false;
115 } else {
116 return true;
117 }
118}
119
120/*
121 * Add the mem to the mem pool. This is freed when pipe is deleted.
122 * Note: DSP does actual memory management we only keep track for complete
123 * pool
124 */
125static void skl_tplg_alloc_pipe_mem(struct skl *skl,
126 struct skl_module_cfg *mconfig)
127{
128 skl->resource.mem += mconfig->pipe->memory_pages;
129}
130
131/*
132 * A pipeline needs DSP CPU resources for computation, this is
133 * quantified in MCPS (Million Clocks Per Second) required for module/pipe
134 *
135 * Each pipeline needs MCPS to be allocated. Check if we have MCPS for this
136 * pipe.
137 */
138
139static bool skl_is_pipe_mcps_avail(struct skl *skl,
140 struct skl_module_cfg *mconfig)
141{
142 struct skl_sst *ctx = skl->skl_sst;
143
144 if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
145 dev_err(ctx->dev,
146 "%s: module_id %d instance %d\n", __func__,
147 mconfig->id.module_id, mconfig->id.instance_id);
148 dev_err(ctx->dev,
7ca42f5a 149 "exceeds ppl mcps available %d > mem %d\n",
150 skl->resource.max_mcps, skl->resource.mcps);
151 return false;
152 } else {
153 return true;
154 }
155}
156
157static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
158 struct skl_module_cfg *mconfig)
159{
160 skl->resource.mcps += mconfig->mcps;
161}
162
163/*
164 * Free the mcps when tearing down
165 */
166static void
167skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
168{
169 skl->resource.mcps -= mconfig->mcps;
170}
171
172/*
173 * Free the memory when tearing down
174 */
175static void
176skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
177{
178 skl->resource.mem -= mconfig->pipe->memory_pages;
179}
180
181
182static void skl_dump_mconfig(struct skl_sst *ctx,
183 struct skl_module_cfg *mcfg)
184{
185 dev_dbg(ctx->dev, "Dumping config\n");
186 dev_dbg(ctx->dev, "Input Format:\n");
187 dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
188 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
189 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
190 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
191 dev_dbg(ctx->dev, "Output Format:\n");
192 dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
193 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
194 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
195 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
196}
197
198static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
199{
200 int slot_map = 0xFFFFFFFF;
201 int start_slot = 0;
202 int i;
203
204 for (i = 0; i < chs; i++) {
205 /*
206 * For 2 channels with starting slot as 0, slot map will
207 * look like 0xFFFFFF10.
208 */
209 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
210 start_slot++;
211 }
212 fmt->ch_map = slot_map;
213}
214
215static void skl_tplg_update_params(struct skl_module_fmt *fmt,
216 struct skl_pipe_params *params, int fixup)
217{
218 if (fixup & SKL_RATE_FIXUP_MASK)
219 fmt->s_freq = params->s_freq;
220 if (fixup & SKL_CH_FIXUP_MASK) {
221 fmt->channels = params->ch;
222 skl_tplg_update_chmap(fmt, fmt->channels);
223 }
224 if (fixup & SKL_FMT_FIXUP_MASK) {
225 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
226
227 /*
228 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
229 * container so update bit depth accordingly
230 */
231 switch (fmt->valid_bit_depth) {
232 case SKL_DEPTH_16BIT:
233 fmt->bit_depth = fmt->valid_bit_depth;
234 break;
235
236 default:
237 fmt->bit_depth = SKL_DEPTH_32BIT;
238 break;
239 }
240 }
241
242}
243
244/*
245 * A pipeline may have modules which impact the pcm parameters, like SRC,
246 * channel converter, format converter.
247 * We need to calculate the output params by applying the 'fixup'.
248 * Topology will tell the driver which type of fixup is to be applied by
249 * supplying the fixup mask, so based on that we calculate the output.
250 *
251 * In the FE the pcm hw_params is the source/target format. The same is
252 * applicable for the BE when its hw_params is invoked.
253 * Here, based on the FE or BE pipeline and the direction, we calculate the
254 * input and output fixups and then apply them to the module
255 */
256static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
257 struct skl_pipe_params *params, bool is_fe)
258{
259 int in_fixup, out_fixup;
260 struct skl_module_fmt *in_fmt, *out_fmt;
261
262 /* Fixups will be applied to pin 0 only */
263 in_fmt = &m_cfg->in_fmt[0];
264 out_fmt = &m_cfg->out_fmt[0];
265
266 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
267 if (is_fe) {
268 in_fixup = m_cfg->params_fixup;
269 out_fixup = (~m_cfg->converter) &
270 m_cfg->params_fixup;
271 } else {
272 out_fixup = m_cfg->params_fixup;
273 in_fixup = (~m_cfg->converter) &
274 m_cfg->params_fixup;
275 }
276 } else {
277 if (is_fe) {
278 out_fixup = m_cfg->params_fixup;
279 in_fixup = (~m_cfg->converter) &
280 m_cfg->params_fixup;
281 } else {
282 in_fixup = m_cfg->params_fixup;
283 out_fixup = (~m_cfg->converter) &
284 m_cfg->params_fixup;
285 }
286 }
287
288 skl_tplg_update_params(in_fmt, params, in_fixup);
289 skl_tplg_update_params(out_fmt, params, out_fixup);
290}
291
292/*
293 * A module needs input and output buffers, which are dependent upon pcm
294 * params, so once we have calculated the params, we need the buffer
295 * calculation as well.
296 */
297static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
298 struct skl_module_cfg *mcfg)
299{
300 int multiplier = 1;
301 struct skl_module_fmt *in_fmt, *out_fmt;
302 int in_rate, out_rate;
303
304
305 /* Since the fixup is applied to pin 0 only, ibs and obs need to
306 * change for pin 0 only
307 */
308 in_fmt = &mcfg->in_fmt[0];
309 out_fmt = &mcfg->out_fmt[0];
310
311 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
312 multiplier = 5;
313
314 if (in_fmt->s_freq % 1000)
315 in_rate = (in_fmt->s_freq / 1000) + 1;
316 else
317 in_rate = (in_fmt->s_freq / 1000);
318
319 mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
320 (mcfg->in_fmt->bit_depth >> 3) *
321 multiplier;
322
323 if (mcfg->out_fmt->s_freq % 1000)
324 out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
325 else
326 out_rate = (mcfg->out_fmt->s_freq / 1000);
327
328 mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
329 (mcfg->out_fmt->bit_depth >> 3) *
330 multiplier;
331}
332
333static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
334 struct skl_sst *ctx)
335{
336 struct skl_module_cfg *m_cfg = w->priv;
337 int link_type, dir;
338 u32 ch, s_freq, s_fmt;
339 struct nhlt_specific_cfg *cfg;
340 struct skl *skl = get_skl_ctx(ctx->dev);
341
342 /* check if we already have blob */
343 if (m_cfg->formats_config.caps_size > 0)
344 return 0;
345
346 dev_dbg(ctx->dev, "Applying default cfg blob\n");
347 switch (m_cfg->dev_type) {
348 case SKL_DEVICE_DMIC:
349 link_type = NHLT_LINK_DMIC;
350 dir = SNDRV_PCM_STREAM_CAPTURE;
351 s_freq = m_cfg->in_fmt[0].s_freq;
352 s_fmt = m_cfg->in_fmt[0].bit_depth;
353 ch = m_cfg->in_fmt[0].channels;
354 break;
355
356 case SKL_DEVICE_I2S:
357 link_type = NHLT_LINK_SSP;
358 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
359 dir = SNDRV_PCM_STREAM_PLAYBACK;
360 s_freq = m_cfg->out_fmt[0].s_freq;
361 s_fmt = m_cfg->out_fmt[0].bit_depth;
362 ch = m_cfg->out_fmt[0].channels;
363 } else {
364 dir = SNDRV_PCM_STREAM_CAPTURE;
365 s_freq = m_cfg->in_fmt[0].s_freq;
366 s_fmt = m_cfg->in_fmt[0].bit_depth;
367 ch = m_cfg->in_fmt[0].channels;
368 }
369 break;
370
371 default:
372 return -EINVAL;
373 }
374
375 /* update the blob based on virtual bus_id and default params */
376 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
377 s_fmt, ch, s_freq, dir);
378 if (cfg) {
379 m_cfg->formats_config.caps_size = cfg->size;
380 m_cfg->formats_config.caps = (u32 *) &cfg->caps;
381 } else {
382 dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
383 m_cfg->vbus_id, link_type, dir);
384 dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
385 ch, s_freq, s_fmt);
386 return -EIO;
387 }
388
389 return 0;
390}
391
392static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
393 struct skl_sst *ctx)
394{
395 struct skl_module_cfg *m_cfg = w->priv;
396 struct skl_pipe_params *params = m_cfg->pipe->p_params;
397 int p_conn_type = m_cfg->pipe->conn_type;
398 bool is_fe;
399
400 if (!m_cfg->params_fixup)
401 return;
402
403 dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
404 w->name);
405
406 skl_dump_mconfig(ctx, m_cfg);
407
408 if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
409 is_fe = true;
410 else
411 is_fe = false;
412
413 skl_tplg_update_params_fixup(m_cfg, params, is_fe);
414 skl_tplg_update_buffer_size(ctx, m_cfg);
415
416 dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
417 w->name);
418
419 skl_dump_mconfig(ctx, m_cfg);
420}
421
422/*
423 * some modules can have multiple params set from user controls and these
424 * need to be set after the module is initialized. If the set_param flag is
425 * set, the module params will be set after the module is initialized.
426 */
427static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
428 struct skl_sst *ctx)
429{
430 int i, ret;
431 struct skl_module_cfg *mconfig = w->priv;
432 const struct snd_kcontrol_new *k;
433 struct soc_bytes_ext *sb;
434 struct skl_algo_data *bc;
435 struct skl_specific_cfg *sp_cfg;
436
437 if (mconfig->formats_config.caps_size > 0 &&
438 mconfig->formats_config.set_params == SKL_PARAM_SET) {
439 sp_cfg = &mconfig->formats_config;
440 ret = skl_set_module_params(ctx, sp_cfg->caps,
441 sp_cfg->caps_size,
442 sp_cfg->param_id, mconfig);
443 if (ret < 0)
444 return ret;
445 }
446
447 for (i = 0; i < w->num_kcontrols; i++) {
448 k = &w->kcontrol_news[i];
449 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
450 sb = (void *) k->private_value;
451 bc = (struct skl_algo_data *)sb->dobj.private;
452
453 if (bc->set_params == SKL_PARAM_SET) {
454 ret = skl_set_module_params(ctx,
455 (u32 *)bc->params, bc->size,
456 bc->param_id, mconfig);
457 if (ret < 0)
458 return ret;
459 }
460 }
461 }
462
463 return 0;
464}
465
466/*
467 * some module params can be set from user control and this is required
468 * when the module is initialized. If a module param is required in init, it
469 * is identified by the set_param flag. If the set_param flag is not set, then
470 * this parameter needs to be set as part of module init.
471 */
472static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
473{
474 const struct snd_kcontrol_new *k;
475 struct soc_bytes_ext *sb;
476 struct skl_algo_data *bc;
477 struct skl_module_cfg *mconfig = w->priv;
478 int i;
479
480 for (i = 0; i < w->num_kcontrols; i++) {
481 k = &w->kcontrol_news[i];
482 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
483 sb = (struct soc_bytes_ext *)k->private_value;
484 bc = (struct skl_algo_data *)sb->dobj.private;
485
486 if (bc->set_params != SKL_PARAM_INIT)
487 continue;
488
489 mconfig->formats_config.caps = (u32 *)bc->params;
490 mconfig->formats_config.caps_size = bc->size;
491
492 break;
493 }
494 }
495
496 return 0;
497}
498
499/*
500 * Inside a pipe instance, we can have various modules. These modules need
501 * to be instantiated in the DSP by invoking INIT_MODULE IPC, which is achieved by
502 * skl_init_module() routine, so invoke that for all modules in a pipeline
503 */
504static int
505skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
506{
507 struct skl_pipe_module *w_module;
508 struct snd_soc_dapm_widget *w;
509 struct skl_module_cfg *mconfig;
510 struct skl_sst *ctx = skl->skl_sst;
511 int ret = 0;
512
513 list_for_each_entry(w_module, &pipe->w_list, node) {
514 w = w_module->w;
515 mconfig = w->priv;
516
517 /* check if module ids are populated */
518 if (mconfig->id.module_id < 0) {
519 dev_err(skl->skl_sst->dev,
520 "module %pUL id not populated\n",
521 (uuid_le *)mconfig->guid);
522 return -EIO;
523 }
524
525 /* check resource available */
526 if (!skl_is_pipe_mcps_avail(skl, mconfig))
527 return -ENOMEM;
528
529 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
530 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
531 mconfig->id.module_id, mconfig->guid);
532 if (ret < 0)
533 return ret;
534
535 mconfig->m_state = SKL_MODULE_LOADED;
536 }
537
538 /* update blob if blob is null for be with default value */
539 skl_tplg_update_be_blob(w, ctx);
540
541 /*
542 * apply fix/conversion to module params based on
543 * FE/BE params
544 */
545 skl_tplg_update_module_params(w, ctx);
546 mconfig->id.pvt_id = skl_get_pvt_id(ctx, mconfig);
547 if (mconfig->id.pvt_id < 0)
548 return ret;
549 skl_tplg_set_module_init_data(w);
550 ret = skl_init_module(ctx, mconfig);
551 if (ret < 0) {
552 skl_put_pvt_id(ctx, mconfig);
553 return ret;
554 }
555 skl_tplg_alloc_pipe_mcps(skl, mconfig);
556 ret = skl_tplg_set_module_params(w, ctx);
557 if (ret < 0)
558 return ret;
559 }
560
561 return 0;
562}
563
564static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
565 struct skl_pipe *pipe)
566{
567 int ret;
568 struct skl_pipe_module *w_module = NULL;
569 struct skl_module_cfg *mconfig = NULL;
570
571 list_for_each_entry(w_module, &pipe->w_list, node) {
572 mconfig = w_module->w->priv;
573
574 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
575 mconfig->m_state > SKL_MODULE_UNINIT) {
576 ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
577 mconfig->id.module_id);
578 if (ret < 0)
579 return -EIO;
580 }
581 skl_put_pvt_id(ctx, mconfig);
582 }
583
584 /* no modules to unload in this path, so return */
585 return 0;
586}
587
588/*
589 * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
590 * need to create the pipeline. So we do the following:
591 * - check the resources
592 * - Create the pipeline
593 * - Initialize the modules in pipeline
594 * - finally bind all modules together
595 */
596static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
597 struct skl *skl)
598{
599 int ret;
600 struct skl_module_cfg *mconfig = w->priv;
601 struct skl_pipe_module *w_module;
602 struct skl_pipe *s_pipe = mconfig->pipe;
603 struct skl_module_cfg *src_module = NULL, *dst_module;
604 struct skl_sst *ctx = skl->skl_sst;
605
606 /* check resource available */
607 if (!skl_is_pipe_mcps_avail(skl, mconfig))
608 return -EBUSY;
609
610 if (!skl_is_pipe_mem_avail(skl, mconfig))
611 return -ENOMEM;
612
613 /*
614 * Create a list of modules for pipe.
615 * This list contains modules from source to sink
616 */
617 ret = skl_create_pipeline(ctx, mconfig->pipe);
618 if (ret < 0)
619 return ret;
620
621 skl_tplg_alloc_pipe_mem(skl, mconfig);
622 skl_tplg_alloc_pipe_mcps(skl, mconfig);
623
624 /* Init all pipe modules from source to sink */
625 ret = skl_tplg_init_pipe_modules(skl, s_pipe);
626 if (ret < 0)
627 return ret;
628
629 /* Bind modules from source to sink */
630 list_for_each_entry(w_module, &s_pipe->w_list, node) {
631 dst_module = w_module->w->priv;
632
633 if (src_module == NULL) {
634 src_module = dst_module;
635 continue;
636 }
637
638 ret = skl_bind_modules(ctx, src_module, dst_module);
639 if (ret < 0)
640 return ret;
641
642 src_module = dst_module;
643 }
644
645 return 0;
646}
647
648static int skl_fill_sink_instance_id(struct skl_sst *ctx,
649 struct skl_algo_data *alg_data)
650{
651 struct skl_kpb_params *params = (struct skl_kpb_params *)alg_data->params;
652 struct skl_mod_inst_map *inst;
653 int i, pvt_id;
654
655 inst = params->map;
656
657 for (i = 0; i < params->num_modules; i++) {
658 pvt_id = skl_get_pvt_instance_id_map(ctx,
659 inst->mod_id, inst->inst_id);
660 if (pvt_id < 0)
661 return -EINVAL;
662 inst->inst_id = pvt_id;
663 inst++;
664 }
665 return 0;
666}
667
668/*
669 * Some modules require params to be set after the module is bound to
670 * all pins connected.
671 *
672 * The module provider initializes set_param flag for such modules and we
673 * send params after binding
674 */
675static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
676 struct skl_module_cfg *mcfg, struct skl_sst *ctx)
677{
678 int i, ret;
679 struct skl_module_cfg *mconfig = w->priv;
680 const struct snd_kcontrol_new *k;
681 struct soc_bytes_ext *sb;
682 struct skl_algo_data *bc;
683 struct skl_specific_cfg *sp_cfg;
684
685 /*
686 * check all out/in pins are in bind state.
687 * if so set the module param
688 */
689 for (i = 0; i < mcfg->max_out_queue; i++) {
690 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
691 return 0;
692 }
693
694 for (i = 0; i < mcfg->max_in_queue; i++) {
695 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
696 return 0;
697 }
698
699 if (mconfig->formats_config.caps_size > 0 &&
700 mconfig->formats_config.set_params == SKL_PARAM_BIND) {
701 sp_cfg = &mconfig->formats_config;
702 ret = skl_set_module_params(ctx, sp_cfg->caps,
703 sp_cfg->caps_size,
704 sp_cfg->param_id, mconfig);
705 if (ret < 0)
706 return ret;
707 }
708
709 for (i = 0; i < w->num_kcontrols; i++) {
710 k = &w->kcontrol_news[i];
711 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
712 sb = (void *) k->private_value;
713 bc = (struct skl_algo_data *)sb->dobj.private;
714
715 if (bc->set_params == SKL_PARAM_BIND) {
5e8f0ee4
D
716 if (mconfig->m_type == SKL_MODULE_TYPE_KPB)
717 skl_fill_sink_instance_id(ctx, bc);
cc6a4044
JK
718 ret = skl_set_module_params(ctx,
719 (u32 *)bc->params, bc->max,
720 bc->param_id, mconfig);
721 if (ret < 0)
722 return ret;
723 }
724 }
725 }
726
727 return 0;
728}
729
730static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
731 struct skl *skl,
732 struct snd_soc_dapm_widget *src_w,
733 struct skl_module_cfg *src_mconfig)
734{
735 struct snd_soc_dapm_path *p;
736 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
737 struct skl_module_cfg *sink_mconfig;
738 struct skl_sst *ctx = skl->skl_sst;
739 int ret;
740
741 snd_soc_dapm_widget_for_each_sink_path(w, p) {
742 if (!p->connect)
743 continue;
744
745 dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
746 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
747
748 next_sink = p->sink;
749
750 if (!is_skl_dsp_widget_type(p->sink))
751 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
752
753 /*
754 * here we will check widgets in the sink pipelines; they
755 * can be of any widget type and we are only interested in
756 * the ones used by SKL, so check that first
757 */
758 if ((p->sink->priv != NULL) &&
759 is_skl_dsp_widget_type(p->sink)) {
760
761 sink = p->sink;
762 sink_mconfig = sink->priv;
763
764 if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
765 sink_mconfig->m_state == SKL_MODULE_UNINIT)
766 continue;
767
768 /* Bind source to sink, mixin is always source */
769 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
770 if (ret)
771 return ret;
772
773 /* set module params after bind */
774 skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
775 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
776
777 /* Start sinks pipe first */
778 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
779 if (sink_mconfig->pipe->conn_type !=
780 SKL_PIPE_CONN_TYPE_FE)
781 ret = skl_run_pipe(ctx,
782 sink_mconfig->pipe);
783 if (ret)
784 return ret;
785 }
786 }
787 }
788
789 if (!sink)
790 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
791
792 return 0;
793}
794
795/*
796 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
797 * we need to do the following:
798 * - Bind to sink pipeline
799 * Since the sink pipes can be running and we don't get mixer event on
800 * connect for already running mixer, we need to find the sink pipes
801 * here and bind to them. This way dynamic connect works.
802 * - Start sink pipeline, if not running
803 * - Then run current pipe
804 */
805static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
806 struct skl *skl)
807{
808 struct skl_module_cfg *src_mconfig;
809 struct skl_sst *ctx = skl->skl_sst;
810 int ret = 0;
811
812 src_mconfig = w->priv;
813
814 /*
815 * find which sink it is connected to, bind with the sink,
816 * if sink is not started, start sink pipe first, then start
817 * this pipe
818 */
819 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
820 if (ret)
821 return ret;
822
823 /* Start source pipe last after starting all sinks */
824 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
825 return skl_run_pipe(ctx, src_mconfig->pipe);
826
827 return 0;
828}
829
830static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
831 struct snd_soc_dapm_widget *w, struct skl *skl)
832{
833 struct snd_soc_dapm_path *p;
834 struct snd_soc_dapm_widget *src_w = NULL;
835 struct skl_sst *ctx = skl->skl_sst;
836
837 snd_soc_dapm_widget_for_each_source_path(w, p) {
838 src_w = p->source;
839 if (!p->connect)
840 continue;
841
842 dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
843 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
844
845 /*
846 * here we will check widgets in the source pipelines; they can
847 * be of any widget type and we are only interested in the ones
848 * used by SKL, so check that first
849 */
850 if ((p->source->priv != NULL) &&
851 is_skl_dsp_widget_type(p->source)) {
852 return p->source;
853 }
854 }
855
856 if (src_w != NULL)
857 return skl_get_src_dsp_widget(src_w, skl);
858
859 return NULL;
860}
861
862/*
863 * in the Post-PMU event of mixer we need to do the following:
864 * - Check if this pipe is running
865 * - if not, then
866 * - bind this pipeline to its source pipeline
867 * if source pipe is already running, this means it is a dynamic
868 * connection and we need to bind only to that pipe
869 * - start this pipeline
870 */
871static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
872 struct skl *skl)
873{
874 int ret = 0;
875 struct snd_soc_dapm_widget *source, *sink;
876 struct skl_module_cfg *src_mconfig, *sink_mconfig;
877 struct skl_sst *ctx = skl->skl_sst;
878 int src_pipe_started = 0;
879
880 sink = w;
881 sink_mconfig = sink->priv;
882
883 /*
884 * If source pipe is already started, that means source is driving
885 * one more sink before this sink got connected, Since source is
886 * started, bind this sink to source and start this pipe.
887 */
888 source = skl_get_src_dsp_widget(w, skl);
889 if (source != NULL) {
890 src_mconfig = source->priv;
891 sink_mconfig = sink->priv;
892 src_pipe_started = 1;
893
894 /*
895 * check the pipe state; if it is not started then there is no
896 * need to bind or start the pipe
897 */
898 if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
899 src_pipe_started = 0;
900 }
901
902 if (src_pipe_started) {
903 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
904 if (ret)
905 return ret;
906
907 /* set module params after bind */
908 skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
909 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
910
911 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
912 ret = skl_run_pipe(ctx, sink_mconfig->pipe);
913 }
914
915 return ret;
916}
917
918/*
919 * in the Pre-PMD event of mixer we need to do the following:
920 * - Stop the pipe
921 * - find the source connections and remove that from dapm_path_list
922 * - unbind with source pipelines if still connected
923 */
924static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
925 struct skl *skl)
926{
927 struct skl_module_cfg *src_mconfig, *sink_mconfig;
928 int ret = 0, i;
929 struct skl_sst *ctx = skl->skl_sst;
930
931 sink_mconfig = w->priv;
932
933 /* Stop the pipe */
934 ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
935 if (ret)
936 return ret;
937
938 for (i = 0; i < sink_mconfig->max_in_queue; i++) {
939 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
940 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
941 if (!src_mconfig)
942 continue;
943 /*
944 * If path_found == 1, that means pmd for the source
945 * pipe has not occurred and the source is connected to
946 * some other sink, so it is the sink's responsibility
947 * to unbind itself from the source.
948 */
949 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
950 if (ret < 0)
951 return ret;
952
953 ret = skl_unbind_modules(ctx,
954 src_mconfig, sink_mconfig);
955 }
956 }
957
958 return ret;
959}
960
961/*
962 * in the Post-PMD event of mixer we need to do the following:
963 * - Free the mcps used
964 * - Free the mem used
965 * - Unbind the modules within the pipeline
966 * - Delete the pipeline (modules are not required to be explicitly
967 * deleted, pipeline delete is enough here)
968 */
969static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
970 struct skl *skl)
971{
972 struct skl_module_cfg *mconfig = w->priv;
973 struct skl_pipe_module *w_module;
974 struct skl_module_cfg *src_module = NULL, *dst_module;
975 struct skl_sst *ctx = skl->skl_sst;
976 struct skl_pipe *s_pipe = mconfig->pipe;
977 int ret = 0;
978
979 if (s_pipe->state == SKL_PIPE_INVALID)
980 return -EINVAL;
981
982 skl_tplg_free_pipe_mcps(skl, mconfig);
983 skl_tplg_free_pipe_mem(skl, mconfig);
984
985 list_for_each_entry(w_module, &s_pipe->w_list, node) {
986 dst_module = w_module->w->priv;
987
988 if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
989 skl_tplg_free_pipe_mcps(skl, dst_module);
990 if (src_module == NULL) {
991 src_module = dst_module;
992 continue;
993 }
994
995 skl_unbind_modules(ctx, src_module, dst_module);
996 src_module = dst_module;
997 }
998
999 ret = skl_delete_pipe(ctx, mconfig->pipe);
1000
1001 return skl_tplg_unload_pipe_modules(ctx, s_pipe);
1002}
1003
1004/*
1005 * in the Post-PMD event of PGA we need to do the following:
1006 * - Free the mcps used
1007 * - Stop the pipeline
1008 * - If the source pipe is connected, unbind from the source pipelines
1009 */
1010static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1011 struct skl *skl)
1012{
1013 struct skl_module_cfg *src_mconfig, *sink_mconfig;
1014 int ret = 0, i;
1015 struct skl_sst *ctx = skl->skl_sst;
1016
1017 src_mconfig = w->priv;
1018
1019 /* Stop the pipe since this is a mixin module */
1020 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
1021 if (ret)
1022 return ret;
1023
1024 for (i = 0; i < src_mconfig->max_out_queue; i++) {
1025 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1026 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1027 if (!sink_mconfig)
1028 continue;
1029 /*
1030 * This is a connector and if a path is found that means
1031 * the unbind between source and sink has not happened yet
1032 */
1033 ret = skl_unbind_modules(ctx, src_mconfig,
1034 sink_mconfig);
1035 }
1036 }
1037
1038 return ret;
1039}
1040
1041/*
1042 * In modelling, we assume there will be ONLY one mixer in a pipeline. If
1043 * a mixer is not required then it is treated as a static mixer aka vmixer with
1044 * a hard path to the source module.
1045 * So we don't need to check if the source is started or not, as the hard path
1046 * puts a dependency on each other
1047 */
1048static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
1049 struct snd_kcontrol *k, int event)
1050{
1051 struct snd_soc_dapm_context *dapm = w->dapm;
1052 struct skl *skl = get_skl_ctx(dapm->dev);
1053
1054 switch (event) {
1055 case SND_SOC_DAPM_PRE_PMU:
1056 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1057
1058 case SND_SOC_DAPM_POST_PMU:
1059 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1060
1061 case SND_SOC_DAPM_PRE_PMD:
1062 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1063
1064 case SND_SOC_DAPM_POST_PMD:
1065 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1066 }
1067
1068 return 0;
1069}
1070
1071/*
1072 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1073 * second one is required that is created as another pipe entity.
1074 * The mixer is responsible for pipe management and represents a pipeline
1075 * instance
1076 */
1077static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1078 struct snd_kcontrol *k, int event)
1079{
1080 struct snd_soc_dapm_context *dapm = w->dapm;
1081 struct skl *skl = get_skl_ctx(dapm->dev);
1082
1083 switch (event) {
1084 case SND_SOC_DAPM_PRE_PMU:
1085 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1086
1087 case SND_SOC_DAPM_POST_PMU:
1088 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1089
1090 case SND_SOC_DAPM_PRE_PMD:
1091 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1092
1093 case SND_SOC_DAPM_POST_PMD:
1094 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1095 }
1096
1097 return 0;
1098}
1099
1100/*
1101 * In modelling, we assume the rest of the modules in a pipeline are PGAs. But we
1102 * are interested in the last PGA (leaf PGA) in a pipeline, to disconnect from
1103 * the sink when it is running (two FE to one BE or one FE to two BE)
1104 * scenarios
1105 */
1106static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1107 struct snd_kcontrol *k, int event)
1108
1109{
1110 struct snd_soc_dapm_context *dapm = w->dapm;
1111 struct skl *skl = get_skl_ctx(dapm->dev);
1112
1113 switch (event) {
1114 case SND_SOC_DAPM_PRE_PMU:
1115 return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1116
1117 case SND_SOC_DAPM_POST_PMD:
1118 return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1119 }
1120
1121 return 0;
1122}
1123
1124static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1125 unsigned int __user *data, unsigned int size)
1126{
1127 struct soc_bytes_ext *sb =
1128 (struct soc_bytes_ext *)kcontrol->private_value;
1129 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1130 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1131 struct skl_module_cfg *mconfig = w->priv;
1132 struct skl *skl = get_skl_ctx(w->dapm->dev);
1133
1134 if (w->power)
1135 skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
1136 bc->size, bc->param_id, mconfig);
1137
1138 /* decrement size for TLV header */
1139 size -= 2 * sizeof(u32);
1140
1141 /* check size as we don't want to send kernel data */
1142 if (size > bc->max)
1143 size = bc->max;
1144
1145 if (bc->params) {
1146 if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1147 return -EFAULT;
1148 if (copy_to_user(data + 1, &size, sizeof(u32)))
1149 return -EFAULT;
1150 if (copy_to_user(data + 2, bc->params, size))
1151 return -EFAULT;
1152 }
1153
1154 return 0;
1155}
1156
1157#define SKL_PARAM_VENDOR_ID 0xff
1158
1159static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1160 const unsigned int __user *data, unsigned int size)
1161{
1162 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1163 struct skl_module_cfg *mconfig = w->priv;
1164 struct soc_bytes_ext *sb =
1165 (struct soc_bytes_ext *)kcontrol->private_value;
1166 struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1167 struct skl *skl = get_skl_ctx(w->dapm->dev);
1168
1169 if (ac->params) {
1170 if (size > ac->max)
1171 return -EINVAL;
1172
1173 ac->size = size;
1174 /*
1175 * if the param_id is of type Vendor, firmware expects the actual
1176 * parameter id and size from the control.
1177 */
1178 if (ac->param_id == SKL_PARAM_VENDOR_ID) {
1179 if (copy_from_user(ac->params, data, size))
1180 return -EFAULT;
1181 } else {
1182 if (copy_from_user(ac->params,
1183 data + 2, size))
1184 return -EFAULT;
1185 }
1186
1187 if (w->power)
1188 return skl_set_module_params(skl->skl_sst,
1189 (u32 *)ac->params, ac->size,
1190 ac->param_id, mconfig);
1191 }
1192
1193 return 0;
1194}
1195
1196/*
1197 * Fill the dma id for host and link. In case of a passthrough
1198 * pipeline, both host and link are in the same pipeline, so we
1199 * need to copy the link and host ids based on dev_type
1200 */
1201static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1202 struct skl_pipe_params *params)
1203{
1204 struct skl_pipe *pipe = mcfg->pipe;
1205
1206 if (pipe->passthru) {
1207 switch (mcfg->dev_type) {
1208 case SKL_DEVICE_HDALINK:
1209 pipe->p_params->link_dma_id = params->link_dma_id;
1210 break;
1211
1212 case SKL_DEVICE_HDAHOST:
1213 pipe->p_params->host_dma_id = params->host_dma_id;
1214 break;
1215
1216 default:
1217 break;
1218 }
1219 pipe->p_params->s_fmt = params->s_fmt;
1220 pipe->p_params->ch = params->ch;
1221 pipe->p_params->s_freq = params->s_freq;
1222 pipe->p_params->stream = params->stream;
1223
1224 } else {
1225 memcpy(pipe->p_params, params, sizeof(*params));
1226 }
1227}
1228
1229/*
1230 * The FE params are passed by hw_params of the DAI.
1231 * On hw_params, the params are stored in Gateway module of the FE and we
1232 * need to calculate the format in DSP module configuration, that
1233 * conversion is done here
1234 */
1235int skl_tplg_update_pipe_params(struct device *dev,
1236 struct skl_module_cfg *mconfig,
1237 struct skl_pipe_params *params)
1238{
1239 struct skl_module_fmt *format = NULL;
1240
1241 skl_tplg_fill_dma_id(mconfig, params);
1242
1243 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
1244 format = &mconfig->in_fmt[0];
1245 else
1246 format = &mconfig->out_fmt[0];
1247
1248 /* set the hw_params */
1249 format->s_freq = params->s_freq;
1250 format->channels = params->ch;
1251 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1252
1253 /*
1254 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
1255 * container so update bit depth accordingly
1256 */
1257 switch (format->valid_bit_depth) {
1258 case SKL_DEPTH_16BIT:
1259 format->bit_depth = format->valid_bit_depth;
1260 break;
1261
1262 case SKL_DEPTH_24BIT:
1263 case SKL_DEPTH_32BIT:
1264 format->bit_depth = SKL_DEPTH_32BIT;
1265 break;
1266
1267 default:
1268 dev_err(dev, "Invalid bit depth %x for pipe\n",
1269 format->valid_bit_depth);
1270 return -EINVAL;
1271 }
1272
1273 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1274 mconfig->ibs = (format->s_freq / 1000) *
1275 (format->channels) *
1276 (format->bit_depth >> 3);
1277 } else {
1278 mconfig->obs = (format->s_freq / 1000) *
1279 (format->channels) *
1280 (format->bit_depth >> 3);
1281 }
1282
1283 return 0;
1284}
1285
1286/*
1287 * Query the module config for the FE DAI
1288 * This is used to find the hw_params set for that DAI and apply to FE
1289 * pipeline
1290 */
1291struct skl_module_cfg *
1292skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1293{
1294 struct snd_soc_dapm_widget *w;
1295 struct snd_soc_dapm_path *p = NULL;
1296
1297 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1298 w = dai->playback_widget;
1299 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1300 if (p->connect && p->sink->power &&
1301 !is_skl_dsp_widget_type(p->sink))
1302 continue;
1303
1304 if (p->sink->priv) {
1305 dev_dbg(dai->dev, "set params for %s\n",
1306 p->sink->name);
1307 return p->sink->priv;
1308 }
1309 }
1310 } else {
1311 w = dai->capture_widget;
1312 snd_soc_dapm_widget_for_each_source_path(w, p) {
1313 if (p->connect && p->source->power &&
1314 !is_skl_dsp_widget_type(p->source))
1315 continue;
1316
1317 if (p->source->priv) {
1318 dev_dbg(dai->dev, "set params for %s\n",
1319 p->source->name);
1320 return p->source->priv;
1321 }
1322 }
1323 }
1324
1325 return NULL;
1326}
1327
1328static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1329 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1330{
1331 struct snd_soc_dapm_path *p;
1332 struct skl_module_cfg *mconfig = NULL;
1333
1334 snd_soc_dapm_widget_for_each_source_path(w, p) {
1335 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1336 if (p->connect &&
1337 (p->sink->id == snd_soc_dapm_aif_out) &&
1338 p->source->priv) {
1339 mconfig = p->source->priv;
1340 return mconfig;
1341 }
1342 mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1343 if (mconfig)
1344 return mconfig;
1345 }
1346 }
1347 return mconfig;
1348}
1349
1350static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1351 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1352{
1353 struct snd_soc_dapm_path *p;
1354 struct skl_module_cfg *mconfig = NULL;
1355
1356 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1357 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1358 if (p->connect &&
1359 (p->source->id == snd_soc_dapm_aif_in) &&
1360 p->sink->priv) {
1361 mconfig = p->sink->priv;
1362 return mconfig;
1363 }
1364 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1365 if (mconfig)
1366 return mconfig;
1367 }
1368 }
1369 return mconfig;
1370}
1371
1372struct skl_module_cfg *
1373skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1374{
1375 struct snd_soc_dapm_widget *w;
1376 struct skl_module_cfg *mconfig;
1377
1378 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1379 w = dai->playback_widget;
1380 mconfig = skl_get_mconfig_pb_cpr(dai, w);
1381 } else {
1382 w = dai->capture_widget;
1383 mconfig = skl_get_mconfig_cap_cpr(dai, w);
1384 }
1385 return mconfig;
1386}
1387
1388static u8 skl_tplg_be_link_type(int dev_type)
1389{
1390 int ret;
1391
1392 switch (dev_type) {
1393 case SKL_DEVICE_BT:
1394 ret = NHLT_LINK_SSP;
1395 break;
1396
1397 case SKL_DEVICE_DMIC:
1398 ret = NHLT_LINK_DMIC;
1399 break;
1400
1401 case SKL_DEVICE_I2S:
1402 ret = NHLT_LINK_SSP;
1403 break;
1404
1405 case SKL_DEVICE_HDALINK:
1406 ret = NHLT_LINK_HDA;
1407 break;
1408
1409 default:
1410 ret = NHLT_LINK_INVALID;
1411 break;
1412 }
1413
1414 return ret;
1415}
1416
1417/*
1418 * Fill the BE gateway parameters
1419 * The BE gateway expects a blob of parameters which are kept in the ACPI
1420 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1421 * The port can have multiple settings so pick based on the PCM
1422 * parameters
1423 */
1424static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1425 struct skl_module_cfg *mconfig,
1426 struct skl_pipe_params *params)
1427{
1428 struct nhlt_specific_cfg *cfg;
1429 struct skl *skl = get_skl_ctx(dai->dev);
1430 int link_type = skl_tplg_be_link_type(mconfig->dev_type);
1431
1432 skl_tplg_fill_dma_id(mconfig, params);
1433
1434 if (link_type == NHLT_LINK_HDA)
1435 return 0;
1436
1437 /* update the blob based on virtual bus_id*/
1438 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1439 params->s_fmt, params->ch,
1440 params->s_freq, params->stream);
1441 if (cfg) {
1442 mconfig->formats_config.caps_size = cfg->size;
1443 mconfig->formats_config.caps = (u32 *) &cfg->caps;
1444 } else {
1445 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
1446 mconfig->vbus_id, link_type,
1447 params->stream);
1448 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
1449 params->ch, params->s_freq, params->s_fmt);
1450 return -EINVAL;
1451 }
1452
1453 return 0;
1454}
1455
1456static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1457 struct snd_soc_dapm_widget *w,
1458 struct skl_pipe_params *params)
1459{
1460 struct snd_soc_dapm_path *p;
1461 int ret = -EIO;
1462
1463 snd_soc_dapm_widget_for_each_source_path(w, p) {
1464 if (p->connect && is_skl_dsp_widget_type(p->source) &&
1465 p->source->priv) {
1466
1467 ret = skl_tplg_be_fill_pipe_params(dai,
1468 p->source->priv, params);
1469 if (ret < 0)
1470 return ret;
1471 } else {
1472 ret = skl_tplg_be_set_src_pipe_params(dai,
1473 p->source, params);
1474 if (ret < 0)
1475 return ret;
1476 }
1477 }
1478
1479 return ret;
1480}
1481
1482static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1483 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1484{
1485 struct snd_soc_dapm_path *p = NULL;
1486 int ret = -EIO;
1487
1488 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1489 if (p->connect && is_skl_dsp_widget_type(p->sink) &&
1490 p->sink->priv) {
1491
1492 ret = skl_tplg_be_fill_pipe_params(dai,
1493 p->sink->priv, params);
1494 if (ret < 0)
1495 return ret;
1496 } else {
1497 ret = skl_tplg_be_set_sink_pipe_params(
1498 dai, p->sink, params);
1499 if (ret < 0)
1500 return ret;
1501 }
1502 }
1503
1504 return ret;
1505}
1506
1507/*
1508 * BE hw_params can be a source parameters (capture) or sink parameters
1509 * (playback). Based on sink and source we need to either find the source
1510 * list or the sink list and set the pipeline parameters
1511 */
1512int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1513 struct skl_pipe_params *params)
1514{
1515 struct snd_soc_dapm_widget *w;
1516
1517 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1518 w = dai->playback_widget;
1519
1520 return skl_tplg_be_set_src_pipe_params(dai, w, params);
1521
1522 } else {
1523 w = dai->capture_widget;
1524
1525 return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1526 }
1527
1528 return 0;
1529}
1530
1531static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1532 {SKL_MIXER_EVENT, skl_tplg_mixer_event},
1533 {SKL_VMIXER_EVENT, skl_tplg_vmixer_event},
1534 {SKL_PGA_EVENT, skl_tplg_pga_event},
1535};
1536
1537static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1538 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1539 skl_tplg_tlv_control_set},
1540};
1541
1542static int skl_tplg_fill_pipe_tkn(struct device *dev,
1543 struct skl_pipe *pipe, u32 tkn,
1544 u32 tkn_val)
1545{
1546
1547 switch (tkn) {
1548 case SKL_TKN_U32_PIPE_CONN_TYPE:
1549 pipe->conn_type = tkn_val;
1550 break;
1551
1552 case SKL_TKN_U32_PIPE_PRIORITY:
1553 pipe->pipe_priority = tkn_val;
1554 break;
1555
1556 case SKL_TKN_U32_PIPE_MEM_PGS:
1557 pipe->memory_pages = tkn_val;
1558 break;
1559
1560 case SKL_TKN_U32_PMODE:
1561 pipe->lp_mode = tkn_val;
1562 break;
1563
1564 default:
1565 dev_err(dev, "Token not handled %d\n", tkn);
1566 return -EINVAL;
1567 }
1568
1569 return 0;
1570}
1571
1572/*
1573 * Add pipeline by parsing the relevant tokens
1574 * Return an existing pipe if the pipe already exists.
1575 */
1576static int skl_tplg_add_pipe(struct device *dev,
1577 struct skl_module_cfg *mconfig, struct skl *skl,
1578 struct snd_soc_tplg_vendor_value_elem *tkn_elem)
1579{
1580 struct skl_pipeline *ppl;
1581 struct skl_pipe *pipe;
1582 struct skl_pipe_params *params;
1583
1584 list_for_each_entry(ppl, &skl->ppl_list, node) {
1585 if (ppl->pipe->ppl_id == tkn_elem->value) {
1586 mconfig->pipe = ppl->pipe;
1587 return EEXIST;
1588 }
1589 }
1590
1591 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
1592 if (!ppl)
1593 return -ENOMEM;
1594
1595 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
1596 if (!pipe)
1597 return -ENOMEM;
1598
1599 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
1600 if (!params)
1601 return -ENOMEM;
1602
1603 pipe->p_params = params;
1604 pipe->ppl_id = tkn_elem->value;
1605 INIT_LIST_HEAD(&pipe->w_list);
1606
1607 ppl->pipe = pipe;
1608 list_add(&ppl->node, &skl->ppl_list);
1609
1610 mconfig->pipe = pipe;
1611 mconfig->pipe->state = SKL_PIPE_INVALID;
1612
1613 return 0;
1614}
1615
1616static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
1617 struct skl_module_pin *m_pin,
1618 int pin_index, u32 value)
1619{
1620 switch (tkn) {
1621 case SKL_TKN_U32_PIN_MOD_ID:
1622 m_pin[pin_index].id.module_id = value;
1623 break;
1624
1625 case SKL_TKN_U32_PIN_INST_ID:
1626 m_pin[pin_index].id.instance_id = value;
1627 break;
1628
1629 default:
1630 dev_err(dev, "%d Not a pin token\n", value);
1631 return -EINVAL;
1632 }
1633
1634 return 0;
1635}
1636
1637/*
1638 * Parse for pin config specific tokens to fill up the
1639 * module private data
1640 */
1641static int skl_tplg_fill_pins_info(struct device *dev,
1642 struct skl_module_cfg *mconfig,
1643 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1644 int dir, int pin_count)
1645{
1646 int ret;
1647 struct skl_module_pin *m_pin;
1648
1649 switch (dir) {
1650 case SKL_DIR_IN:
1651 m_pin = mconfig->m_in_pin;
1652 break;
1653
1654 case SKL_DIR_OUT:
1655 m_pin = mconfig->m_out_pin;
1656 break;
1657
1658 default:
1659 dev_err(dev, "Invalid direction value\n");
1660 return -EINVAL;
1661 }
1662
1663 ret = skl_tplg_fill_pin(dev, tkn_elem->token,
1664 m_pin, pin_count, tkn_elem->value);
1665
1666 if (ret < 0)
1667 return ret;
1668
1669 m_pin[pin_count].in_use = false;
1670 m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
1671
1672 return 0;
3af36706
VK
1673}
1674
6277e832
SN
1675/*
1676 * Fill up input/output module config format based
1677 * on the direction
1678 */
1679static int skl_tplg_fill_fmt(struct device *dev,
1680 struct skl_module_cfg *mconfig, u32 tkn,
1681 u32 value, u32 dir, u32 pin_count)
1682{
1683 struct skl_module_fmt *dst_fmt;
1684
1685 switch (dir) {
1686 case SKL_DIR_IN:
1687 dst_fmt = mconfig->in_fmt;
1688 dst_fmt += pin_count;
1689 break;
1690
1691 case SKL_DIR_OUT:
1692 dst_fmt = mconfig->out_fmt;
1693 dst_fmt += pin_count;
1694 break;
1695
1696 default:
1697 dev_err(dev, "Invalid direction value\n");
1698 return -EINVAL;
1699 }
1700
1701 switch (tkn) {
1702 case SKL_TKN_U32_FMT_CH:
1703 dst_fmt->channels = value;
1704 break;
1705
1706 case SKL_TKN_U32_FMT_FREQ:
1707 dst_fmt->s_freq = value;
1708 break;
1709
1710 case SKL_TKN_U32_FMT_BIT_DEPTH:
1711 dst_fmt->bit_depth = value;
1712 break;
1713
1714 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1715 dst_fmt->valid_bit_depth = value;
1716 break;
1717
1718 case SKL_TKN_U32_FMT_CH_CONFIG:
1719 dst_fmt->ch_cfg = value;
1720 break;
1721
1722 case SKL_TKN_U32_FMT_INTERLEAVE:
1723 dst_fmt->interleaving_style = value;
1724 break;
1725
1726 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1727 dst_fmt->sample_type = value;
1728 break;
1729
1730 case SKL_TKN_U32_FMT_CH_MAP:
1731 dst_fmt->ch_map = value;
1732 break;
1733
1734 default:
1735 dev_err(dev, "Invalid token %d\n", tkn);
1736 return -EINVAL;
1737 }
1738
1739 return 0;
1740}
1741
1742static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
1743 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
1744{
1745 if (uuid_tkn->token == SKL_TKN_UUID)
1746 memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
1747 else {
1748 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
1749 return -EINVAL;
1750 }
1751
1752 return 0;
1753}
1754
1755static void skl_tplg_fill_pin_dynamic_val(
1756 struct skl_module_pin *mpin, u32 pin_count, u32 value)
1757{
1758 int i;
1759
1760 for (i = 0; i < pin_count; i++)
1761 mpin[i].is_dynamic = value;
1762}
1763
1764/*
1765 * Parse tokens to fill up the module private data
1766 */
1767static int skl_tplg_get_token(struct device *dev,
1768 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1769 struct skl *skl, struct skl_module_cfg *mconfig)
1770{
1771 int tkn_count = 0;
1772 int ret;
1773 static int is_pipe_exists;
1774 static int pin_index, dir;
1775
1776 if (tkn_elem->token > SKL_TKN_MAX)
1777 return -EINVAL;
1778
1779 switch (tkn_elem->token) {
1780 case SKL_TKN_U8_IN_QUEUE_COUNT:
1781 mconfig->max_in_queue = tkn_elem->value;
1782 mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
1783 sizeof(*mconfig->m_in_pin),
1784 GFP_KERNEL);
1785 if (!mconfig->m_in_pin)
1786 return -ENOMEM;
1787
1788 break;
1789
1790 case SKL_TKN_U8_OUT_QUEUE_COUNT:
1791 mconfig->max_out_queue = tkn_elem->value;
1792 mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
1793 sizeof(*mconfig->m_out_pin),
1794 GFP_KERNEL);
1795
1796 if (!mconfig->m_out_pin)
1797 return -ENOMEM;
1798
1799 break;
1800
1801 case SKL_TKN_U8_DYN_IN_PIN:
1802 if (!mconfig->m_in_pin)
1803 return -ENOMEM;
1804
1805 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
1806 mconfig->max_in_queue, tkn_elem->value);
1807
1808 break;
1809
1810 case SKL_TKN_U8_DYN_OUT_PIN:
1811 if (!mconfig->m_out_pin)
1812 return -ENOMEM;
1813
1814 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
1815 mconfig->max_out_queue, tkn_elem->value);
1816
1817 break;
1818
1819 case SKL_TKN_U8_TIME_SLOT:
1820 mconfig->time_slot = tkn_elem->value;
1821 break;
1822
1823 case SKL_TKN_U8_CORE_ID:
1824 mconfig->core_id = tkn_elem->value;
1825
1826 case SKL_TKN_U8_MOD_TYPE:
1827 mconfig->m_type = tkn_elem->value;
1828 break;
1829
1830 case SKL_TKN_U8_DEV_TYPE:
1831 mconfig->dev_type = tkn_elem->value;
1832 break;
1833
1834 case SKL_TKN_U8_HW_CONN_TYPE:
1835 mconfig->hw_conn_type = tkn_elem->value;
1836 break;
1837
1838 case SKL_TKN_U16_MOD_INST_ID:
1839 mconfig->id.instance_id =
1840 tkn_elem->value;
1841 break;
1842
1843 case SKL_TKN_U32_MEM_PAGES:
1844 mconfig->mem_pages = tkn_elem->value;
1845 break;
1846
1847 case SKL_TKN_U32_MAX_MCPS:
1848 mconfig->mcps = tkn_elem->value;
1849 break;
1850
1851 case SKL_TKN_U32_OBS:
1852 mconfig->obs = tkn_elem->value;
1853 break;
1854
1855 case SKL_TKN_U32_IBS:
1856 mconfig->ibs = tkn_elem->value;
1857 break;
1858
1859 case SKL_TKN_U32_VBUS_ID:
1860 mconfig->vbus_id = tkn_elem->value;
1861 break;
1862
1863 case SKL_TKN_U32_PARAMS_FIXUP:
1864 mconfig->params_fixup = tkn_elem->value;
1865 break;
1866
1867 case SKL_TKN_U32_CONVERTER:
1868 mconfig->converter = tkn_elem->value;
1869 break;
1870
1871 case SKL_TKL_U32_D0I3_CAPS:
1872 mconfig->d0i3_caps = tkn_elem->value;
1873 break;
1874
1875 case SKL_TKN_U32_PIPE_ID:
1876 ret = skl_tplg_add_pipe(dev,
1877 mconfig, skl, tkn_elem);
1878
1879 if (ret < 0)
1880 return is_pipe_exists;
1881
1882 if (ret == EEXIST)
1883 is_pipe_exists = 1;
1884
1885 break;
1886
1887 case SKL_TKN_U32_PIPE_CONN_TYPE:
1888 case SKL_TKN_U32_PIPE_PRIORITY:
1889 case SKL_TKN_U32_PIPE_MEM_PGS:
1890 case SKL_TKN_U32_PMODE:
1891 if (is_pipe_exists) {
1892 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
1893 tkn_elem->token, tkn_elem->value);
1894 if (ret < 0)
1895 return ret;
1896 }
1897
1898 break;
1899
1900 /*
1901 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
1902 * direction and the pin count. The first four bits represent
1903 * direction and next four the pin count.
1904 */
1905 case SKL_TKN_U32_DIR_PIN_COUNT:
1906 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
1907 pin_index = (tkn_elem->value &
1908 SKL_PIN_COUNT_MASK) >> 4;
1909
1910 break;
1911
1912 case SKL_TKN_U32_FMT_CH:
1913 case SKL_TKN_U32_FMT_FREQ:
1914 case SKL_TKN_U32_FMT_BIT_DEPTH:
1915 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1916 case SKL_TKN_U32_FMT_CH_CONFIG:
1917 case SKL_TKN_U32_FMT_INTERLEAVE:
1918 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1919 case SKL_TKN_U32_FMT_CH_MAP:
1920 ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
1921 tkn_elem->value, dir, pin_index);
1922
1923 if (ret < 0)
1924 return ret;
1925
1926 break;
1927
1928 case SKL_TKN_U32_PIN_MOD_ID:
1929 case SKL_TKN_U32_PIN_INST_ID:
1930 ret = skl_tplg_fill_pins_info(dev,
1931 mconfig, tkn_elem, dir,
1932 pin_index);
1933 if (ret < 0)
1934 return ret;
1935
1936 break;
1937
1938 case SKL_TKN_U32_CAPS_SIZE:
1939 mconfig->formats_config.caps_size =
1940 tkn_elem->value;
1941
1942 break;
1943
1944 case SKL_TKN_U32_PROC_DOMAIN:
1945 mconfig->domain =
1946 tkn_elem->value;
1947
1948 break;
1949
1950 case SKL_TKN_U8_IN_PIN_TYPE:
1951 case SKL_TKN_U8_OUT_PIN_TYPE:
1952 case SKL_TKN_U8_CONN_TYPE:
1953 break;
1954
1955 default:
1956 dev_err(dev, "Token %d not handled\n",
1957 tkn_elem->token);
1958 return -EINVAL;
4cd9899f 1959 }
6277e832
SN
1960
1961 tkn_count++;
1962
1963 return tkn_count;
1964}
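/*
 * Illustrative sketch, not part of the original driver: decoding the packed
 * SKL_TKN_U32_DIR_PIN_COUNT value handled in skl_tplg_get_token() above.
 * For example, a token value of 0x31 yields dir = 1 and pin_index = 3.
 * The helper name is hypothetical.
 */
static inline void skl_tplg_decode_dir_pin(u32 value, u32 *dir, u32 *pin_index)
{
	*dir = value & SKL_IN_DIR_BIT_MASK;		/* bit 0: direction */
	*pin_index = (value & SKL_PIN_COUNT_MASK) >> 4;	/* bits 4..7: pin count */
}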
1965
1966/*
1967 * Parse the vendor array for specific tokens to construct
1968 * module private data
1969 */
1970static int skl_tplg_get_tokens(struct device *dev,
1971 char *pvt_data, struct skl *skl,
1972 struct skl_module_cfg *mconfig, int block_size)
1973{
1974 struct snd_soc_tplg_vendor_array *array;
1975 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
1976 int tkn_count = 0, ret;
1977 int off = 0, tuple_size = 0;
1978
1979 if (block_size <= 0)
1980 return -EINVAL;
1981
1982 while (tuple_size < block_size) {
1983 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
1984
1985 off += array->size;
1986
1987 switch (array->type) {
1988 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
ecd286a9 1989 dev_warn(dev, "no string tokens expected for skl tplg\n");
6277e832
SN
1990 continue;
1991
1992 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
1993 ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
1994 if (ret < 0)
1995 return ret;
1996
1997 tuple_size += sizeof(*array->uuid);
1998
1999 continue;
2000
2001 default:
2002 tkn_elem = array->value;
2003 tkn_count = 0;
2004 break;
2005 }
2006
2007 while (tkn_count <= (array->num_elems - 1)) {
2008 ret = skl_tplg_get_token(dev, tkn_elem,
2009 skl, mconfig);
2010
2011 if (ret < 0)
2012 return ret;
2013
2014 tkn_count = tkn_count + ret;
2015 tkn_elem++;
2016 }
2017
2018 tuple_size += tkn_count * sizeof(*tkn_elem);
2019 }
2020
2021 return 0;
2022}
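/*
 * Illustrative sketch, not part of the original driver: per-element sizes of
 * the tuple types that can appear in a vendor array. skl_tplg_get_tokens()
 * above advances tuple_size by the uuid and value element sizes; string
 * tuples are not expected in widget private data. The helper name is
 * hypothetical.
 */
static inline size_t skl_tplg_elem_size(struct snd_soc_tplg_vendor_array *array)
{
	switch (array->type) {
	case SND_SOC_TPLG_TUPLE_TYPE_UUID:
		return sizeof(struct snd_soc_tplg_vendor_uuid_elem);
	case SND_SOC_TPLG_TUPLE_TYPE_STRING:
		return sizeof(struct snd_soc_tplg_vendor_string_elem);
	default:
		return sizeof(struct snd_soc_tplg_vendor_value_elem);
	}
}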
2023
2024/*
2025 * Every data block is preceded by descriptors giving the number
2026 * of data blocks, the type of each block and its size
2027 */
2028static int skl_tplg_get_desc_blocks(struct device *dev,
2029 struct snd_soc_tplg_vendor_array *array)
2030{
2031 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2032
2033 tkn_elem = array->value;
2034
2035 switch (tkn_elem->token) {
2036 case SKL_TKN_U8_NUM_BLOCKS:
2037 case SKL_TKN_U8_BLOCK_TYPE:
2038 case SKL_TKN_U16_BLOCK_SIZE:
2039 return tkn_elem->value;
2040
2041 default:
ecd286a9 2042 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
6277e832
SN
2043 break;
2044 }
2045
2046 return -EINVAL;
2047}
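/*
 * Illustrative layout of the private data parsed by the functions below
 * (derived from the descriptor handling in this file, not a normative
 * definition):
 *
 *   [vendor array: SKL_TKN_U8_NUM_BLOCKS descriptor]
 *   repeated for each block:
 *     [vendor array: SKL_TKN_U8_BLOCK_TYPE descriptor]
 *     [vendor array: SKL_TKN_U16_BLOCK_SIZE descriptor]
 *     [block data: token tuples (SKL_TYPE_TUPLE) or a binary caps blob]
 */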
2048
2049/*
2050 * Parse the private data for tokens and their corresponding values.
2051 * The private data can have multiple data blocks: the first descriptor
2052 * gives the number of blocks, and each block is then preceded by
2053 * descriptors for its type and size.
2054 */
2055static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2056 struct skl *skl, struct device *dev,
2057 struct skl_module_cfg *mconfig)
2058{
2059 struct snd_soc_tplg_vendor_array *array;
2060 int num_blocks, block_size = 0, block_type, off = 0;
2061 char *data;
2062 int ret;
2063
2064 /* Read the NUM_DATA_BLOCKS descriptor */
2065 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2066 ret = skl_tplg_get_desc_blocks(dev, array);
2067 if (ret < 0)
2068 return ret;
2069 num_blocks = ret;
2070
2071 off += array->size;
2072 array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);
2073
2074 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2075 while (num_blocks > 0) {
2076 ret = skl_tplg_get_desc_blocks(dev, array);
2077
2078 if (ret < 0)
2079 return ret;
2080 block_type = ret;
2081 off += array->size;
2082
2083 array = (struct snd_soc_tplg_vendor_array *)
2084 (tplg_w->priv.data + off);
2085
2086 ret = skl_tplg_get_desc_blocks(dev, array);
2087
2088 if (ret < 0)
2089 return ret;
2090 block_size = ret;
2091 off += array->size;
2092
2093 array = (struct snd_soc_tplg_vendor_array *)
2094 (tplg_w->priv.data + off);
2095
2096 data = (tplg_w->priv.data + off);
2097
2098 if (block_type == SKL_TYPE_TUPLE) {
2099 ret = skl_tplg_get_tokens(dev, data,
2100 skl, mconfig, block_size);
2101
2102 if (ret < 0)
2103 return ret;
2104
2105 --num_blocks;
2106 } else {
2107 if (mconfig->formats_config.caps_size > 0)
2108 memcpy(mconfig->formats_config.caps, data,
2109 mconfig->formats_config.caps_size);
2110 --num_blocks;
2111 }
2112 }
2113
2114 return 0;
4cd9899f
HS
2115}
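/*
 * Note: a non-tuple block above is treated as the module's binary caps blob
 * and copied verbatim into formats_config; its size is expected to have been
 * set earlier via the SKL_TKN_U32_CAPS_SIZE token.
 */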
2116
fe3f4442
D
2117static void skl_clear_pin_config(struct snd_soc_platform *platform,
2118 struct snd_soc_dapm_widget *w)
2119{
2120 int i;
2121 struct skl_module_cfg *mconfig;
2122 struct skl_pipe *pipe;
2123
2124 if (!strncmp(w->dapm->component->name, platform->component.name,
2125 strlen(platform->component.name))) {
2126 mconfig = w->priv;
2127 pipe = mconfig->pipe;
2128 for (i = 0; i < mconfig->max_in_queue; i++) {
2129 mconfig->m_in_pin[i].in_use = false;
2130 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2131 }
2132 for (i = 0; i < mconfig->max_out_queue; i++) {
2133 mconfig->m_out_pin[i].in_use = false;
2134 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2135 }
2136 pipe->state = SKL_PIPE_INVALID;
2137 mconfig->m_state = SKL_MODULE_UNINIT;
2138 }
2139}
2140
2141void skl_cleanup_resources(struct skl *skl)
2142{
2143 struct skl_sst *ctx = skl->skl_sst;
2144 struct snd_soc_platform *soc_platform = skl->platform;
2145 struct snd_soc_dapm_widget *w;
2146 struct snd_soc_card *card;
2147
2148 if (soc_platform == NULL)
2149 return;
2150
2151 card = soc_platform->component.card;
2152 if (!card || !card->instantiated)
2153 return;
2154
2155 skl->resource.mem = 0;
2156 skl->resource.mcps = 0;
2157
2158 list_for_each_entry(w, &card->widgets, list) {
2159 if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
2160 skl_clear_pin_config(soc_platform, w);
2161 }
2162
2163 skl_clear_module_cnt(ctx->dsp);
2164}
2165
3af36706
VK
2166/*
2167 * Topology core widget load callback
2168 *
2169 * This is used to save the private data for each widget which gives
2170 * information to the driver about module and pipeline parameters which the
2171 * DSP FW expects, such as ids, resource values, formats etc.
2172 */
2173static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
b663a8c5
JK
2174 struct snd_soc_dapm_widget *w,
2175 struct snd_soc_tplg_dapm_widget *tplg_w)
3af36706
VK
2176{
2177 int ret;
2178 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2179 struct skl *skl = ebus_to_skl(ebus);
2180 struct hdac_bus *bus = ebus_to_hbus(ebus);
2181 struct skl_module_cfg *mconfig;
3af36706
VK
2182
2183 if (!tplg_w->priv.size)
2184 goto bind_event;
2185
2186 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2187
2188 if (!mconfig)
2189 return -ENOMEM;
2190
2191 w->priv = mconfig;
09305da9 2192
b7c50555
VK
2193 /*
2194 * module binary can be loaded later, so set the module id to be queried
2195 * when the module is loaded for a use case
2196 */
2197 mconfig->id.module_id = -1;
3af36706 2198
6277e832
SN
2199 /* Parse private data for tuples */
2200 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2201 if (ret < 0)
2202 return ret;
3af36706
VK
2203bind_event:
2204 if (tplg_w->event_type == 0) {
3373f716 2205 dev_dbg(bus->dev, "ASoC: No event handler required\n");
3af36706
VK
2206 return 0;
2207 }
2208
2209 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
b663a8c5
JK
2210 ARRAY_SIZE(skl_tplg_widget_ops),
2211 tplg_w->event_type);
3af36706
VK
2212
2213 if (ret) {
2214 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2215 __func__, tplg_w->event_type);
2216 return -EINVAL;
2217 }
2218
2219 return 0;
2220}
2221
140adfba
JK
2222static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2223 struct snd_soc_tplg_bytes_control *bc)
2224{
2225 struct skl_algo_data *ac;
2226 struct skl_dfw_algo_data *dfw_ac =
2227 (struct skl_dfw_algo_data *)bc->priv.data;
2228
2229 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2230 if (!ac)
2231 return -ENOMEM;
2232
2233 /* Fill private data */
2234 ac->max = dfw_ac->max;
2235 ac->param_id = dfw_ac->param_id;
2236 ac->set_params = dfw_ac->set_params;
0d682104 2237 ac->size = dfw_ac->max;
140adfba
JK
2238
2239 if (ac->max) {
2240 ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2241 if (!ac->params)
2242 return -ENOMEM;
2243
edd7ea2d 2244 memcpy(ac->params, dfw_ac->params, ac->max);
140adfba
JK
2245 }
2246
2247 be->dobj.private = ac;
2248 return 0;
2249}
2250
2251static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2252 struct snd_kcontrol_new *kctl,
2253 struct snd_soc_tplg_ctl_hdr *hdr)
2254{
2255 struct soc_bytes_ext *sb;
2256 struct snd_soc_tplg_bytes_control *tplg_bc;
2257 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2258 struct hdac_bus *bus = ebus_to_hbus(ebus);
2259
2260 switch (hdr->ops.info) {
2261 case SND_SOC_TPLG_CTL_BYTES:
2262 tplg_bc = container_of(hdr,
2263 struct snd_soc_tplg_bytes_control, hdr);
2264 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2265 sb = (struct soc_bytes_ext *)kctl->private_value;
2266 if (tplg_bc->priv.size)
2267 return skl_init_algo_data(
2268 bus->dev, sb, tplg_bc);
2269 }
2270 break;
2271
2272 default:
2273 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
2274 hdr->ops.get, hdr->ops.put, hdr->ops.info);
2275 break;
2276 }
2277
2278 return 0;
2279}
2280
541070ce
SN
2281static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
2282 struct snd_soc_tplg_vendor_string_elem *str_elem,
2283 struct skl_dfw_manifest *minfo)
2284{
2285 int tkn_count = 0;
2286 static int ref_count;
2287
2288 switch (str_elem->token) {
2289 case SKL_TKN_STR_LIB_NAME:
2290 if (ref_count > minfo->lib_count - 1) {
2291 ref_count = 0;
2292 return -EINVAL;
2293 }
2294
2295 strncpy(minfo->lib[ref_count].name, str_elem->string,
2296 ARRAY_SIZE(minfo->lib[ref_count].name));
2297 ref_count++;
2298 tkn_count++;
2299 break;
2300
2301 default:
ecd286a9 2302 dev_err(dev, "Not a string token %d\n", str_elem->token);
541070ce
SN
2303 break;
2304 }
2305
2306 return tkn_count;
2307}
2308
2309static int skl_tplg_get_str_tkn(struct device *dev,
2310 struct snd_soc_tplg_vendor_array *array,
2311 struct skl_dfw_manifest *minfo)
2312{
2313 int tkn_count = 0, ret;
2314 struct snd_soc_tplg_vendor_string_elem *str_elem;
2315
2316 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
2317 while (tkn_count < array->num_elems) {
2318 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, minfo);
2319 str_elem++;
2320
2321 if (ret < 0)
2322 return ret;
2323
2324 tkn_count = tkn_count + ret;
2325 }
2326
2327 return tkn_count;
2328}
2329
2330static int skl_tplg_get_int_tkn(struct device *dev,
2331 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2332 struct skl_dfw_manifest *minfo)
2333{
2334 int tkn_count = 0;
2335
2336 switch (tkn_elem->token) {
2337 case SKL_TKN_U32_LIB_COUNT:
2338 minfo->lib_count = tkn_elem->value;
2339 tkn_count++;
2340 break;
2341
2342 default:
ecd286a9 2343 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
541070ce
SN
2344 return -EINVAL;
2345 }
2346
2347 return tkn_count;
2348}
2349
2350/*
2351 * Fill the manifest structure by parsing the tokens based on the
2352 * type.
2353 */
2354static int skl_tplg_get_manifest_tkn(struct device *dev,
2355 char *pvt_data, struct skl_dfw_manifest *minfo,
2356 int block_size)
2357{
2358 int tkn_count = 0, ret;
2359 int off = 0, tuple_size = 0;
2360 struct snd_soc_tplg_vendor_array *array;
2361 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2362
2363 if (block_size <= 0)
2364 return -EINVAL;
2365
2366 while (tuple_size < block_size) {
2367 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2368 off += array->size;
2369 switch (array->type) {
2370 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
2371 ret = skl_tplg_get_str_tkn(dev, array, minfo);
2372
2373 if (ret < 0)
2374 return ret;
2375 tkn_count += ret;
2376
2377 tuple_size += tkn_count *
2378 sizeof(struct snd_soc_tplg_vendor_string_elem);
2379 continue;
2380
2381 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
ecd286a9 2382 dev_warn(dev, "no uuid tokens expected for skl tplg manifest\n");
541070ce
SN
2383 continue;
2384
2385 default:
2386 tkn_elem = array->value;
2387 tkn_count = 0;
2388 break;
2389 }
2390
2391 while (tkn_count <= array->num_elems - 1) {
2392 ret = skl_tplg_get_int_tkn(dev,
2393 tkn_elem, minfo);
2394 if (ret < 0)
2395 return ret;
2396
2397 tkn_count = tkn_count + ret;
2398 tkn_elem++;
2399 tuple_size += tkn_count *
2400 sizeof(struct snd_soc_tplg_vendor_value_elem);
2401 break;
2402 }
2403 tkn_count = 0;
2404 }
2405
2406 return 0;
2407}
2408
2409/*
2410 * Parse manifest private data for tokens. The private data block is
2411 * preceded by descriptors for type and size of data block.
2412 */
2413static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
2414 struct device *dev, struct skl_dfw_manifest *minfo)
2415{
2416 struct snd_soc_tplg_vendor_array *array;
2417 int num_blocks, block_size = 0, block_type, off = 0;
2418 char *data;
2419 int ret;
2420
2421 /* Read the NUM_DATA_BLOCKS descriptor */
2422 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
2423 ret = skl_tplg_get_desc_blocks(dev, array);
2424 if (ret < 0)
2425 return ret;
2426 num_blocks = ret;
2427
2428 off += array->size;
2429 array = (struct snd_soc_tplg_vendor_array *)
2430 (manifest->priv.data + off);
2431
2432 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2433 while (num_blocks > 0) {
2434 ret = skl_tplg_get_desc_blocks(dev, array);
2435
2436 if (ret < 0)
2437 return ret;
2438 block_type = ret;
2439 off += array->size;
2440
2441 array = (struct snd_soc_tplg_vendor_array *)
2442 (manifest->priv.data + off);
2443
2444 ret = skl_tplg_get_desc_blocks(dev, array);
2445
2446 if (ret < 0)
2447 return ret;
2448 block_size = ret;
2449 off += array->size;
2450
2451 array = (struct snd_soc_tplg_vendor_array *)
2452 (manifest->priv.data + off);
2453
2454 data = (manifest->priv.data + off);
2455
2456 if (block_type == SKL_TYPE_TUPLE) {
2457 ret = skl_tplg_get_manifest_tkn(dev, data, minfo,
2458 block_size);
2459
2460 if (ret < 0)
2461 return ret;
2462
2463 --num_blocks;
2464 } else {
2465 return -EINVAL;
2466 }
2467 }
2468
2469 return 0;
2470}
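/*
 * Note: the manifest private data follows the same NUM_BLOCKS/BLOCK_TYPE/
 * BLOCK_SIZE layout sketched above; unlike widget private data, only tuple
 * blocks are accepted here and any other block type is rejected with
 * -EINVAL.
 */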
2471
15ecaba9
K
2472static int skl_manifest_load(struct snd_soc_component *cmpnt,
2473 struct snd_soc_tplg_manifest *manifest)
2474{
2475 struct skl_dfw_manifest *minfo;
2476 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2477 struct hdac_bus *bus = ebus_to_hbus(ebus);
2478 struct skl *skl = ebus_to_skl(ebus);
2479 int ret = 0;
2480
c15ad605
VK
2481 /* proceed only if we have private data defined */
2482 if (manifest->priv.size == 0)
2483 return 0;
2484
15ecaba9 2485 minfo = &skl->skl_sst->manifest;
541070ce
SN
2486
2487 skl_tplg_get_manifest_data(manifest, bus->dev, minfo);
15ecaba9
K
2488
2489 if (minfo->lib_count > HDA_MAX_LIB) {
2490 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
2491 minfo->lib_count);
2492 ret = -EINVAL;
2493 }
2494
2495 return ret;
2496}
2497
3af36706
VK
2498static struct snd_soc_tplg_ops skl_tplg_ops = {
2499 .widget_load = skl_tplg_widget_load,
140adfba
JK
2500 .control_load = skl_tplg_control_load,
2501 .bytes_ext_ops = skl_tlv_ops,
2502 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
15ecaba9 2503 .manifest = skl_manifest_load,
3af36706
VK
2504};
2505
287af4f9
JK
2506/*
2507 * A pipe can have multiple modules, each of which is also a DAPM widget.
2508 * While managing a pipeline we need the list of all the widgets in that
2509 * pipeline, so this helper - skl_tplg_create_pipe_widget_list() - collects
2510 * the SKL type widgets belonging to the pipeline
2511 */
2512static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
2513{
2514 struct snd_soc_dapm_widget *w;
2515 struct skl_module_cfg *mcfg = NULL;
2516 struct skl_pipe_module *p_module = NULL;
2517 struct skl_pipe *pipe;
2518
2519 list_for_each_entry(w, &platform->component.card->widgets, list) {
2520 if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
2521 mcfg = w->priv;
2522 pipe = mcfg->pipe;
2523
2524 p_module = devm_kzalloc(platform->dev,
2525 sizeof(*p_module), GFP_KERNEL);
2526 if (!p_module)
2527 return -ENOMEM;
2528
2529 p_module->w = w;
2530 list_add_tail(&p_module->node, &pipe->w_list);
2531 }
2532 }
2533
2534 return 0;
2535}
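/*
 * Illustrative usage (mirrors skl_tplg_set_pipe_type() below): once built,
 * the per-pipe widget list is walked as
 *
 *	list_for_each_entry(w_module, &pipe->w_list, node)
 *		mconfig = w_module->w->priv;
 */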
2536
f0aa94fa
JK
2537static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
2538{
2539 struct skl_pipe_module *w_module;
2540 struct snd_soc_dapm_widget *w;
2541 struct skl_module_cfg *mconfig;
2542 bool host_found = false, link_found = false;
2543
2544 list_for_each_entry(w_module, &pipe->w_list, node) {
2545 w = w_module->w;
2546 mconfig = w->priv;
2547
2548 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
2549 host_found = true;
2550 else if (mconfig->dev_type != SKL_DEVICE_NONE)
2551 link_found = true;
2552 }
2553
2554 if (host_found && link_found)
2555 pipe->passthru = true;
2556 else
2557 pipe->passthru = false;
2558}
2559
3af36706
VK
2560/* This will be read from topology manifest, currently defined here */
2561#define SKL_MAX_MCPS 30000000
2562#define SKL_FW_MAX_MEM 1000000
2563
2564/*
2565 * SKL topology init routine
2566 */
2567int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
2568{
2569 int ret;
2570 const struct firmware *fw;
2571 struct hdac_bus *bus = ebus_to_hbus(ebus);
2572 struct skl *skl = ebus_to_skl(ebus);
f0aa94fa 2573 struct skl_pipeline *ppl;
3af36706 2574
4b235c43 2575 ret = request_firmware(&fw, skl->tplg_name, bus->dev);
3af36706 2576 if (ret < 0) {
b663a8c5 2577 dev_err(bus->dev, "tplg fw %s load failed with %d\n",
4b235c43
VK
2578 skl->tplg_name, ret);
2579 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
2580 if (ret < 0) {
2581 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
2582 "dfw_sst.bin", ret);
2583 return ret;
2584 }
3af36706
VK
2585 }
2586
2587 /*
2588 * The complete tplg for SKL is loaded as index 0, we don't use
2589 * any other index
2590 */
b663a8c5
JK
2591 ret = snd_soc_tplg_component_load(&platform->component,
2592 &skl_tplg_ops, fw, 0);
3af36706
VK
2593 if (ret < 0) {
2594 dev_err(bus->dev, "tplg component load failed %d\n", ret);
c14a82c7 2595 release_firmware(fw);
3af36706
VK
2596 return -EINVAL;
2597 }
2598
2599 skl->resource.max_mcps = SKL_MAX_MCPS;
2600 skl->resource.max_mem = SKL_FW_MAX_MEM;
2601
d8018361 2602 skl->tplg = fw;
287af4f9
JK
2603 ret = skl_tplg_create_pipe_widget_list(platform);
2604 if (ret < 0)
2605 return ret;
d8018361 2606
f0aa94fa
JK
2607 list_for_each_entry(ppl, &skl->ppl_list, node)
2608 skl_tplg_set_pipe_type(skl, ppl->pipe);
d8018361 2609
3af36706
VK
2610 return 0;
2611}