/*
 *  skl-topology.c - Implements Platform component ALSA controls/widget
 *  handlers.
 *
 *  Copyright (C) 2014-2015 Intel Corp
 *  Author: Jeeja KP <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "skl-tplg-interface.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK	(1 << 0)
#define SKL_RATE_FIXUP_MASK	(1 << 1)
#define SKL_FMT_FIXUP_MASK	(1 << 2)
#define SKL_IN_DIR_BIT_MASK	BIT(0)
#define SKL_PIN_COUNT_MASK	GENMASK(7, 4)
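
/*
 * The last two masks decode the SKL_TKN_U32_DIR_PIN_COUNT topology token,
 * which packs the pin direction in bit 0 and the pin index in bits 7:4.
 * For example, a token value of 0x31 yields dir = 1 and pin index = 3
 * (see skl_tplg_get_token() below).
 */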
void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * SKL DSP driver modelling uses only a few DAPM widgets, the rest are
 * ignored. This helper checks if the SKL driver handles this widget type.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
{
	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
		return false;
	default:
		return true;
	}
}

/*
 * Each pipeline needs memory to be allocated. Check if we have free memory
 * from the available pool.
 */
static bool skl_is_pipe_mem_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mem + mconfig->pipe->memory_pages >
				skl->resource.max_mem) {
		dev_err(ctx->dev,
				"%s: module_id %d instance %d\n", __func__,
				mconfig->id.module_id,
				mconfig->id.instance_id);
		dev_err(ctx->dev,
				"exceeds ppl memory available %d mem %d\n",
				skl->resource.max_mem, skl->resource.mem);
		return false;
	} else {
		return true;
	}
}

/*
 * Add the mem to the mem pool. This is freed when the pipe is deleted.
 * Note: the DSP does the actual memory management, we only keep track of
 * the complete pool.
 */
static void skl_tplg_alloc_pipe_mem(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mem += mconfig->pipe->memory_pages;
}
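
/*
 * Resource bookkeeping pattern used throughout this file: the
 * skl_is_pipe_*_avail() helpers are called before a pipe is created, the
 * skl_tplg_alloc_pipe_*() helpers add the pipe's cost to the running totals
 * once creation succeeds, and the matching skl_tplg_free_pipe_*() helpers
 * below subtract it again on teardown.
 */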

/*
 * A pipeline needs DSP CPU resources for computation, quantified in MCPS
 * (Million Clocks Per Second) required for the module/pipe.
 *
 * Each pipeline needs mcps to be allocated. Check if we have mcps for this
 * pipe.
 */

static bool skl_is_pipe_mcps_avail(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	struct skl_sst *ctx = skl->skl_sst;

	if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
		dev_err(ctx->dev,
			"%s: module_id %d instance %d\n", __func__,
			mconfig->id.module_id, mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl mcps available %d > mcps %d\n",
			skl->resource.max_mcps, skl->resource.mcps);
		return false;
	} else {
		return true;
	}
}

static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
				struct skl_module_cfg *mconfig)
{
	skl->resource.mcps += mconfig->mcps;
}

/*
 * Free the mcps when tearing down
 */
static void
skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mcps -= mconfig->mcps;
}

/*
 * Free the memory when tearing down
 */
static void
skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
{
	skl->resource.mem -= mconfig->pipe->memory_pages;
}


static void skl_dump_mconfig(struct skl_sst *ctx,
					struct skl_module_cfg *mcfg)
{
	dev_dbg(ctx->dev, "Dumping config\n");
	dev_dbg(ctx->dev, "Input Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
	dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "Output Format:\n");
	dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
	dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
	dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
}

static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
		 * container so update bit depth accordingly
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}

}
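
/*
 * Example: a module tagged with params_fixup = SKL_RATE_FIXUP_MASK |
 * SKL_CH_FIXUP_MASK gets its sample rate and channel count (and hence its
 * channel map) overwritten from the pipe params, while its bit depth is
 * left untouched.
 */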

/*
 * A pipeline may have modules which impact the pcm parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup is to be applied by
 * supplying the fixup mask, so based on that we calculate the output.
 *
 * For an FE the pcm hw_params is the source/target format. The same is
 * applicable for a BE with its hw_params invoked.
 * Here, based on the FE/BE pipeline and the direction, we calculate the
 * input and output fixups and then apply them to the module.
 */
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->in_fmt[0];
	out_fmt = &m_cfg->out_fmt[0];

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}

/*
 * A module needs input and output buffers, which are dependent upon pcm
 * params, so once we have calculated the params, we need the buffer
 * calculation as well.
 */
static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Since fixups are applied to pin 0 only, ibs and obs need
	 * to change for pin 0 only
	 */
	in_fmt = &mcfg->in_fmt[0];
	out_fmt = &mcfg->out_fmt[0];

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

	mcfg->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
			in_fmt->channels * (in_fmt->bit_depth >> 3) *
			multiplier;

	mcfg->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
			out_fmt->channels * (out_fmt->bit_depth >> 3) *
			multiplier;
}
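
/*
 * Worked example of the buffer size calculation above: a 48000 Hz, stereo,
 * 32-bit-container input gives ibs = DIV_ROUND_UP(48000, 1000) * 2 * 4 =
 * 384 bytes, i.e. one millisecond worth of audio; an SRC module multiplies
 * this by 5.
 */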

static u8 skl_tplg_be_dev_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_DEVICE_BT;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_DEVICE_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_DEVICE_I2S;
		break;

	default:
		ret = NHLT_DEVICE_INVALID;
		break;
	}

	return ret;
}

static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt;
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(ctx->dev);
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);

	/* check if we already have blob */
	if (m_cfg->formats_config.caps_size > 0)
		return 0;

	dev_dbg(ctx->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_cfg->in_fmt[0].s_freq;
		s_fmt = m_cfg->in_fmt[0].bit_depth;
		ch = m_cfg->in_fmt[0].channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_cfg->out_fmt[0].s_freq;
			s_fmt = m_cfg->out_fmt[0].bit_depth;
			ch = m_cfg->out_fmt[0].channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_cfg->in_fmt[0].s_freq;
			s_fmt = m_cfg->in_fmt[0].bit_depth;
			ch = m_cfg->in_fmt[0].channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
					s_fmt, ch, s_freq, dir, dev_type);
	if (cfg) {
		m_cfg->formats_config.caps_size = cfg->size;
		m_cfg->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
					m_cfg->vbus_id, link_type, dir);
		dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
					ch, s_freq, s_fmt);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
							struct skl_sst *ctx)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE update\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(ctx, m_cfg);

	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER update\n",
				w->name);

	skl_dump_mconfig(ctx, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls and these
 * need to be set after the module is initialized. If the set_param flag is
 * set, the module params will be sent after the module is initialized.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(ctx,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls and are required when
 * the module is initialized. Such a param is identified by the set_param
 * flag being SKL_PARAM_INIT; in that case the parameter is sent as part of
 * the module init data.
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config.caps = (u32 *)&bc->params;
			mconfig->formats_config.caps_size = bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * achieved by the skl_init_module() routine, so invoke that for all modules
 * in a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		uuid_le *uuid_mod;
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->skl_sst->dev,
					"module %pUL id not populated\n",
					(uuid_le *)mconfig->guid);
			return -EIO;
		}

		/* check resource available */
		if (!skl_is_pipe_mcps_avail(skl, mconfig))
			return -ENOMEM;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;

			mconfig->m_state = SKL_MODULE_LOADED;
		}

		/* prepare the DMA if the module is gateway cpr */
		ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
		if (ret < 0)
			return ret;

		/* update blob if blob is null for be with default value */
		skl_tplg_update_be_blob(w, ctx);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, ctx);
		uuid_mod = (uuid_le *)mconfig->guid;
		mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod,
						mconfig->id.instance_id);
		if (mconfig->id.pvt_id < 0)
			return mconfig->id.pvt_id;
		skl_tplg_set_module_init_data(w);
		ret = skl_init_module(ctx, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
			return ret;
		}
		skl_tplg_alloc_pipe_mcps(skl, mconfig);
		ret = skl_tplg_set_module_params(w, ctx);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
						struct skl_pipe *pipe)
{
	int ret;
	struct skl_pipe_module *w_module = NULL;
	struct skl_module_cfg *mconfig = NULL;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		uuid_le *uuid_mod;
		mconfig = w_module->w->priv;
		uuid_mod = (uuid_le *)mconfig->guid;

		if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
			mconfig->m_state > SKL_MODULE_UNINIT) {
			ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
	}

	/* no modules to unload in this path, so return */
	return 0;
}

/*
 * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
 * need to create the pipeline. So we do the following:
 *   - check the resources
 *   - Create the pipeline
 *   - Initialize the modules in pipeline
 *   - finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;

	/* check resource available */
	if (!skl_is_pipe_mcps_avail(skl, mconfig))
		return -EBUSY;

	if (!skl_is_pipe_mem_avail(skl, mconfig))
		return -ENOMEM;

	/*
	 * Create a list of modules for pipe.
	 * This list contains modules from source to sink
	 */
	ret = skl_create_pipeline(ctx, mconfig->pipe);
	if (ret < 0)
		return ret;

	skl_tplg_alloc_pipe_mem(skl, mconfig);
	skl_tplg_alloc_pipe_mcps(skl, mconfig);

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(ctx, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	return 0;
}

static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
				int size, struct skl_module_cfg *mcfg)
{
	int i, pvt_id;

	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
		struct skl_kpb_params *kpb_params =
				(struct skl_kpb_params *)params;
		struct skl_mod_inst_map *inst = kpb_params->map;

		for (i = 0; i < kpb_params->num_modules; i++) {
			pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id,
								inst->inst_id);
			if (pvt_id < 0)
				return -EINVAL;

			inst->inst_id = pvt_id;
			inst++;
		}
	}

	return 0;
}
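
/*
 * Note on skl_fill_sink_instance_id(): for a KPB module the bind-time
 * parameter blob carries module/instance ids as given by the topology; the
 * loop above rewrites each instance id to the driver's private id (pvt_id)
 * before the blob is sent down in skl_tplg_set_module_bind_params().
 */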

/*
 * Some modules require params to be set after the module is bound to
 * all its connected pins.
 *
 * The module provider initializes the set_param flag for such modules and
 * we send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_sst *ctx)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * check all out/in pins are in bind state.
	 * if so set the module param
	 */
	for (i = 0; i < mcfg->max_out_queue; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->max_in_queue; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config.caps_size > 0 &&
		mconfig->formats_config.set_params == SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config;
		ret = skl_set_module_params(ctx, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				params = kzalloc(bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				memcpy(params, bc->params, bc->max);
				skl_fill_sink_instance_id(ctx, params, bc->max,
								mconfig);

				ret = skl_set_module_params(ctx, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);

				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
		dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * Here we check widgets in the sink pipelines. They can be
		 * of any widget type and we are only interested in the ones
		 * handled by the SKL driver, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
				is_skl_dsp_widget_type(p->sink)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
			skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(ctx,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}

/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
 * we need to do the following:
 *   - Bind to sink pipeline
 *     Since the sink pipes can be running and we don't get mixer event on
 *     connect for already running mixer, we need to find the sink pipes
 *     here and bind to them. This way dynamic connect works.
 *   - Start sink pipeline, if not running
 *   - Then run current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(ctx, src_mconfig->pipe);

	return 0;
}

static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;
	struct skl_sst *ctx = skl->skl_sst;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
		dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);

		/*
		 * Here we check widgets in the source pipelines. They can be
		 * of any widget type and we are only interested in the ones
		 * handled by the SKL driver, so check that first.
		 */
		if ((p->source->priv != NULL) &&
				is_skl_dsp_widget_type(p->source)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}

/*
 * in the Post-PMU event of mixer we need to do the following:
 *   - Check if this pipe is running
 *   - if not, then
 *	- bind this pipeline to its source pipeline
 *	  if source pipe is already running, this means it is a dynamic
 *	  connection and we need to bind only to that pipe
 *	- start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	struct skl_sst *ctx = skl->skl_sst;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If source pipe is already started, that means source is driving
	 * one more sink before this sink got connected. Since source is
	 * started, bind this sink to source and start this pipe.
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * check the pipe state: if the source pipe is not started
		 * there is no need to bind or start the pipe
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(ctx, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * in the Pre-PMD event of mixer we need to do the following:
 *   - Stop the pipe
 *   - find the source connections and remove that from dapm_path_list
 *   - unbind with source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->max_in_queue; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;

			ret = skl_unbind_modules(ctx,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * in the Post-PMD event of mixer we need to do the following:
 *   - Free the mcps used
 *   - Free the mem used
 *   - Unbind the modules within the pipeline
 *   - Delete the pipeline (modules are not required to be explicitly
 *     deleted, pipeline delete is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_sst *ctx = skl->skl_sst;
	struct skl_pipe *s_pipe = mconfig->pipe;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	skl_tplg_free_pipe_mcps(skl, mconfig);
	skl_tplg_free_pipe_mem(skl, mconfig);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
			skl_tplg_free_pipe_mcps(skl, dst_module);
		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(ctx, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(ctx, mconfig->pipe);

	return skl_tplg_unload_pipe_modules(ctx, s_pipe);
}

/*
 * in the Post-PMD event of PGA we need to do the following:
 *   - Free the mcps used
 *   - Stop the pipeline
 *   - If a source pipe is connected, unbind from the source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
								struct skl *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;
	struct skl_sst *ctx = skl->skl_sst;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(ctx, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->max_out_queue; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector and if a path is found that
			 * means the unbind between source and sink has not
			 * happened yet
			 */
			ret = skl_unbind_modules(ctx, src_mconfig,
						sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required that is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * We are only interested in the last PGA (leaf PGA) in a pipeline, to
 * disconnect from the sink while it is running (two FE to one BE or one FE
 * to two BE scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)

{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}
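
/*
 * Layout of the TLV blob exchanged with user space by the get handler above
 * and the set handler below: u32 param_id, u32 payload size, then the
 * parameter payload itself (hence the "data + 2" offsets and the
 * 2 * sizeof(u32) header adjustment).
 */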

#define SKL_PARAM_VENDOR_ID 0xff

static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;

		ac->size = size;
		/*
		 * if the param_id is of type Vendor, firmware expects the
		 * actual parameter id and size from the control.
		 */
		if (ac->param_id == SKL_PARAM_VENDOR_ID) {
			if (copy_from_user(ac->params, data, size))
				return -EFAULT;
		} else {
			if (copy_from_user(ac->params,
					   data + 2, size))
				return -EFAULT;
		}

		if (w->power)
			return skl_set_module_params(skl->skl_sst,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

/*
 * Fill the dma id for host and link. In case of a passthrough pipeline,
 * both the host and link sit in the same pipeline, so the link or host
 * params are copied based on dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}

/*
 * The FE params are passed by hw_params of the DAI.
 * On hw_params, the params are stored in the Gateway module of the FE and
 * we need to calculate the format in the DSP module configuration; that
 * conversion is done here.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_fmt *format = NULL;

	skl_tplg_fill_dma_id(mconfig, params);

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->in_fmt[0];
	else
		format = &mconfig->out_fmt[0];

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
				format->valid_bit_depth);
		return -EINVAL;
	}

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		mconfig->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		mconfig->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}
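
/*
 * Example of the calculation above: a 48000 Hz, 2 channel stream in a
 * 16 bit container gives ibs = (48000 / 1000) * 2 * 2 = 192 bytes, while a
 * 24 or 32 bit valid depth uses a 32 bit container and gives
 * 48 * 2 * 4 = 384 bytes.
 */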

/*
 * Query the module config for the FE DAI
 * This is used to find the hw_params set for that DAI and apply to the FE
 * pipeline
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
					!is_skl_dsp_widget_type(p->sink))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		w = dai->capture_widget;
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
					!is_skl_dsp_widget_type(p->source))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}

static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
				    (p->sink->id == snd_soc_dapm_aif_out) &&
				    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
				    (p->source->id == snd_soc_dapm_aif_in) &&
				    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		w = dai->capture_widget;
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}

static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}

/*
 * Fill the BE gateway parameters
 * The BE gateway expects a blob of parameters which are kept in the ACPI
 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
 * The port can have multiple settings so pick based on the PCM
 * parameters
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);

	skl_tplg_fill_dma_id(mconfig, params);

	if (link_type == NHLT_LINK_HDA)
		return 0;

	/* update the blob based on virtual bus_id */
	cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
					params->s_fmt, params->ch,
					params->s_freq, params->stream,
					dev_type);
	if (cfg) {
		mconfig->formats_config.caps_size = cfg->size;
		mconfig->formats_config.caps = (u32 *) &cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
					mconfig->vbus_id, link_type,
					params->stream);
		dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
				 params->ch, params->s_freq, params->s_fmt);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source) &&
						p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p = NULL;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink) &&
						p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback). Based on sink and source we need to either find the source
 * list or the sink list and set the pipeline parameters
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_widget *w;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;

		return skl_tplg_be_set_src_pipe_params(dai, w, params);

	} else {
		w = dai->capture_widget;

		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
	}

	return 0;
}

static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};

static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};

static int skl_tplg_fill_pipe_tkn(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val)
{

	switch (tkn) {
	case SKL_TKN_U32_PIPE_CONN_TYPE:
		pipe->conn_type = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_PRIORITY:
		pipe->pipe_priority = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_MEM_PGS:
		pipe->memory_pages = tkn_val;
		break;

	case SKL_TKN_U32_PMODE:
		pipe->lp_mode = tkn_val;
		break;

	default:
		dev_err(dev, "Token not handled %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/*
 * Add pipeline by parsing the relevant tokens
 * Return an existing pipe if the pipe already exists.
 */
static int skl_tplg_add_pipe(struct device *dev,
		struct skl_module_cfg *mconfig, struct skl *skl,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
{
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;

	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == tkn_elem->value) {
			mconfig->pipe = ppl->pipe;
			return EEXIST;
		}
	}

	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return -ENOMEM;

	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return -ENOMEM;

	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	pipe->p_params = params;
	pipe->ppl_id = tkn_elem->value;
	INIT_LIST_HEAD(&pipe->w_list);

	ppl->pipe = pipe;
	list_add(&ppl->node, &skl->ppl_list);

	mconfig->pipe = pipe;
	mconfig->pipe->state = SKL_PIPE_INVALID;

	return 0;
}
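
/*
 * Note: skl_tplg_add_pipe() returns the positive value EEXIST (not -EEXIST)
 * when the pipe was already created by an earlier widget; the token parser
 * below uses that to set is_pipe_exists and only then applies the
 * pipe-level tokens to the existing pipe.
 */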
1630
1631static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
1632 struct skl_module_pin *m_pin,
1633 int pin_index, u32 value)
1634{
1635 switch (tkn) {
1636 case SKL_TKN_U32_PIN_MOD_ID:
1637 m_pin[pin_index].id.module_id = value;
1638 break;
1639
1640 case SKL_TKN_U32_PIN_INST_ID:
1641 m_pin[pin_index].id.instance_id = value;
1642 break;
1643
1644 default:
1645 dev_err(dev, "%d Not a pin token\n", value);
1646 return -EINVAL;
1647 }
1648
1649 return 0;
1650}
1651
1652/*
1653 * Parse for pin config specific tokens to fill up the
1654 * module private data
1655 */
1656static int skl_tplg_fill_pins_info(struct device *dev,
1657 struct skl_module_cfg *mconfig,
1658 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1659 int dir, int pin_count)
1660{
1661 int ret;
1662 struct skl_module_pin *m_pin;
1663
1664 switch (dir) {
1665 case SKL_DIR_IN:
1666 m_pin = mconfig->m_in_pin;
1667 break;
1668
1669 case SKL_DIR_OUT:
1670 m_pin = mconfig->m_out_pin;
1671 break;
1672
1673 default:
ecd286a9 1674 dev_err(dev, "Invalid direction value\n");
6277e832
SN
1675 return -EINVAL;
1676 }
1677
1678 ret = skl_tplg_fill_pin(dev, tkn_elem->token,
1679 m_pin, pin_count, tkn_elem->value);
1680
1681 if (ret < 0)
1682 return ret;
1683
1684 m_pin[pin_count].in_use = false;
1685 m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
1686
1687 return 0;
3af36706
VK
1688}
1689
6277e832
SN
1690/*
1691 * Fill up input/output module config format based
1692 * on the direction
1693 */
1694static int skl_tplg_fill_fmt(struct device *dev,
1695 struct skl_module_cfg *mconfig, u32 tkn,
1696 u32 value, u32 dir, u32 pin_count)
1697{
1698 struct skl_module_fmt *dst_fmt;
1699
1700 switch (dir) {
1701 case SKL_DIR_IN:
1702 dst_fmt = mconfig->in_fmt;
1703 dst_fmt += pin_count;
1704 break;
1705
1706 case SKL_DIR_OUT:
1707 dst_fmt = mconfig->out_fmt;
1708 dst_fmt += pin_count;
1709 break;
1710
1711 default:
ecd286a9 1712 dev_err(dev, "Invalid direction value\n");
6277e832
SN
1713 return -EINVAL;
1714 }
1715
1716 switch (tkn) {
1717 case SKL_TKN_U32_FMT_CH:
1718 dst_fmt->channels = value;
1719 break;
1720
1721 case SKL_TKN_U32_FMT_FREQ:
1722 dst_fmt->s_freq = value;
1723 break;
1724
1725 case SKL_TKN_U32_FMT_BIT_DEPTH:
1726 dst_fmt->bit_depth = value;
1727 break;
1728
1729 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1730 dst_fmt->valid_bit_depth = value;
1731 break;
1732
1733 case SKL_TKN_U32_FMT_CH_CONFIG:
1734 dst_fmt->ch_cfg = value;
1735 break;
1736
1737 case SKL_TKN_U32_FMT_INTERLEAVE:
1738 dst_fmt->interleaving_style = value;
1739 break;
1740
1741 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1742 dst_fmt->sample_type = value;
1743 break;
1744
1745 case SKL_TKN_U32_FMT_CH_MAP:
1746 dst_fmt->ch_map = value;
1747 break;
1748
1749 default:
ecd286a9 1750 dev_err(dev, "Invalid token %d\n", tkn);
6277e832
SN
1751 return -EINVAL;
1752 }
1753
1754 return 0;
1755}
1756
1757static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
1758 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
1759{
1760 if (uuid_tkn->token == SKL_TKN_UUID)
1761 memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
1762 else {
ecd286a9 1763 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
6277e832
SN
1764 return -EINVAL;
1765 }
1766
1767 return 0;
1768}
1769
1770static void skl_tplg_fill_pin_dynamic_val(
1771 struct skl_module_pin *mpin, u32 pin_count, u32 value)
4cd9899f
HS
1772{
1773 int i;
1774
6277e832
SN
1775 for (i = 0; i < pin_count; i++)
1776 mpin[i].is_dynamic = value;
1777}
1778
1779/*
1780 * Parse tokens to fill up the module private data
1781 */
1782static int skl_tplg_get_token(struct device *dev,
1783 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1784 struct skl *skl, struct skl_module_cfg *mconfig)
1785{
1786 int tkn_count = 0;
1787 int ret;
1788 static int is_pipe_exists;
1789 static int pin_index, dir;
1790
1791 if (tkn_elem->token > SKL_TKN_MAX)
1792 return -EINVAL;
1793
1794 switch (tkn_elem->token) {
1795 case SKL_TKN_U8_IN_QUEUE_COUNT:
1796 mconfig->max_in_queue = tkn_elem->value;
1797 mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
1798 sizeof(*mconfig->m_in_pin),
1799 GFP_KERNEL);
1800 if (!mconfig->m_in_pin)
1801 return -ENOMEM;
1802
1803 break;
1804
1805 case SKL_TKN_U8_OUT_QUEUE_COUNT:
1806 mconfig->max_out_queue = tkn_elem->value;
1807 mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
1808 sizeof(*mconfig->m_out_pin),
1809 GFP_KERNEL);
1810
1811 if (!mconfig->m_out_pin)
1812 return -ENOMEM;
1813
1814 break;
1815
1816 case SKL_TKN_U8_DYN_IN_PIN:
1817 if (!mconfig->m_in_pin)
1818 return -ENOMEM;
1819
1820 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
1821 mconfig->max_in_queue, tkn_elem->value);
1822
1823 break;
1824
1825 case SKL_TKN_U8_DYN_OUT_PIN:
1826 if (!mconfig->m_out_pin)
1827 return -ENOMEM;
1828
1829 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
1830 mconfig->max_out_queue, tkn_elem->value);
1831
1832 break;
1833
1834 case SKL_TKN_U8_TIME_SLOT:
1835 mconfig->time_slot = tkn_elem->value;
1836 break;
1837
1838 case SKL_TKN_U8_CORE_ID:
1839 mconfig->core_id = tkn_elem->value;
1840
1841 case SKL_TKN_U8_MOD_TYPE:
1842 mconfig->m_type = tkn_elem->value;
1843 break;
1844
1845 case SKL_TKN_U8_DEV_TYPE:
1846 mconfig->dev_type = tkn_elem->value;
1847 break;
1848
1849 case SKL_TKN_U8_HW_CONN_TYPE:
1850 mconfig->hw_conn_type = tkn_elem->value;
1851 break;
1852
1853 case SKL_TKN_U16_MOD_INST_ID:
1854 mconfig->id.instance_id =
1855 tkn_elem->value;
1856 break;
1857
1858 case SKL_TKN_U32_MEM_PAGES:
1859 mconfig->mem_pages = tkn_elem->value;
1860 break;
1861
1862 case SKL_TKN_U32_MAX_MCPS:
1863 mconfig->mcps = tkn_elem->value;
1864 break;
1865
1866 case SKL_TKN_U32_OBS:
1867 mconfig->obs = tkn_elem->value;
1868 break;
1869
1870 case SKL_TKN_U32_IBS:
1871 mconfig->ibs = tkn_elem->value;
1872 break;
1873
1874 case SKL_TKN_U32_VBUS_ID:
1875 mconfig->vbus_id = tkn_elem->value;
1876 break;
1877
1878 case SKL_TKN_U32_PARAMS_FIXUP:
1879 mconfig->params_fixup = tkn_elem->value;
1880 break;
1881
1882 case SKL_TKN_U32_CONVERTER:
1883 mconfig->converter = tkn_elem->value;
1884 break;
1885
6bd9dcf3
VK
1886 case SKL_TKL_U32_D0I3_CAPS:
1887 mconfig->d0i3_caps = tkn_elem->value;
1888 break;
1889
6277e832
SN
1890 case SKL_TKN_U32_PIPE_ID:
1891 ret = skl_tplg_add_pipe(dev,
1892 mconfig, skl, tkn_elem);
1893
1894 if (ret < 0)
1895 return is_pipe_exists;
1896
1897 if (ret == EEXIST)
1898 is_pipe_exists = 1;
1899
1900 break;
1901
1902 case SKL_TKN_U32_PIPE_CONN_TYPE:
1903 case SKL_TKN_U32_PIPE_PRIORITY:
1904 case SKL_TKN_U32_PIPE_MEM_PGS:
8a0cb236 1905 case SKL_TKN_U32_PMODE:
6277e832
SN
1906 if (is_pipe_exists) {
1907 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
1908 tkn_elem->token, tkn_elem->value);
1909 if (ret < 0)
1910 return ret;
1911 }
1912
1913 break;
1914
1915 /*
1916 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
1917 * direction and the pin count. The first four bits represent
1918 * direction and next four the pin count.
1919 */
1920 case SKL_TKN_U32_DIR_PIN_COUNT:
1921 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
1922 pin_index = (tkn_elem->value &
1923 SKL_PIN_COUNT_MASK) >> 4;
1924
1925 break;
1926
1927 case SKL_TKN_U32_FMT_CH:
1928 case SKL_TKN_U32_FMT_FREQ:
1929 case SKL_TKN_U32_FMT_BIT_DEPTH:
1930 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1931 case SKL_TKN_U32_FMT_CH_CONFIG:
1932 case SKL_TKN_U32_FMT_INTERLEAVE:
1933 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1934 case SKL_TKN_U32_FMT_CH_MAP:
1935 ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
1936 tkn_elem->value, dir, pin_index);
1937
1938 if (ret < 0)
1939 return ret;
1940
1941 break;
1942
1943 case SKL_TKN_U32_PIN_MOD_ID:
1944 case SKL_TKN_U32_PIN_INST_ID:
1945 ret = skl_tplg_fill_pins_info(dev,
1946 mconfig, tkn_elem, dir,
1947 pin_index);
1948 if (ret < 0)
1949 return ret;
1950
1951 break;
1952
1953 case SKL_TKN_U32_CAPS_SIZE:
1954 mconfig->formats_config.caps_size =
1955 tkn_elem->value;
1956
1957 break;
1958
1959 case SKL_TKN_U32_PROC_DOMAIN:
1960 mconfig->domain =
1961 tkn_elem->value;
1962
1963 break;
1964
1965 case SKL_TKN_U8_IN_PIN_TYPE:
1966 case SKL_TKN_U8_OUT_PIN_TYPE:
1967 case SKL_TKN_U8_CONN_TYPE:
1968 break;
1969
1970 default:
1971 dev_err(dev, "Token %d not handled\n",
1972 tkn_elem->token);
1973 return -EINVAL;
4cd9899f 1974 }
6277e832
SN
1975
1976 tkn_count++;
1977
1978 return tkn_count;
1979}
1980
1981/*
1982 * Parse the vendor array for specific tokens to construct
1983 * module private data
1984 */
1985static int skl_tplg_get_tokens(struct device *dev,
1986 char *pvt_data, struct skl *skl,
1987 struct skl_module_cfg *mconfig, int block_size)
1988{
1989 struct snd_soc_tplg_vendor_array *array;
1990 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
1991 int tkn_count = 0, ret;
1992 int off = 0, tuple_size = 0;
1993
1994 if (block_size <= 0)
1995 return -EINVAL;
1996
1997 while (tuple_size < block_size) {
1998 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
1999
2000 off += array->size;
2001
2002 switch (array->type) {
2003 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
ecd286a9 2004 dev_warn(dev, "no string tokens expected for skl tplg\n");
6277e832
SN
2005 continue;
2006
2007 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2008 ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
2009 if (ret < 0)
2010 return ret;
2011
2012 tuple_size += sizeof(*array->uuid);
2013
2014 continue;
2015
2016 default:
2017 tkn_elem = array->value;
2018 tkn_count = 0;
2019 break;
2020 }
2021
2022 while (tkn_count <= (array->num_elems - 1)) {
2023 ret = skl_tplg_get_token(dev, tkn_elem,
2024 skl, mconfig);
2025
2026 if (ret < 0)
2027 return ret;
2028
2029 tkn_count = tkn_count + ret;
2030 tkn_elem++;
2031 }
2032
2033 tuple_size += tkn_count * sizeof(*tkn_elem);
2034 }
2035
2036 return 0;
2037}
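/*
 * Illustrative walk of one tuple block by skl_tplg_get_tokens() (a sketch,
 * not dumped from a real topology binary): a vendor array whose type is
 * neither STRING nor UUID and which carries, say,
 *   { SKL_TKN_U8_IN_QUEUE_COUNT,  2 }
 *   { SKL_TKN_U8_OUT_QUEUE_COUNT, 2 }
 * is handled element by element through skl_tplg_get_token(); the returned
 * counts are accumulated in tkn_count and used to account the tuple_size
 * consumed from the block.
 */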
2038
2039/*
2040 * Every data block is preceded by descriptors giving the number
2041 * of data blocks, the type of the block and its size
2042 */
2043static int skl_tplg_get_desc_blocks(struct device *dev,
2044 struct snd_soc_tplg_vendor_array *array)
2045{
2046 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2047
2048 tkn_elem = array->value;
2049
2050 switch (tkn_elem->token) {
2051 case SKL_TKN_U8_NUM_BLOCKS:
2052 case SKL_TKN_U8_BLOCK_TYPE:
2053 case SKL_TKN_U16_BLOCK_SIZE:
2054 return tkn_elem->value;
2055
2056 default:
ecd286a9 2057 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
6277e832
SN
2058 break;
2059 }
2060
2061 return -EINVAL;
2062}
2063
2064/*
2065 * Parse the private data for tokens and their corresponding values.
2066 * The private data can have multiple data blocks. The blocks are
2067 * preceded by a descriptor for the number of blocks, and each block
2068 * is preceded by descriptors for its type and size.
2069 */
2070static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2071 struct skl *skl, struct device *dev,
2072 struct skl_module_cfg *mconfig)
2073{
2074 struct snd_soc_tplg_vendor_array *array;
2075 int num_blocks, block_size = 0, block_type, off = 0;
2076 char *data;
2077 int ret;
2078
2079 /* Read the NUM_DATA_BLOCKS descriptor */
2080 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2081 ret = skl_tplg_get_desc_blocks(dev, array);
2082 if (ret < 0)
2083 return ret;
2084 num_blocks = ret;
2085
2086 off += array->size;
2087 array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);
2088
2089 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2090 while (num_blocks > 0) {
2091 ret = skl_tplg_get_desc_blocks(dev, array);
2092
2093 if (ret < 0)
2094 return ret;
2095 block_type = ret;
2096 off += array->size;
2097
2098 array = (struct snd_soc_tplg_vendor_array *)
2099 (tplg_w->priv.data + off);
2100
2101 ret = skl_tplg_get_desc_blocks(dev, array);
2102
2103 if (ret < 0)
2104 return ret;
2105 block_size = ret;
2106 off += array->size;
2107
2108 array = (struct snd_soc_tplg_vendor_array *)
2109 (tplg_w->priv.data + off);
2110
2111 data = (tplg_w->priv.data + off);
2112
2113 if (block_type == SKL_TYPE_TUPLE) {
2114 ret = skl_tplg_get_tokens(dev, data,
2115 skl, mconfig, block_size);
2116
2117 if (ret < 0)
2118 return ret;
2119
2120 --num_blocks;
2121 } else {
2122 if (mconfig->formats_config.caps_size > 0)
2123 memcpy(mconfig->formats_config.caps, data,
2124 mconfig->formats_config.caps_size);
2125 --num_blocks;
2126 }
2127 }
2128
2129 return 0;
4cd9899f
HS
2130}
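/*
 * Sketch of the widget private data layout expected by
 * skl_tplg_get_pvt_data() (illustrative, derived only from the parsing
 * above):
 *
 *   vendor array: SKL_TKN_U8_NUM_BLOCKS  = N
 *   then, per block:
 *     vendor array: SKL_TKN_U8_BLOCK_TYPE  (SKL_TYPE_TUPLE or binary data)
 *     vendor array: SKL_TKN_U16_BLOCK_SIZE (size of the block in bytes)
 *     block data: token/value tuples handed to skl_tplg_get_tokens(), or a
 *     binary caps blob copied into mconfig->formats_config.caps
 */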
2131
fe3f4442
D
2132static void skl_clear_pin_config(struct snd_soc_platform *platform,
2133 struct snd_soc_dapm_widget *w)
2134{
2135 int i;
2136 struct skl_module_cfg *mconfig;
2137 struct skl_pipe *pipe;
2138
2139 if (!strncmp(w->dapm->component->name, platform->component.name,
2140 strlen(platform->component.name))) {
2141 mconfig = w->priv;
2142 pipe = mconfig->pipe;
2143 for (i = 0; i < mconfig->max_in_queue; i++) {
2144 mconfig->m_in_pin[i].in_use = false;
2145 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2146 }
2147 for (i = 0; i < mconfig->max_out_queue; i++) {
2148 mconfig->m_out_pin[i].in_use = false;
2149 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2150 }
2151 pipe->state = SKL_PIPE_INVALID;
2152 mconfig->m_state = SKL_MODULE_UNINIT;
2153 }
2154}
2155
2156void skl_cleanup_resources(struct skl *skl)
2157{
2158 struct skl_sst *ctx = skl->skl_sst;
2159 struct snd_soc_platform *soc_platform = skl->platform;
2160 struct snd_soc_dapm_widget *w;
2161 struct snd_soc_card *card;
2162
2163 if (soc_platform == NULL)
2164 return;
2165
2166 card = soc_platform->component.card;
2167 if (!card || !card->instantiated)
2168 return;
2169
2170 skl->resource.mem = 0;
2171 skl->resource.mcps = 0;
2172
2173 list_for_each_entry(w, &card->widgets, list) {
2174 if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
2175 skl_clear_pin_config(soc_platform, w);
2176 }
2177
2178 skl_clear_module_cnt(ctx->dsp);
2179}
2180
3af36706
VK
2181/*
2182 * Topology core widget load callback
2183 *
2184 * This is used to save the private data for each widget, which gives the
2185 * driver information about the module and pipeline parameters the DSP
2186 * FW expects, such as ids, resource values, formats etc.
2187 */
2188static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
b663a8c5
JK
2189 struct snd_soc_dapm_widget *w,
2190 struct snd_soc_tplg_dapm_widget *tplg_w)
3af36706
VK
2191{
2192 int ret;
2193 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2194 struct skl *skl = ebus_to_skl(ebus);
2195 struct hdac_bus *bus = ebus_to_hbus(ebus);
2196 struct skl_module_cfg *mconfig;
3af36706
VK
2197
2198 if (!tplg_w->priv.size)
2199 goto bind_event;
2200
2201 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2202
2203 if (!mconfig)
2204 return -ENOMEM;
2205
2206 w->priv = mconfig;
09305da9 2207
b7c50555
VK
2208 /*
2209 * module binary can be loaded later, so set it to query when
2210 * the module is loaded for a use case
2211 */
2212 mconfig->id.module_id = -1;
3af36706 2213
6277e832
SN
2214 /* Parse private data for tuples */
2215 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2216 if (ret < 0)
2217 return ret;
3af36706
VK
2218bind_event:
2219 if (tplg_w->event_type == 0) {
3373f716 2220 dev_dbg(bus->dev, "ASoC: No event handler required\n");
3af36706
VK
2221 return 0;
2222 }
2223
2224 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
b663a8c5
JK
2225 ARRAY_SIZE(skl_tplg_widget_ops),
2226 tplg_w->event_type);
3af36706
VK
2227
2228 if (ret) {
2229 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2230 __func__, tplg_w->event_type);
2231 return -EINVAL;
2232 }
2233
2234 return 0;
2235}
2236
140adfba
JK
2237static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2238 struct snd_soc_tplg_bytes_control *bc)
2239{
2240 struct skl_algo_data *ac;
2241 struct skl_dfw_algo_data *dfw_ac =
2242 (struct skl_dfw_algo_data *)bc->priv.data;
2243
2244 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2245 if (!ac)
2246 return -ENOMEM;
2247
2248 /* Fill private data */
2249 ac->max = dfw_ac->max;
2250 ac->param_id = dfw_ac->param_id;
2251 ac->set_params = dfw_ac->set_params;
0d682104 2252 ac->size = dfw_ac->max;
140adfba
JK
2253
2254 if (ac->max) {
2255 ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2256 if (!ac->params)
2257 return -ENOMEM;
2258
edd7ea2d 2259 memcpy(ac->params, dfw_ac->params, ac->max);
140adfba
JK
2260 }
2261
2262 be->dobj.private = ac;
2263 return 0;
2264}
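/*
 * The skl_algo_data built here backs a bytes-ext kcontrol: ac->params keeps
 * a copy of the defaults shipped in the topology binary, and ac->max/ac->size
 * bound what can later be exchanged through the TLV byte-control callbacks
 * registered below in skl_tplg_ops (bytes_ext_ops = skl_tlv_ops).
 */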
2265
2266static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2267 struct snd_kcontrol_new *kctl,
2268 struct snd_soc_tplg_ctl_hdr *hdr)
2269{
2270 struct soc_bytes_ext *sb;
2271 struct snd_soc_tplg_bytes_control *tplg_bc;
2272 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2273 struct hdac_bus *bus = ebus_to_hbus(ebus);
2274
2275 switch (hdr->ops.info) {
2276 case SND_SOC_TPLG_CTL_BYTES:
2277 tplg_bc = container_of(hdr,
2278 struct snd_soc_tplg_bytes_control, hdr);
2279 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2280 sb = (struct soc_bytes_ext *)kctl->private_value;
2281 if (tplg_bc->priv.size)
2282 return skl_init_algo_data(
2283 bus->dev, sb, tplg_bc);
2284 }
2285 break;
2286
2287 default:
2288 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
2289 hdr->ops.get, hdr->ops.put, hdr->ops.info);
2290 break;
2291 }
2292
2293 return 0;
2294}
2295
541070ce
SN
2296static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
2297 struct snd_soc_tplg_vendor_string_elem *str_elem,
eee0e16f 2298 struct skl *skl)
541070ce
SN
2299{
2300 int tkn_count = 0;
2301 static int ref_count;
2302
2303 switch (str_elem->token) {
2304 case SKL_TKN_STR_LIB_NAME:
eee0e16f 2305 if (ref_count > skl->skl_sst->lib_count - 1) {
541070ce
SN
2306 ref_count = 0;
2307 return -EINVAL;
2308 }
2309
eee0e16f
JK
2310 strncpy(skl->skl_sst->lib_info[ref_count].name,
2311 str_elem->string,
2312 ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
541070ce
SN
2313 ref_count++;
2314 tkn_count++;
2315 break;
2316
2317 default:
ecd286a9 2318 dev_err(dev, "Not a string token %d\n", str_elem->token);
541070ce
SN
2319 break;
2320 }
2321
2322 return tkn_count;
2323}
2324
2325static int skl_tplg_get_str_tkn(struct device *dev,
2326 struct snd_soc_tplg_vendor_array *array,
eee0e16f 2327 struct skl *skl)
541070ce
SN
2328{
2329 int tkn_count = 0, ret;
2330 struct snd_soc_tplg_vendor_string_elem *str_elem;
2331
2332 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
2333 while (tkn_count < array->num_elems) {
eee0e16f 2334 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
541070ce
SN
2335 str_elem++;
2336
2337 if (ret < 0)
2338 return ret;
2339
2340 tkn_count = tkn_count + ret;
2341 }
2342
2343 return tkn_count;
2344}
2345
2346static int skl_tplg_get_int_tkn(struct device *dev,
2347 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
eee0e16f 2348 struct skl *skl)
541070ce
SN
2349{
2350 int tkn_count = 0;
2351
2352 switch (tkn_elem->token) {
2353 case SKL_TKN_U32_LIB_COUNT:
eee0e16f 2354 skl->skl_sst->lib_count = tkn_elem->value;
541070ce
SN
2355 tkn_count++;
2356 break;
2357
2358 default:
ecd286a9 2359 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
541070ce
SN
2360 return -EINVAL;
2361 }
2362
2363 return tkn_count;
2364}
2365
2366/*
2367 * Fill the manifest structure by parsing the tokens based on the
2368 * type.
2369 */
2370static int skl_tplg_get_manifest_tkn(struct device *dev,
eee0e16f 2371 char *pvt_data, struct skl *skl,
541070ce
SN
2372 int block_size)
2373{
2374 int tkn_count = 0, ret;
2375 int off = 0, tuple_size = 0;
2376 struct snd_soc_tplg_vendor_array *array;
2377 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2378
2379 if (block_size <= 0)
2380 return -EINVAL;
2381
2382 while (tuple_size < block_size) {
2383 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2384 off += array->size;
2385 switch (array->type) {
2386 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
eee0e16f 2387 ret = skl_tplg_get_str_tkn(dev, array, skl);
541070ce
SN
2388
2389 if (ret < 0)
2390 return ret;
2391 tkn_count += ret;
2392
2393 tuple_size += tkn_count *
2394 sizeof(struct snd_soc_tplg_vendor_string_elem);
2395 continue;
2396
2397 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
ecd286a9 2398 dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
541070ce
SN
2399 continue;
2400
2401 default:
2402 tkn_elem = array->value;
2403 tkn_count = 0;
2404 break;
2405 }
2406
2407 while (tkn_count <= array->num_elems - 1) {
2408 ret = skl_tplg_get_int_tkn(dev,
eee0e16f 2409 tkn_elem, skl);
541070ce
SN
2410 if (ret < 0)
2411 return ret;
2412
2413 tkn_count = tkn_count + ret;
2414 tkn_elem++;
2415 tuple_size += tkn_count *
2416 sizeof(struct snd_soc_tplg_vendor_value_elem);
2417 break;
2418 }
2419 tkn_count = 0;
2420 }
2421
2422 return 0;
2423}
2424
2425/*
2426 * Parse manifest private data for tokens. The private data block is
2427 * preceded by descriptors for type and size of data block.
2428 */
2429static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
eee0e16f 2430 struct device *dev, struct skl *skl)
541070ce
SN
2431{
2432 struct snd_soc_tplg_vendor_array *array;
2433 int num_blocks, block_size = 0, block_type, off = 0;
2434 char *data;
2435 int ret;
2436
2437 /* Read the NUM_DATA_BLOCKS descriptor */
2438 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
2439 ret = skl_tplg_get_desc_blocks(dev, array);
2440 if (ret < 0)
2441 return ret;
2442 num_blocks = ret;
2443
2444 off += array->size;
2445 array = (struct snd_soc_tplg_vendor_array *)
2446 (manifest->priv.data + off);
2447
2448 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2449 while (num_blocks > 0) {
2450 ret = skl_tplg_get_desc_blocks(dev, array);
2451
2452 if (ret < 0)
2453 return ret;
2454 block_type = ret;
2455 off += array->size;
2456
2457 array = (struct snd_soc_tplg_vendor_array *)
2458 (manifest->priv.data + off);
2459
2460 ret = skl_tplg_get_desc_blocks(dev, array);
2461
2462 if (ret < 0)
2463 return ret;
2464 block_size = ret;
2465 off += array->size;
2466
2467 array = (struct snd_soc_tplg_vendor_array *)
2468 (manifest->priv.data + off);
2469
2470 data = (manifest->priv.data + off);
2471
2472 if (block_type == SKL_TYPE_TUPLE) {
eee0e16f 2473 ret = skl_tplg_get_manifest_tkn(dev, data, skl,
541070ce
SN
2474 block_size);
2475
2476 if (ret < 0)
2477 return ret;
2478
2479 --num_blocks;
2480 } else {
2481 return -EINVAL;
2482 }
2483 }
2484
2485 return 0;
2486}
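/*
 * Sketch of the manifest private data parsed above (illustrative only): the
 * descriptor blocks are followed by a tuple block carrying
 * SKL_TKN_U32_LIB_COUNT and a string block with one SKL_TKN_STR_LIB_NAME
 * entry per loadable library; these fill skl->skl_sst->lib_count and
 * skl->skl_sst->lib_info[].name respectively.
 */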
2487
15ecaba9
K
2488static int skl_manifest_load(struct snd_soc_component *cmpnt,
2489 struct snd_soc_tplg_manifest *manifest)
2490{
15ecaba9
K
2491 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2492 struct hdac_bus *bus = ebus_to_hbus(ebus);
2493 struct skl *skl = ebus_to_skl(ebus);
15ecaba9 2494
c15ad605
VK
2495 /* proceed only if we have private data defined */
2496 if (manifest->priv.size == 0)
2497 return 0;
2498
eee0e16f 2499 skl_tplg_get_manifest_data(manifest, bus->dev, skl);
15ecaba9 2500
eee0e16f 2501 if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
15ecaba9 2502 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
eee0e16f
JK
2503 skl->skl_sst->lib_count);
2504 return -EINVAL;
15ecaba9
K
2505 }
2506
eee0e16f 2507 return 0;
15ecaba9
K
2508}
2509
3af36706
VK
2510static struct snd_soc_tplg_ops skl_tplg_ops = {
2511 .widget_load = skl_tplg_widget_load,
140adfba
JK
2512 .control_load = skl_tplg_control_load,
2513 .bytes_ext_ops = skl_tlv_ops,
2514 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
15ecaba9 2515 .manifest = skl_manifest_load,
3af36706
VK
2516};
2517
287af4f9
JK
2518/*
2519 * A pipe can have multiple modules, each of which will also be a DAPM
2520 * widget. While managing a pipeline we need the list of all the
2521 * widgets in that pipeline, so this helper - skl_tplg_create_pipe_widget_list()
2522 * - collects the SKL type widgets belonging to it
2523 */
2524static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
2525{
2526 struct snd_soc_dapm_widget *w;
2527 struct skl_module_cfg *mcfg = NULL;
2528 struct skl_pipe_module *p_module = NULL;
2529 struct skl_pipe *pipe;
2530
2531 list_for_each_entry(w, &platform->component.card->widgets, list) {
2532 if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
2533 mcfg = w->priv;
2534 pipe = mcfg->pipe;
2535
2536 p_module = devm_kzalloc(platform->dev,
2537 sizeof(*p_module), GFP_KERNEL);
2538 if (!p_module)
2539 return -ENOMEM;
2540
2541 p_module->w = w;
2542 list_add_tail(&p_module->node, &pipe->w_list);
2543 }
2544 }
2545
2546 return 0;
2547}
2548
f0aa94fa
JK
2549static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
2550{
2551 struct skl_pipe_module *w_module;
2552 struct snd_soc_dapm_widget *w;
2553 struct skl_module_cfg *mconfig;
2554 bool host_found = false, link_found = false;
2555
2556 list_for_each_entry(w_module, &pipe->w_list, node) {
2557 w = w_module->w;
2558 mconfig = w->priv;
2559
2560 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
2561 host_found = true;
2562 else if (mconfig->dev_type != SKL_DEVICE_NONE)
2563 link_found = true;
2564 }
2565
2566 if (host_found && link_found)
2567 pipe->passthru = true;
2568 else
2569 pipe->passthru = false;
2570}
2571
3af36706
VK
2572/* This will be read from topology manifest, currently defined here */
2573#define SKL_MAX_MCPS 30000000
2574#define SKL_FW_MAX_MEM 1000000
2575
2576/*
2577 * SKL topology init routine
2578 */
2579int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
2580{
2581 int ret;
2582 const struct firmware *fw;
2583 struct hdac_bus *bus = ebus_to_hbus(ebus);
2584 struct skl *skl = ebus_to_skl(ebus);
f0aa94fa 2585 struct skl_pipeline *ppl;
3af36706 2586
4b235c43 2587 ret = request_firmware(&fw, skl->tplg_name, bus->dev);
3af36706 2588 if (ret < 0) {
b663a8c5 2589 dev_err(bus->dev, "tplg fw %s load failed with %d\n",
4b235c43
VK
2590 skl->tplg_name, ret);
2591 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
2592 if (ret < 0) {
2593 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
2594 "dfw_sst.bin", ret);
2595 return ret;
2596 }
3af36706
VK
2597 }
2598
2599 /*
2600 * The complete tplg for SKL is loaded as index 0, we don't use
2601 * any other index
2602 */
b663a8c5
JK
2603 ret = snd_soc_tplg_component_load(&platform->component,
2604 &skl_tplg_ops, fw, 0);
3af36706
VK
2605 if (ret < 0) {
2606 dev_err(bus->dev, "tplg component load failed %d\n", ret);
c14a82c7 2607 release_firmware(fw);
3af36706
VK
2608 return -EINVAL;
2609 }
2610
2611 skl->resource.max_mcps = SKL_MAX_MCPS;
2612 skl->resource.max_mem = SKL_FW_MAX_MEM;
2613
d8018361 2614 skl->tplg = fw;
287af4f9
JK
2615 ret = skl_tplg_create_pipe_widget_list(platform);
2616 if (ret < 0)
2617 return ret;
d8018361 2618
f0aa94fa
JK
2619 list_for_each_entry(ppl, &skl->ppl_list, node)
2620 skl_tplg_set_pipe_type(skl, ppl->pipe);
d8018361 2621
3af36706
VK
2622 return 0;
2623}
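/*
 * Typical call site (a sketch only; the real caller lives in the Skylake
 * PCM platform driver, outside this file):
 *
 *	ret = skl_tplg_init(platform, ebus);
 *	if (ret < 0)
 *		dev_err(platform->dev, "topology init failed: %d\n", ret);
 *
 * Once this returns, each SKL widget carries its skl_module_cfg in w->priv
 * and every pipe's w_list is populated for the pipeline event handlers.
 */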