1/*
2 * skl-topology.c - Implements Platform component ALSA controls/widget
3 * handlers.
4 *
5 * Copyright (C) 2014-2015 Intel Corp
6 * Author: Jeeja KP <jeeja.kp@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19#include <linux/slab.h>
20#include <linux/types.h>
21#include <linux/firmware.h>
22#include <sound/soc.h>
23#include <sound/soc-topology.h>
24#include <uapi/sound/snd_sst_tokens.h>
25#include "skl-sst-dsp.h"
26#include "skl-sst-ipc.h"
27#include "skl-topology.h"
28#include "skl.h"
29#include "skl-tplg-interface.h"
30#include "../common/sst-dsp.h"
31#include "../common/sst-dsp-priv.h"
32
33#define SKL_CH_FIXUP_MASK (1 << 0)
34#define SKL_RATE_FIXUP_MASK (1 << 1)
35#define SKL_FMT_FIXUP_MASK (1 << 2)
36#define SKL_IN_DIR_BIT_MASK BIT(0)
37#define SKL_PIN_COUNT_MASK GENMASK(7, 4)
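/* SKL_TKN_U32_DIR_PIN_COUNT packs the direction in the low nibble and the pin count in bits 7:4 */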
38
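/* Reference counts per D0i3 (low-power) capability class, bumped as widgets come and go */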
39void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
40{
41 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
42
43 switch (caps) {
44 case SKL_D0I3_NONE:
45 d0i3->non_d0i3++;
46 break;
47
48 case SKL_D0I3_STREAMING:
49 d0i3->streaming++;
50 break;
51
52 case SKL_D0I3_NON_STREAMING:
53 d0i3->non_streaming++;
54 break;
55 }
56}
57
58void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
59{
60 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
61
62 switch (caps) {
63 case SKL_D0I3_NONE:
64 d0i3->non_d0i3--;
65 break;
66
67 case SKL_D0I3_STREAMING:
68 d0i3->streaming--;
69 break;
70
71 case SKL_D0I3_NON_STREAMING:
72 d0i3->non_streaming--;
73 break;
74 }
75}
76
77/*
78 * SKL DSP driver modelling uses only a few DAPM widget types; the rest are
79 * ignored. This helper checks if the SKL driver handles this widget type
80 */
81static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
82{
83 switch (w->id) {
84 case snd_soc_dapm_dai_link:
85 case snd_soc_dapm_dai_in:
86 case snd_soc_dapm_aif_in:
87 case snd_soc_dapm_aif_out:
88 case snd_soc_dapm_dai_out:
89 case snd_soc_dapm_switch:
90 return false;
91 default:
92 return true;
93 }
94}
95
96/*
97 * Each pipeline needs memory to be allocated. Check if we have free memory
98 * from the available pool.
99 */
100static bool skl_is_pipe_mem_avail(struct skl *skl,
101 struct skl_module_cfg *mconfig)
102{
103 struct skl_sst *ctx = skl->skl_sst;
104
105 if (skl->resource.mem + mconfig->pipe->memory_pages >
106 skl->resource.max_mem) {
107 dev_err(ctx->dev,
108 "%s: module_id %d instance %d\n", __func__,
109 mconfig->id.module_id,
110 mconfig->id.instance_id);
111 dev_err(ctx->dev,
112 "exceeds ppl memory available %d mem %d\n",
113 skl->resource.max_mem, skl->resource.mem);
114 return false;
115 } else {
116 return true;
117 }
118}
119
120/*
121 * Add the mem to the mem pool. This is freed when pipe is deleted.
122 * Note: the DSP does the actual memory management, we only keep track of the
123 * complete pool
124 */
125static void skl_tplg_alloc_pipe_mem(struct skl *skl,
126 struct skl_module_cfg *mconfig)
127{
128 skl->resource.mem += mconfig->pipe->memory_pages;
129}
130
131/*
132 * A pipeline needs DSP CPU resources for computation, this is
133 * quantified in MCPS (Million Clocks Per Second) required for the module/pipe
134 *
135 * Each pipeline needs mcps to be allocated. Check if we have mcps for this
136 * pipe.
137 */
138
139static bool skl_is_pipe_mcps_avail(struct skl *skl,
140 struct skl_module_cfg *mconfig)
141{
142 struct skl_sst *ctx = skl->skl_sst;
143
144 if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
145 dev_err(ctx->dev,
146 "%s: module_id %d instance %d\n", __func__,
147 mconfig->id.module_id, mconfig->id.instance_id);
148 dev_err(ctx->dev,
149 "exceeds ppl mcps available %d > mcps %d\n",
150 skl->resource.max_mcps, skl->resource.mcps);
151 return false;
152 } else {
153 return true;
154 }
155}
156
157static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
158 struct skl_module_cfg *mconfig)
159{
160 skl->resource.mcps += mconfig->mcps;
161}
162
163/*
164 * Free the mcps when tearing down
165 */
166static void
167skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
168{
169 skl->resource.mcps -= mconfig->mcps;
170}
171
172/*
173 * Free the memory when tearing down
174 */
175static void
176skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
177{
178 skl->resource.mem -= mconfig->pipe->memory_pages;
179}
180
181
182static void skl_dump_mconfig(struct skl_sst *ctx,
183 struct skl_module_cfg *mcfg)
184{
185 dev_dbg(ctx->dev, "Dumping config\n");
186 dev_dbg(ctx->dev, "Input Format:\n");
187 dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
188 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
189 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
190 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
191 dev_dbg(ctx->dev, "Output Format:\n");
192 dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
193 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
194 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
195 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
196}
197
198static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
199{
200 int slot_map = 0xFFFFFFFF;
201 int start_slot = 0;
202 int i;
203
204 for (i = 0; i < chs; i++) {
205 /*
206 * For 2 channels with starting slot as 0, slot map will
207 * look like 0xFFFFFF10.
208 */
209 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
210 start_slot++;
211 }
212 fmt->ch_map = slot_map;
213}
214
215static void skl_tplg_update_params(struct skl_module_fmt *fmt,
216 struct skl_pipe_params *params, int fixup)
217{
218 if (fixup & SKL_RATE_FIXUP_MASK)
219 fmt->s_freq = params->s_freq;
220 if (fixup & SKL_CH_FIXUP_MASK) {
221 fmt->channels = params->ch;
222 skl_tplg_update_chmap(fmt, fmt->channels);
223 }
224 if (fixup & SKL_FMT_FIXUP_MASK) {
225 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
226
227 /*
228 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
229 * container so update bit depth accordingly
230 */
231 switch (fmt->valid_bit_depth) {
232 case SKL_DEPTH_16BIT:
233 fmt->bit_depth = fmt->valid_bit_depth;
234 break;
235
236 default:
237 fmt->bit_depth = SKL_DEPTH_32BIT;
238 break;
239 }
240 }
241
242}
243
244/*
245 * A pipeline may have modules which impact the pcm parameters, like SRC,
246 * channel converter, format converter.
247 * We need to calculate the output params by applying the 'fixup'
248 * Topology will tell driver which type of fixup is to be applied by
249 * supplying the fixup mask, so based on that we calculate the output
250 *
251 * Now in FE the pcm hw_params is the source/target format. Same is applicable
252 * for BE with its hw_params invoked.
253 * Here, based on FE/BE pipeline and direction, we calculate the input and
254 * output fixup and then apply that to the module
255 */
256static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
257 struct skl_pipe_params *params, bool is_fe)
258{
259 int in_fixup, out_fixup;
260 struct skl_module_fmt *in_fmt, *out_fmt;
261
262 /* Fixups will be applied to pin 0 only */
263 in_fmt = &m_cfg->in_fmt[0];
264 out_fmt = &m_cfg->out_fmt[0];
265
266 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
267 if (is_fe) {
268 in_fixup = m_cfg->params_fixup;
269 out_fixup = (~m_cfg->converter) &
270 m_cfg->params_fixup;
271 } else {
272 out_fixup = m_cfg->params_fixup;
273 in_fixup = (~m_cfg->converter) &
274 m_cfg->params_fixup;
275 }
276 } else {
277 if (is_fe) {
278 out_fixup = m_cfg->params_fixup;
279 in_fixup = (~m_cfg->converter) &
280 m_cfg->params_fixup;
281 } else {
282 in_fixup = m_cfg->params_fixup;
283 out_fixup = (~m_cfg->converter) &
284 m_cfg->params_fixup;
285 }
286 }
287
288 skl_tplg_update_params(in_fmt, params, in_fixup);
289 skl_tplg_update_params(out_fmt, params, out_fixup);
290}
291
292/*
293 * A module needs input and output buffers, which are dependent upon pcm
294 * params, so once we have calculated the params, we need buffer calculation as
295 * well.
296 */
297static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
298 struct skl_module_cfg *mcfg)
299{
300 int multiplier = 1;
301 struct skl_module_fmt *in_fmt, *out_fmt;
302 int in_rate, out_rate;
303
304
305 /* Since the fixup is applied to pin 0 only, ibs and obs need to
306 * change for pin 0 only
307 */
308 in_fmt = &mcfg->in_fmt[0];
309 out_fmt = &mcfg->out_fmt[0];
310
311 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
312 multiplier = 5;
313
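	/* ibs/obs are bytes per 1ms of audio: round the sample rate up to whole samples per ms */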
314 if (in_fmt->s_freq % 1000)
315 in_rate = (in_fmt->s_freq / 1000) + 1;
316 else
317 in_rate = (in_fmt->s_freq / 1000);
318
319 mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
320 (mcfg->in_fmt->bit_depth >> 3) *
321 multiplier;
322
323 if (mcfg->out_fmt->s_freq % 1000)
324 out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
325 else
326 out_rate = (mcfg->out_fmt->s_freq / 1000);
327
328 mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
329 (mcfg->out_fmt->bit_depth >> 3) *
330 multiplier;
331}
332
333static u8 skl_tplg_be_dev_type(int dev_type)
334{
335 int ret;
336
337 switch (dev_type) {
338 case SKL_DEVICE_BT:
339 ret = NHLT_DEVICE_BT;
340 break;
341
342 case SKL_DEVICE_DMIC:
343 ret = NHLT_DEVICE_DMIC;
344 break;
345
346 case SKL_DEVICE_I2S:
347 ret = NHLT_DEVICE_I2S;
348 break;
349
350 default:
351 ret = NHLT_DEVICE_INVALID;
352 break;
353 }
354
355 return ret;
356}
357
358static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
359 struct skl_sst *ctx)
360{
361 struct skl_module_cfg *m_cfg = w->priv;
362 int link_type, dir;
363 u32 ch, s_freq, s_fmt;
364 struct nhlt_specific_cfg *cfg;
365 struct skl *skl = get_skl_ctx(ctx->dev);
366 u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
367
368 /* check if we already have blob */
369 if (m_cfg->formats_config.caps_size > 0)
370 return 0;
371
372 dev_dbg(ctx->dev, "Applying default cfg blob\n");
373 switch (m_cfg->dev_type) {
374 case SKL_DEVICE_DMIC:
375 link_type = NHLT_LINK_DMIC;
376 dir = SNDRV_PCM_STREAM_CAPTURE;
377 s_freq = m_cfg->in_fmt[0].s_freq;
378 s_fmt = m_cfg->in_fmt[0].bit_depth;
379 ch = m_cfg->in_fmt[0].channels;
380 break;
381
382 case SKL_DEVICE_I2S:
383 link_type = NHLT_LINK_SSP;
384 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
385 dir = SNDRV_PCM_STREAM_PLAYBACK;
386 s_freq = m_cfg->out_fmt[0].s_freq;
387 s_fmt = m_cfg->out_fmt[0].bit_depth;
388 ch = m_cfg->out_fmt[0].channels;
389 } else {
390 dir = SNDRV_PCM_STREAM_CAPTURE;
391 s_freq = m_cfg->in_fmt[0].s_freq;
392 s_fmt = m_cfg->in_fmt[0].bit_depth;
393 ch = m_cfg->in_fmt[0].channels;
394 }
395 break;
396
397 default:
398 return -EINVAL;
399 }
400
401 /* update the blob based on virtual bus_id and default params */
402 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
403 s_fmt, ch, s_freq, dir, dev_type);
404 if (cfg) {
405 m_cfg->formats_config.caps_size = cfg->size;
406 m_cfg->formats_config.caps = (u32 *) &cfg->caps;
407 } else {
408 dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
409 m_cfg->vbus_id, link_type, dir);
410 dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
411 ch, s_freq, s_fmt);
412 return -EIO;
413 }
414
415 return 0;
416}
417
418static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
419 struct skl_sst *ctx)
420{
421 struct skl_module_cfg *m_cfg = w->priv;
422 struct skl_pipe_params *params = m_cfg->pipe->p_params;
423 int p_conn_type = m_cfg->pipe->conn_type;
424 bool is_fe;
425
426 if (!m_cfg->params_fixup)
427 return;
428
429 dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE updation\n",
430 w->name);
431
432 skl_dump_mconfig(ctx, m_cfg);
433
434 if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
435 is_fe = true;
436 else
437 is_fe = false;
438
439 skl_tplg_update_params_fixup(m_cfg, params, is_fe);
440 skl_tplg_update_buffer_size(ctx, m_cfg);
441
442 dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER updation\n",
443 w->name);
444
445 skl_dump_mconfig(ctx, m_cfg);
446}
447
448/*
449 * Some modules can have multiple params set from user controls and these
450 * need to be set after the module is initialized. If the set_param flag is
451 * set, module params will be set after the module is initialized.
452 */
453static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
454 struct skl_sst *ctx)
455{
456 int i, ret;
457 struct skl_module_cfg *mconfig = w->priv;
458 const struct snd_kcontrol_new *k;
459 struct soc_bytes_ext *sb;
460 struct skl_algo_data *bc;
461 struct skl_specific_cfg *sp_cfg;
462
463 if (mconfig->formats_config.caps_size > 0 &&
464 mconfig->formats_config.set_params == SKL_PARAM_SET) {
465 sp_cfg = &mconfig->formats_config;
466 ret = skl_set_module_params(ctx, sp_cfg->caps,
467 sp_cfg->caps_size,
468 sp_cfg->param_id, mconfig);
469 if (ret < 0)
470 return ret;
471 }
472
473 for (i = 0; i < w->num_kcontrols; i++) {
474 k = &w->kcontrol_news[i];
475 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
476 sb = (void *) k->private_value;
477 bc = (struct skl_algo_data *)sb->dobj.private;
478
479 if (bc->set_params == SKL_PARAM_SET) {
480 ret = skl_set_module_params(ctx,
481 (u32 *)bc->params, bc->size,
482 bc->param_id, mconfig);
483 if (ret < 0)
484 return ret;
485 }
486 }
487 }
488
489 return 0;
490}
491
492/*
493 * Some module params can be set from user controls and this is required
494 * when the module is initialized. If a module param is required in init, it is
495 * identified by the set_param flag. If the set_param flag is not set, then this
496 * parameter needs to be set as part of module init.
497 */
498static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
499{
500 const struct snd_kcontrol_new *k;
501 struct soc_bytes_ext *sb;
502 struct skl_algo_data *bc;
503 struct skl_module_cfg *mconfig = w->priv;
504 int i;
505
506 for (i = 0; i < w->num_kcontrols; i++) {
507 k = &w->kcontrol_news[i];
508 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
509 sb = (struct soc_bytes_ext *)k->private_value;
510 bc = (struct skl_algo_data *)sb->dobj.private;
511
512 if (bc->set_params != SKL_PARAM_INIT)
513 continue;
514
515 mconfig->formats_config.caps = (u32 *)bc->params;
516 mconfig->formats_config.caps_size = bc->size;
517
518 break;
519 }
520 }
521
522 return 0;
523}
524
525static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
526 struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
527{
528 switch (mcfg->dev_type) {
529 case SKL_DEVICE_HDAHOST:
530 return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);
531
532 case SKL_DEVICE_HDALINK:
533 return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
534 }
535
536 return 0;
537}
538
539/*
540 * Inside a pipe instance, we can have various modules. These modules need
541 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is done
542 * by the skl_init_module() routine, so invoke that for all modules in a pipeline
543 */
544static int
545skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
546{
547 struct skl_pipe_module *w_module;
548 struct snd_soc_dapm_widget *w;
549 struct skl_module_cfg *mconfig;
550 struct skl_sst *ctx = skl->skl_sst;
551 int ret = 0;
552
553 list_for_each_entry(w_module, &pipe->w_list, node) {
554 w = w_module->w;
555 mconfig = w->priv;
556
557 /* check if module ids are populated */
558 if (mconfig->id.module_id < 0) {
559 dev_err(skl->skl_sst->dev,
560 "module %pUL id not populated\n",
561 (uuid_le *)mconfig->guid);
562 return -EIO;
563 }
564
565 /* check resource available */
566 if (!skl_is_pipe_mcps_avail(skl, mconfig))
567 return -ENOMEM;
568
569 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
570 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
571 mconfig->id.module_id, mconfig->guid);
572 if (ret < 0)
573 return ret;
574
575 mconfig->m_state = SKL_MODULE_LOADED;
576 }
577
578 /* prepare the DMA if the module is gateway cpr */
579 ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
580 if (ret < 0)
581 return ret;
582
583 /* update blob if blob is null for be with default value */
584 skl_tplg_update_be_blob(w, ctx);
585
586 /*
587 * apply fix/conversion to module params based on
588 * FE/BE params
589 */
590 skl_tplg_update_module_params(w, ctx);
591 mconfig->id.pvt_id = skl_get_pvt_id(ctx, mconfig);
592 if (mconfig->id.pvt_id < 0)
593 return ret;
594 skl_tplg_set_module_init_data(w);
595 ret = skl_init_module(ctx, mconfig);
596 if (ret < 0) {
597 skl_put_pvt_id(ctx, mconfig);
598 return ret;
599 }
600 skl_tplg_alloc_pipe_mcps(skl, mconfig);
601 ret = skl_tplg_set_module_params(w, ctx);
602 if (ret < 0)
603 return ret;
604 }
605
606 return 0;
607}
608
609static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
610 struct skl_pipe *pipe)
611{
612 int ret;
613 struct skl_pipe_module *w_module = NULL;
614 struct skl_module_cfg *mconfig = NULL;
615
616 list_for_each_entry(w_module, &pipe->w_list, node) {
617 mconfig = w_module->w->priv;
618
619 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
620 mconfig->m_state > SKL_MODULE_UNINIT) {
621 ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
622 mconfig->id.module_id);
623 if (ret < 0)
624 return -EIO;
625 }
626 skl_put_pvt_id(ctx, mconfig);
627 }
628
629 /* no modules to unload in this path, so return */
630 return 0;
631}
632
633/*
634 * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
635 * need to create the pipeline. So we do the following:
636 * - check the resources
637 * - Create the pipeline
638 * - Initialize the modules in pipeline
639 * - finally bind all modules together
640 */
641static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
642 struct skl *skl)
643{
644 int ret;
645 struct skl_module_cfg *mconfig = w->priv;
646 struct skl_pipe_module *w_module;
647 struct skl_pipe *s_pipe = mconfig->pipe;
648 struct skl_module_cfg *src_module = NULL, *dst_module;
649 struct skl_sst *ctx = skl->skl_sst;
650
651 /* check resource available */
652 if (!skl_is_pipe_mcps_avail(skl, mconfig))
653 return -EBUSY;
654
655 if (!skl_is_pipe_mem_avail(skl, mconfig))
656 return -ENOMEM;
657
658 /*
659 * Create a list of modules for pipe.
660 * This list contains modules from source to sink
661 */
662 ret = skl_create_pipeline(ctx, mconfig->pipe);
663 if (ret < 0)
664 return ret;
665
666 skl_tplg_alloc_pipe_mem(skl, mconfig);
667 skl_tplg_alloc_pipe_mcps(skl, mconfig);
668
669 /* Init all pipe modules from source to sink */
670 ret = skl_tplg_init_pipe_modules(skl, s_pipe);
671 if (ret < 0)
672 return ret;
673
674 /* Bind modules from source to sink */
675 list_for_each_entry(w_module, &s_pipe->w_list, node) {
676 dst_module = w_module->w->priv;
677
678 if (src_module == NULL) {
679 src_module = dst_module;
680 continue;
681 }
682
683 ret = skl_bind_modules(ctx, src_module, dst_module);
684 if (ret < 0)
685 return ret;
686
687 src_module = dst_module;
688 }
689
690 return 0;
691}
692
693static int skl_fill_sink_instance_id(struct skl_sst *ctx,
694 struct skl_algo_data *alg_data)
695{
696 struct skl_kpb_params *params = (struct skl_kpb_params *)alg_data->params;
697 struct skl_mod_inst_map *inst;
698 int i, pvt_id;
699
700 inst = params->map;
701
702 for (i = 0; i < params->num_modules; i++) {
703 pvt_id = skl_get_pvt_instance_id_map(ctx,
704 inst->mod_id, inst->inst_id);
705 if (pvt_id < 0)
706 return -EINVAL;
707 inst->inst_id = pvt_id;
708 inst++;
709 }
710 return 0;
711}
712
713/*
714 * Some modules require params to be set after the module is bound to
715 * all of its connected pins.
716 *
717 * The module provider initializes set_param flag for such modules and we
718 * send params after binding
719 */
720static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
721 struct skl_module_cfg *mcfg, struct skl_sst *ctx)
722{
723 int i, ret;
724 struct skl_module_cfg *mconfig = w->priv;
725 const struct snd_kcontrol_new *k;
726 struct soc_bytes_ext *sb;
727 struct skl_algo_data *bc;
728 struct skl_specific_cfg *sp_cfg;
729
730 /*
731 * check all out/in pins are in bind state.
732 * if so set the module param
733 */
734 for (i = 0; i < mcfg->max_out_queue; i++) {
735 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
736 return 0;
737 }
738
739 for (i = 0; i < mcfg->max_in_queue; i++) {
740 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
741 return 0;
742 }
743
744 if (mconfig->formats_config.caps_size > 0 &&
745 mconfig->formats_config.set_params == SKL_PARAM_BIND) {
746 sp_cfg = &mconfig->formats_config;
747 ret = skl_set_module_params(ctx, sp_cfg->caps,
748 sp_cfg->caps_size,
749 sp_cfg->param_id, mconfig);
750 if (ret < 0)
751 return ret;
752 }
753
754 for (i = 0; i < w->num_kcontrols; i++) {
755 k = &w->kcontrol_news[i];
756 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
757 sb = (void *) k->private_value;
758 bc = (struct skl_algo_data *)sb->dobj.private;
759
760 if (bc->set_params == SKL_PARAM_BIND) {
761 if (mconfig->m_type == SKL_MODULE_TYPE_KPB)
762 skl_fill_sink_instance_id(ctx, bc);
763 ret = skl_set_module_params(ctx,
764 (u32 *)bc->params, bc->max,
765 bc->param_id, mconfig);
766 if (ret < 0)
767 return ret;
768 }
769 }
770 }
771
772 return 0;
773}
774
775static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
776 struct skl *skl,
777 struct snd_soc_dapm_widget *src_w,
778 struct skl_module_cfg *src_mconfig)
779{
780 struct snd_soc_dapm_path *p;
781 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
782 struct skl_module_cfg *sink_mconfig;
783 struct skl_sst *ctx = skl->skl_sst;
784 int ret;
785
786 snd_soc_dapm_widget_for_each_sink_path(w, p) {
787 if (!p->connect)
788 continue;
789
790 dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
791 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
792
793 next_sink = p->sink;
794
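		/* not a DSP widget: recurse further down this path to find the DSP sink */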
795 if (!is_skl_dsp_widget_type(p->sink))
796 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
797
798 /*
799 * here we will check widgets in sink pipelines; they
800 * can be of any widget type and we are only interested if
801 * they are ones used for SKL, so check that first
802 */
803 if ((p->sink->priv != NULL) &&
804 is_skl_dsp_widget_type(p->sink)) {
805
806 sink = p->sink;
807 sink_mconfig = sink->priv;
808
809 if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
810 sink_mconfig->m_state == SKL_MODULE_UNINIT)
811 continue;
812
813 /* Bind source to sink, mixin is always source */
814 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
815 if (ret)
816 return ret;
817
818 /* set module params after bind */
819 skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
820 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
821
822 /* Start sinks pipe first */
823 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
824 if (sink_mconfig->pipe->conn_type !=
825 SKL_PIPE_CONN_TYPE_FE)
826 ret = skl_run_pipe(ctx,
827 sink_mconfig->pipe);
828 if (ret)
829 return ret;
830 }
831 }
832 }
833
834 if (!sink)
835 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
836
837 return 0;
838}
839
840/*
841 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
842 * we need to do the following:
843 * - Bind to sink pipeline
844 * Since the sink pipes can be running and we don't get mixer event on
845 * connect for already running mixer, we need to find the sink pipes
846 * here and bind to them. This way dynamic connect works.
847 * - Start sink pipeline, if not running
848 * - Then run current pipe
849 */
850static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
851 struct skl *skl)
852{
853 struct skl_module_cfg *src_mconfig;
854 struct skl_sst *ctx = skl->skl_sst;
855 int ret = 0;
856
857 src_mconfig = w->priv;
858
859 /*
860 * find which sink it is connected to, bind with the sink,
861 * if sink is not started, start sink pipe first, then start
862 * this pipe
863 */
864 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
865 if (ret)
866 return ret;
867
868 /* Start source pipe last after starting all sinks */
869 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
870 return skl_run_pipe(ctx, src_mconfig->pipe);
871
872 return 0;
873}
874
875static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
876 struct snd_soc_dapm_widget *w, struct skl *skl)
877{
878 struct snd_soc_dapm_path *p;
879 struct snd_soc_dapm_widget *src_w = NULL;
880 struct skl_sst *ctx = skl->skl_sst;
881
882 snd_soc_dapm_widget_for_each_source_path(w, p) {
883 src_w = p->source;
884 if (!p->connect)
885 continue;
886
887 dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
888 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
889
890 /*
891 * here we will check widgets in source pipelines; they can
892 * be of any widget type and we are only interested if they are
893 * ones used for SKL, so check that first
894 */
895 if ((p->source->priv != NULL) &&
896 is_skl_dsp_widget_type(p->source)) {
897 return p->source;
898 }
899 }
900
901 if (src_w != NULL)
902 return skl_get_src_dsp_widget(src_w, skl);
903
904 return NULL;
905}
906
907/*
908 * in the Post-PMU event of mixer we need to do the following:
909 * - Check if this pipe is running
910 * - if not, then
911 * - bind this pipeline to its source pipeline
912 * if source pipe is already running, this means it is a dynamic
913 * connection and we need to bind only to that pipe
914 * - start this pipeline
915 */
916static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
917 struct skl *skl)
918{
919 int ret = 0;
920 struct snd_soc_dapm_widget *source, *sink;
921 struct skl_module_cfg *src_mconfig, *sink_mconfig;
922 struct skl_sst *ctx = skl->skl_sst;
923 int src_pipe_started = 0;
924
925 sink = w;
926 sink_mconfig = sink->priv;
927
928 /*
929 * If source pipe is already started, that means source is driving
930 * one more sink before this sink got connected, Since source is
931 * started, bind this sink to source and start this pipe.
932 */
933 source = skl_get_src_dsp_widget(w, skl);
934 if (source != NULL) {
935 src_mconfig = source->priv;
936 sink_mconfig = sink->priv;
937 src_pipe_started = 1;
938
939 /*
940 * check pipe state, then no need to bind or start the
941 * pipe
d93f8e55 942 */
943 if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
944 src_pipe_started = 0;
945 }
946
947 if (src_pipe_started) {
948 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
949 if (ret)
950 return ret;
951
952 /* set module params after bind */
953 skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
954 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
955
956 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
957 ret = skl_run_pipe(ctx, sink_mconfig->pipe);
958 }
959
960 return ret;
961}
962
963/*
964 * in the Pre-PMD event of mixer we need to do the following:
965 * - Stop the pipe
966 * - find the source connections and remove that from dapm_path_list
967 * - unbind with source pipelines if still connected
968 */
969static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
970 struct skl *skl)
971{
d93f8e55 972 struct skl_module_cfg *src_mconfig, *sink_mconfig;
973 int ret = 0, i;
974 struct skl_sst *ctx = skl->skl_sst;
975
976 sink_mconfig = w->priv;
977
978 /* Stop the pipe */
979 ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
980 if (ret)
981 return ret;
982
983 for (i = 0; i < sink_mconfig->max_in_queue; i++) {
984 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
985 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
986 if (!src_mconfig)
987 continue;
988 /*
989 * If path_found == 1, that means pmd for source
990 * pipe has not occurred, source is connected to
991 * some other sink. so its responsibility of sink
992 * to unbind itself from source.
993 */
994 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
995 if (ret < 0)
996 return ret;
997
998 ret = skl_unbind_modules(ctx,
999 src_mconfig, sink_mconfig);
d93f8e55 1000 }
1001 }
1002
1003 return ret;
1004}
1005
1006/*
1007 * in the Post-PMD event of mixer we need to do the following:
1008 * - Free the mcps used
1009 * - Free the mem used
1010 * - Unbind the modules within the pipeline
1011 * - Delete the pipeline (modules are not required to be explicitly
1012 * deleted, pipeline delete is enough here)
1013 */
1014static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1015 struct skl *skl)
1016{
1017 struct skl_module_cfg *mconfig = w->priv;
1018 struct skl_pipe_module *w_module;
1019 struct skl_module_cfg *src_module = NULL, *dst_module;
1020 struct skl_sst *ctx = skl->skl_sst;
1021 struct skl_pipe *s_pipe = mconfig->pipe;
d93f8e55 1022
1023 if (s_pipe->state == SKL_PIPE_INVALID)
1024 return -EINVAL;
1025
1026 skl_tplg_free_pipe_mcps(skl, mconfig);
1027 skl_tplg_free_pipe_mem(skl, mconfig);
1028
1029 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1030 dst_module = w_module->w->priv;
1031
1032 if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
1033 skl_tplg_free_pipe_mcps(skl, dst_module);
1034 if (src_module == NULL) {
1035 src_module = dst_module;
1036 continue;
1037 }
1038
1039 skl_unbind_modules(ctx, src_module, dst_module);
1040 src_module = dst_module;
1041 }
1042
1043 skl_delete_pipe(ctx, mconfig->pipe);
1044
1045 return skl_tplg_unload_pipe_modules(ctx, s_pipe);
1046}
1047
1048/*
1049 * in the Post-PMD event of PGA we need to do the following:
1050 * - Free the mcps used
1051 * - Stop the pipeline
1052 * - If the source pipe is connected, unbind from the source pipelines
1053 */
1054static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1055 struct skl *skl)
1056{
d93f8e55 1057 struct skl_module_cfg *src_mconfig, *sink_mconfig;
1058 int ret = 0, i;
1059 struct skl_sst *ctx = skl->skl_sst;
1060
1061 src_mconfig = w->priv;
1062
1063 /* Stop the pipe since this is a mixin module */
1064 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
1065 if (ret)
1066 return ret;
1067
1068 for (i = 0; i < src_mconfig->max_out_queue; i++) {
1069 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1070 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1071 if (!sink_mconfig)
1072 continue;
1073 /*
1074 * This is a connecter and if path is found that means
1075 * unbind between source and sink has not happened yet
1076 */
1077 ret = skl_unbind_modules(ctx, src_mconfig,
1078 sink_mconfig);
1079 }
1080 }
1081
1082 return ret;
1083}
1084
1085/*
1086 * In modelling, we assume there will be ONLY one mixer in a pipeline. If
1087 * mixer is not required then it is treated as static mixer aka vmixer with
1088 * a hard path to source module
1089 * So we don't need to check if source is started or not as hard path puts
1090 * dependency on each other
1091 */
1092static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
1093 struct snd_kcontrol *k, int event)
1094{
1095 struct snd_soc_dapm_context *dapm = w->dapm;
1096 struct skl *skl = get_skl_ctx(dapm->dev);
1097
1098 switch (event) {
1099 case SND_SOC_DAPM_PRE_PMU:
1100 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1101
1102 case SND_SOC_DAPM_POST_PMU:
1103 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1104
1105 case SND_SOC_DAPM_PRE_PMD:
1106 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1107
1108 case SND_SOC_DAPM_POST_PMD:
1109 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1110 }
1111
1112 return 0;
1113}
1114
1115/*
1116 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1117 * second one is required that is created as another pipe entity.
1118 * The mixer is responsible for pipe management and represent a pipeline
1119 * instance
1120 */
1121static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1122 struct snd_kcontrol *k, int event)
1123{
1124 struct snd_soc_dapm_context *dapm = w->dapm;
1125 struct skl *skl = get_skl_ctx(dapm->dev);
1126
1127 switch (event) {
1128 case SND_SOC_DAPM_PRE_PMU:
1129 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1130
1131 case SND_SOC_DAPM_POST_PMU:
1132 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1133
1134 case SND_SOC_DAPM_PRE_PMD:
1135 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1136
1137 case SND_SOC_DAPM_POST_PMD:
1138 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1139 }
1140
1141 return 0;
1142}
1143
1144/*
1145 * In modelling, we assumed rest of the modules in pipeline are PGA. But we
1146 * are interested in last PGA (leaf PGA) in a pipeline to disconnect with
1147 * the sink when it is running (two FE to one BE or one FE to two BE)
1148 * scenarios
1149 */
1150static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1151 struct snd_kcontrol *k, int event)
1152
1153{
1154 struct snd_soc_dapm_context *dapm = w->dapm;
1155 struct skl *skl = get_skl_ctx(dapm->dev);
1156
1157 switch (event) {
1158 case SND_SOC_DAPM_PRE_PMU:
1159 return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1160
1161 case SND_SOC_DAPM_POST_PMD:
1162 return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1163 }
1164
1165 return 0;
1166}
1167
1168static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1169 unsigned int __user *data, unsigned int size)
1170{
1171 struct soc_bytes_ext *sb =
1172 (struct soc_bytes_ext *)kcontrol->private_value;
1173 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
1174 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1175 struct skl_module_cfg *mconfig = w->priv;
1176 struct skl *skl = get_skl_ctx(w->dapm->dev);
1177
1178 if (w->power)
1179 skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
1180 bc->size, bc->param_id, mconfig);
1181
1182 /* decrement size for TLV header */
1183 size -= 2 * sizeof(u32);
1184
1185 /* check size as we don't want to send kernel data */
1186 if (size > bc->max)
1187 size = bc->max;
1188
1189 if (bc->params) {
1190 if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1191 return -EFAULT;
1192 if (copy_to_user(data + 1, &size, sizeof(u32)))
1193 return -EFAULT;
1194 if (copy_to_user(data + 2, bc->params, size))
1195 return -EFAULT;
1196 }
1197
1198 return 0;
1199}
1200
1201#define SKL_PARAM_VENDOR_ID 0xff
1202
1203static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1204 const unsigned int __user *data, unsigned int size)
1205{
1206 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1207 struct skl_module_cfg *mconfig = w->priv;
1208 struct soc_bytes_ext *sb =
1209 (struct soc_bytes_ext *)kcontrol->private_value;
1210 struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1211 struct skl *skl = get_skl_ctx(w->dapm->dev);
1212
1213 if (ac->params) {
1214 if (size > ac->max)
1215 return -EINVAL;
1216
1217 ac->size = size;
1218 /*
1219 * if the param_id is of type Vendor, firmware expects actual
1220 * parameter id and size from the control.
1221 */
1222 if (ac->param_id == SKL_PARAM_VENDOR_ID) {
1223 if (copy_from_user(ac->params, data, size))
1224 return -EFAULT;
1225 } else {
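			/* non-vendor param: skip the two u32 TLV header words (param id and size) */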
1226 if (copy_from_user(ac->params,
1227 data + 2, size))
1228 return -EFAULT;
1229 }
1230
1231 if (w->power)
1232 return skl_set_module_params(skl->skl_sst,
1233 (u32 *)ac->params, ac->size,
1234 ac->param_id, mconfig);
1235 }
1236
1237 return 0;
1238}
1239
1240/*
1241 * Fill the dma id for host and link. In case of passthrough
1242 * pipeline, this will have both host and link in the same
1243 * pipeline, so we need to copy the link and host based on dev_type
1244 */
1245static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1246 struct skl_pipe_params *params)
1247{
1248 struct skl_pipe *pipe = mcfg->pipe;
1249
1250 if (pipe->passthru) {
1251 switch (mcfg->dev_type) {
1252 case SKL_DEVICE_HDALINK:
1253 pipe->p_params->link_dma_id = params->link_dma_id;
1254 pipe->p_params->link_index = params->link_index;
1255 break;
1256
1257 case SKL_DEVICE_HDAHOST:
1258 pipe->p_params->host_dma_id = params->host_dma_id;
1259 break;
1260
1261 default:
1262 break;
1263 }
1264 pipe->p_params->s_fmt = params->s_fmt;
1265 pipe->p_params->ch = params->ch;
1266 pipe->p_params->s_freq = params->s_freq;
1267 pipe->p_params->stream = params->stream;
1268 pipe->p_params->format = params->format;
1269
1270 } else {
1271 memcpy(pipe->p_params, params, sizeof(*params));
1272 }
1273}
1274
1275/*
1276 * The FE params are passed by hw_params of the DAI.
1277 * On hw_params, the params are stored in Gateway module of the FE and we
1278 * need to calculate the format in DSP module configuration, that
1279 * conversion is done here
1280 */
1281int skl_tplg_update_pipe_params(struct device *dev,
1282 struct skl_module_cfg *mconfig,
1283 struct skl_pipe_params *params)
1284{
1285 struct skl_module_fmt *format = NULL;
1286
1287 skl_tplg_fill_dma_id(mconfig, params);
1288
1289 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
1290 format = &mconfig->in_fmt[0];
1291 else
1292 format = &mconfig->out_fmt[0];
1293
1294 /* set the hw_params */
1295 format->s_freq = params->s_freq;
1296 format->channels = params->ch;
1297 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1298
1299 /*
1300 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
1301 * container so update bit depth accordingly
1302 */
1303 switch (format->valid_bit_depth) {
1304 case SKL_DEPTH_16BIT:
1305 format->bit_depth = format->valid_bit_depth;
1306 break;
1307
1308 case SKL_DEPTH_24BIT:
1309 case SKL_DEPTH_32BIT:
1310 format->bit_depth = SKL_DEPTH_32BIT;
1311 break;
1312
1313 default:
1314 dev_err(dev, "Invalid bit depth %x for pipe\n",
1315 format->valid_bit_depth);
1316 return -EINVAL;
1317 }
1318
1319 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1320 mconfig->ibs = (format->s_freq / 1000) *
1321 (format->channels) *
1322 (format->bit_depth >> 3);
1323 } else {
1324 mconfig->obs = (format->s_freq / 1000) *
1325 (format->channels) *
1326 (format->bit_depth >> 3);
1327 }
1328
1329 return 0;
1330}
1331
1332/*
1333 * Query the module config for the FE DAI
1334 * This is used to find the hw_params set for that DAI and apply to FE
1335 * pipeline
1336 */
1337struct skl_module_cfg *
1338skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1339{
1340 struct snd_soc_dapm_widget *w;
1341 struct snd_soc_dapm_path *p = NULL;
1342
1343 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1344 w = dai->playback_widget;
1345 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1346 if (p->connect && p->sink->power &&
1347 !is_skl_dsp_widget_type(p->sink))
1348 continue;
1349
1350 if (p->sink->priv) {
1351 dev_dbg(dai->dev, "set params for %s\n",
1352 p->sink->name);
1353 return p->sink->priv;
1354 }
1355 }
1356 } else {
1357 w = dai->capture_widget;
1358 snd_soc_dapm_widget_for_each_source_path(w, p) {
1359 if (p->connect && p->source->power &&
1360 !is_skl_dsp_widget_type(p->source))
1361 continue;
1362
1363 if (p->source->priv) {
1364 dev_dbg(dai->dev, "set params for %s\n",
1365 p->source->name);
1366 return p->source->priv;
1367 }
1368 }
1369 }
1370
1371 return NULL;
1372}
1373
1374static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1375 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1376{
1377 struct snd_soc_dapm_path *p;
1378 struct skl_module_cfg *mconfig = NULL;
1379
1380 snd_soc_dapm_widget_for_each_source_path(w, p) {
1381 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1382 if (p->connect &&
1383 (p->sink->id == snd_soc_dapm_aif_out) &&
1384 p->source->priv) {
1385 mconfig = p->source->priv;
1386 return mconfig;
1387 }
1388 mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1389 if (mconfig)
1390 return mconfig;
1391 }
1392 }
1393 return mconfig;
1394}
1395
1396static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1397 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1398{
1399 struct snd_soc_dapm_path *p;
1400 struct skl_module_cfg *mconfig = NULL;
1401
1402 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1403 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1404 if (p->connect &&
1405 (p->source->id == snd_soc_dapm_aif_in) &&
1406 p->sink->priv) {
1407 mconfig = p->sink->priv;
1408 return mconfig;
1409 }
1410 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1411 if (mconfig)
1412 return mconfig;
1413 }
1414 }
1415 return mconfig;
1416}
1417
1418struct skl_module_cfg *
1419skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1420{
1421 struct snd_soc_dapm_widget *w;
1422 struct skl_module_cfg *mconfig;
1423
1424 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1425 w = dai->playback_widget;
1426 mconfig = skl_get_mconfig_pb_cpr(dai, w);
1427 } else {
1428 w = dai->capture_widget;
1429 mconfig = skl_get_mconfig_cap_cpr(dai, w);
1430 }
1431 return mconfig;
1432}
1433
1434static u8 skl_tplg_be_link_type(int dev_type)
1435{
1436 int ret;
1437
1438 switch (dev_type) {
1439 case SKL_DEVICE_BT:
1440 ret = NHLT_LINK_SSP;
1441 break;
1442
1443 case SKL_DEVICE_DMIC:
1444 ret = NHLT_LINK_DMIC;
1445 break;
1446
1447 case SKL_DEVICE_I2S:
1448 ret = NHLT_LINK_SSP;
1449 break;
1450
1451 case SKL_DEVICE_HDALINK:
1452 ret = NHLT_LINK_HDA;
1453 break;
1454
1455 default:
1456 ret = NHLT_LINK_INVALID;
1457 break;
1458 }
1459
1460 return ret;
1461}
1462
1463/*
1464 * Fill the BE gateway parameters
1465 * The BE gateway expects a blob of parameters which are kept in the ACPI
1466 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1467 * The port can have multiple settings so pick based on the PCM
1468 * parameters
1469 */
1470static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1471 struct skl_module_cfg *mconfig,
1472 struct skl_pipe_params *params)
1473{
1474 struct nhlt_specific_cfg *cfg;
1475 struct skl *skl = get_skl_ctx(dai->dev);
1476 int link_type = skl_tplg_be_link_type(mconfig->dev_type);
1477 u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
1478
1479 skl_tplg_fill_dma_id(mconfig, params);
1480
1481 if (link_type == NHLT_LINK_HDA)
1482 return 0;
1483
1484 /* update the blob based on virtual bus_id*/
1485 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1486 params->s_fmt, params->ch,
1487 params->s_freq, params->stream,
1488 dev_type);
1489 if (cfg) {
1490 mconfig->formats_config.caps_size = cfg->size;
1491 mconfig->formats_config.caps = (u32 *) &cfg->caps;
1492 } else {
1493 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
1494 mconfig->vbus_id, link_type,
1495 params->stream);
1496 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
1497 params->ch, params->s_freq, params->s_fmt);
1498 return -EINVAL;
1499 }
1500
1501 return 0;
1502}
1503
1504static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1505 struct snd_soc_dapm_widget *w,
1506 struct skl_pipe_params *params)
1507{
1508 struct snd_soc_dapm_path *p;
1509 int ret = -EIO;
1510
1511 snd_soc_dapm_widget_for_each_source_path(w, p) {
1512 if (p->connect && is_skl_dsp_widget_type(p->source) &&
1513 p->source->priv) {
1514
1515 ret = skl_tplg_be_fill_pipe_params(dai,
1516 p->source->priv, params);
1517 if (ret < 0)
1518 return ret;
cfb0a873 1519 } else {
1520 ret = skl_tplg_be_set_src_pipe_params(dai,
1521 p->source, params);
1522 if (ret < 0)
1523 return ret;
1524 }
1525 }
1526
1527 return ret;
1528}
1529
1530static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1531 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1532{
1533 struct snd_soc_dapm_path *p = NULL;
1534 int ret = -EIO;
1535
1536 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1537 if (p->connect && is_skl_dsp_widget_type(p->sink) &&
1538 p->sink->priv) {
1539
1540 ret = skl_tplg_be_fill_pipe_params(dai,
1541 p->sink->priv, params);
1542 if (ret < 0)
1543 return ret;
cfb0a873 1544 } else {
1545 ret = skl_tplg_be_set_sink_pipe_params(
cfb0a873 1546 dai, p->sink, params);
1547 if (ret < 0)
1548 return ret;
1549 }
1550 }
1551
1552 return ret;
1553}
1554
1555/*
1556 * BE hw_params can be a source parameters (capture) or sink parameters
1557 * (playback). Based on sink and source we need to either find the source
1558 * list or the sink list and set the pipeline parameters
1559 */
1560int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1561 struct skl_pipe_params *params)
1562{
1563 struct snd_soc_dapm_widget *w;
1564
1565 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1566 w = dai->playback_widget;
1567
1568 return skl_tplg_be_set_src_pipe_params(dai, w, params);
1569
1570 } else {
1571 w = dai->capture_widget;
1572
1573 return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1574 }
1575
1576 return 0;
1577}
1578
1579static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1580 {SKL_MIXER_EVENT, skl_tplg_mixer_event},
1581 {SKL_VMIXER_EVENT, skl_tplg_vmixer_event},
1582 {SKL_PGA_EVENT, skl_tplg_pga_event},
1583};
1584
1585static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1586 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1587 skl_tplg_tlv_control_set},
1588};
1589
1590static int skl_tplg_fill_pipe_tkn(struct device *dev,
1591 struct skl_pipe *pipe, u32 tkn,
1592 u32 tkn_val)
1593{
3af36706 1594
1595 switch (tkn) {
1596 case SKL_TKN_U32_PIPE_CONN_TYPE:
1597 pipe->conn_type = tkn_val;
1598 break;
1599
1600 case SKL_TKN_U32_PIPE_PRIORITY:
1601 pipe->pipe_priority = tkn_val;
1602 break;
1603
1604 case SKL_TKN_U32_PIPE_MEM_PGS:
1605 pipe->memory_pages = tkn_val;
1606 break;
1607
1608 case SKL_TKN_U32_PMODE:
1609 pipe->lp_mode = tkn_val;
1610 break;
1611
1612 default:
1613 dev_err(dev, "Token not handled %d\n", tkn);
1614 return -EINVAL;
1615 }
1616
1617 return 0;
1618}
1619
1620/*
1621 * Add pipeline by parsing the relevant tokens
1622 * Return an existing pipe if the pipe already exists.
1623 */
1624static int skl_tplg_add_pipe(struct device *dev,
1625 struct skl_module_cfg *mconfig, struct skl *skl,
1626 struct snd_soc_tplg_vendor_value_elem *tkn_elem)
1627{
1628 struct skl_pipeline *ppl;
1629 struct skl_pipe *pipe;
1630 struct skl_pipe_params *params;
1631
1632 list_for_each_entry(ppl, &skl->ppl_list, node) {
1633 if (ppl->pipe->ppl_id == tkn_elem->value) {
1634 mconfig->pipe = ppl->pipe;
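			/* positive EEXIST (not -EEXIST) tells the caller this pipe was already created */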
1635 return EEXIST;
1636 }
1637 }
1638
1639 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
1640 if (!ppl)
1641 return -ENOMEM;
1642
1643 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
1644 if (!pipe)
1645 return -ENOMEM;
1646
1647 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
1648 if (!params)
1649 return -ENOMEM;
1650
3af36706 1651 pipe->p_params = params;
1652 pipe->ppl_id = tkn_elem->value;
1653 INIT_LIST_HEAD(&pipe->w_list);
1654
1655 ppl->pipe = pipe;
1656 list_add(&ppl->node, &skl->ppl_list);
1657
1658 mconfig->pipe = pipe;
1659 mconfig->pipe->state = SKL_PIPE_INVALID;
1660
1661 return 0;
1662}
1663
1664static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
1665 struct skl_module_pin *m_pin,
1666 int pin_index, u32 value)
1667{
1668 switch (tkn) {
1669 case SKL_TKN_U32_PIN_MOD_ID:
1670 m_pin[pin_index].id.module_id = value;
1671 break;
1672
1673 case SKL_TKN_U32_PIN_INST_ID:
1674 m_pin[pin_index].id.instance_id = value;
1675 break;
1676
1677 default:
1678 dev_err(dev, "%d Not a pin token\n", value);
1679 return -EINVAL;
1680 }
1681
1682 return 0;
1683}
1684
1685/*
1686 * Parse for pin config specific tokens to fill up the
1687 * module private data
1688 */
1689static int skl_tplg_fill_pins_info(struct device *dev,
1690 struct skl_module_cfg *mconfig,
1691 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1692 int dir, int pin_count)
1693{
1694 int ret;
1695 struct skl_module_pin *m_pin;
1696
1697 switch (dir) {
1698 case SKL_DIR_IN:
1699 m_pin = mconfig->m_in_pin;
1700 break;
1701
1702 case SKL_DIR_OUT:
1703 m_pin = mconfig->m_out_pin;
1704 break;
1705
1706 default:
1707 dev_err(dev, "Invalid direction value\n");
1708 return -EINVAL;
1709 }
1710
1711 ret = skl_tplg_fill_pin(dev, tkn_elem->token,
1712 m_pin, pin_count, tkn_elem->value);
1713
1714 if (ret < 0)
1715 return ret;
1716
1717 m_pin[pin_count].in_use = false;
1718 m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
1719
1720 return 0;
3af36706
VK
1721}
1722
6277e832
SN
1723/*
1724 * Fill up input/output module config format based
1725 * on the direction
1726 */
1727static int skl_tplg_fill_fmt(struct device *dev,
1728 struct skl_module_cfg *mconfig, u32 tkn,
1729 u32 value, u32 dir, u32 pin_count)
1730{
1731 struct skl_module_fmt *dst_fmt;
1732
1733 switch (dir) {
1734 case SKL_DIR_IN:
1735 dst_fmt = mconfig->in_fmt;
1736 dst_fmt += pin_count;
1737 break;
1738
1739 case SKL_DIR_OUT:
1740 dst_fmt = mconfig->out_fmt;
1741 dst_fmt += pin_count;
1742 break;
1743
1744 default:
1745 dev_err(dev, "Invalid direction value\n");
1746 return -EINVAL;
1747 }
1748
1749 switch (tkn) {
1750 case SKL_TKN_U32_FMT_CH:
1751 dst_fmt->channels = value;
1752 break;
1753
1754 case SKL_TKN_U32_FMT_FREQ:
1755 dst_fmt->s_freq = value;
1756 break;
1757
1758 case SKL_TKN_U32_FMT_BIT_DEPTH:
1759 dst_fmt->bit_depth = value;
1760 break;
1761
1762 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1763 dst_fmt->valid_bit_depth = value;
1764 break;
1765
1766 case SKL_TKN_U32_FMT_CH_CONFIG:
1767 dst_fmt->ch_cfg = value;
1768 break;
1769
1770 case SKL_TKN_U32_FMT_INTERLEAVE:
1771 dst_fmt->interleaving_style = value;
1772 break;
1773
1774 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1775 dst_fmt->sample_type = value;
1776 break;
1777
1778 case SKL_TKN_U32_FMT_CH_MAP:
1779 dst_fmt->ch_map = value;
1780 break;
1781
1782 default:
1783 dev_err(dev, "Invalid token %d\n", tkn);
1784 return -EINVAL;
1785 }
1786
1787 return 0;
1788}
1789
1790static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
1791 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
1792{
1793 if (uuid_tkn->token == SKL_TKN_UUID)
1794 memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
1795 else {
1796 dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
1797 return -EINVAL;
1798 }
1799
1800 return 0;
1801}
1802
1803static void skl_tplg_fill_pin_dynamic_val(
1804 struct skl_module_pin *mpin, u32 pin_count, u32 value)
1805{
1806 int i;
1807
1808 for (i = 0; i < pin_count; i++)
1809 mpin[i].is_dynamic = value;
1810}
1811
1812/*
1813 * Parse tokens to fill up the module private data
1814 */
1815static int skl_tplg_get_token(struct device *dev,
1816 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1817 struct skl *skl, struct skl_module_cfg *mconfig)
1818{
1819 int tkn_count = 0;
1820 int ret;
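	/* pipe and pin state are static so they persist across the per-token calls for a widget */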
1821 static int is_pipe_exists;
1822 static int pin_index, dir;
1823
1824 if (tkn_elem->token > SKL_TKN_MAX)
1825 return -EINVAL;
1826
1827 switch (tkn_elem->token) {
1828 case SKL_TKN_U8_IN_QUEUE_COUNT:
1829 mconfig->max_in_queue = tkn_elem->value;
1830 mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
1831 sizeof(*mconfig->m_in_pin),
1832 GFP_KERNEL);
1833 if (!mconfig->m_in_pin)
1834 return -ENOMEM;
1835
1836 break;
1837
1838 case SKL_TKN_U8_OUT_QUEUE_COUNT:
1839 mconfig->max_out_queue = tkn_elem->value;
1840 mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
1841 sizeof(*mconfig->m_out_pin),
1842 GFP_KERNEL);
1843
1844 if (!mconfig->m_out_pin)
1845 return -ENOMEM;
1846
1847 break;
1848
1849 case SKL_TKN_U8_DYN_IN_PIN:
1850 if (!mconfig->m_in_pin)
1851 return -ENOMEM;
1852
1853 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
1854 mconfig->max_in_queue, tkn_elem->value);
1855
1856 break;
1857
1858 case SKL_TKN_U8_DYN_OUT_PIN:
1859 if (!mconfig->m_out_pin)
1860 return -ENOMEM;
1861
1862 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
1863 mconfig->max_out_queue, tkn_elem->value);
1864
1865 break;
1866
1867 case SKL_TKN_U8_TIME_SLOT:
1868 mconfig->time_slot = tkn_elem->value;
1869 break;
1870
1871 case SKL_TKN_U8_CORE_ID:
1872 mconfig->core_id = tkn_elem->value;
1873
1874 case SKL_TKN_U8_MOD_TYPE:
1875 mconfig->m_type = tkn_elem->value;
1876 break;
1877
1878 case SKL_TKN_U8_DEV_TYPE:
1879 mconfig->dev_type = tkn_elem->value;
1880 break;
1881
1882 case SKL_TKN_U8_HW_CONN_TYPE:
1883 mconfig->hw_conn_type = tkn_elem->value;
1884 break;
1885
1886 case SKL_TKN_U16_MOD_INST_ID:
1887 mconfig->id.instance_id =
1888 tkn_elem->value;
1889 break;
1890
1891 case SKL_TKN_U32_MEM_PAGES:
1892 mconfig->mem_pages = tkn_elem->value;
1893 break;
1894
1895 case SKL_TKN_U32_MAX_MCPS:
1896 mconfig->mcps = tkn_elem->value;
1897 break;
1898
1899 case SKL_TKN_U32_OBS:
1900 mconfig->obs = tkn_elem->value;
1901 break;
1902
1903 case SKL_TKN_U32_IBS:
1904 mconfig->ibs = tkn_elem->value;
1905 break;
1906
1907 case SKL_TKN_U32_VBUS_ID:
1908 mconfig->vbus_id = tkn_elem->value;
1909 break;
1910
1911 case SKL_TKN_U32_PARAMS_FIXUP:
1912 mconfig->params_fixup = tkn_elem->value;
1913 break;
1914
1915 case SKL_TKN_U32_CONVERTER:
1916 mconfig->converter = tkn_elem->value;
1917 break;
1918
1919 case SKL_TKL_U32_D0I3_CAPS:
1920 mconfig->d0i3_caps = tkn_elem->value;
1921 break;
1922
1923 case SKL_TKN_U32_PIPE_ID:
1924 ret = skl_tplg_add_pipe(dev,
1925 mconfig, skl, tkn_elem);
1926
1927 if (ret < 0)
1928 return is_pipe_exists;
1929
1930 if (ret == EEXIST)
1931 is_pipe_exists = 1;
1932
1933 break;
1934
1935 case SKL_TKN_U32_PIPE_CONN_TYPE:
1936 case SKL_TKN_U32_PIPE_PRIORITY:
1937 case SKL_TKN_U32_PIPE_MEM_PGS:
1938 case SKL_TKN_U32_PMODE:
1939 if (is_pipe_exists) {
1940 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
1941 tkn_elem->token, tkn_elem->value);
1942 if (ret < 0)
1943 return ret;
1944 }
1945
1946 break;
1947
1948 /*
1949 * The SKL_TKN_U32_DIR_PIN_COUNT token carries both the direction and
1950 * the pin count: the lower four bits hold the direction and the next
1951 * four bits hold the pin count.
1952 */
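/*
 * Illustrative example (value not taken from a real topology): a token
 * value of 0x31 would unpack below as
 *   dir       = 0x31 & SKL_IN_DIR_BIT_MASK       = 1
 *   pin_index = (0x31 & SKL_PIN_COUNT_MASK) >> 4  = 3
 */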
1953 case SKL_TKN_U32_DIR_PIN_COUNT:
1954 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
1955 pin_index = (tkn_elem->value &
1956 SKL_PIN_COUNT_MASK) >> 4;
1957
1958 break;
1959
1960 case SKL_TKN_U32_FMT_CH:
1961 case SKL_TKN_U32_FMT_FREQ:
1962 case SKL_TKN_U32_FMT_BIT_DEPTH:
1963 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1964 case SKL_TKN_U32_FMT_CH_CONFIG:
1965 case SKL_TKN_U32_FMT_INTERLEAVE:
1966 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1967 case SKL_TKN_U32_FMT_CH_MAP:
1968 ret = skl_tplg_fill_fmt(dev, mconfig, tkn_elem->token,
1969 tkn_elem->value, dir, pin_index);
1970
1971 if (ret < 0)
1972 return ret;
1973
1974 break;
1975
1976 case SKL_TKN_U32_PIN_MOD_ID:
1977 case SKL_TKN_U32_PIN_INST_ID:
1978 ret = skl_tplg_fill_pins_info(dev,
1979 mconfig, tkn_elem, dir,
1980 pin_index);
1981 if (ret < 0)
1982 return ret;
1983
1984 break;
1985
1986 case SKL_TKN_U32_CAPS_SIZE:
1987 mconfig->formats_config.caps_size =
1988 tkn_elem->value;
1989
1990 break;
1991
1992 case SKL_TKN_U32_PROC_DOMAIN:
1993 mconfig->domain =
1994 tkn_elem->value;
1995
1996 break;
1997
1998 case SKL_TKN_U8_IN_PIN_TYPE:
1999 case SKL_TKN_U8_OUT_PIN_TYPE:
2000 case SKL_TKN_U8_CONN_TYPE:
2001 break;
2002
2003 default:
2004 dev_err(dev, "Token %d not handled\n",
2005 tkn_elem->token);
2006 return -EINVAL;
4cd9899f 2007 }
2008
2009 tkn_count++;
2010
2011 return tkn_count;
2012}
2013
2014/*
2015 * Parse the vendor array for specific tokens to construct
2016 * module private data
2017 */
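/*
 * Each snd_soc_tplg_vendor_array begins with a small header that the walk
 * below relies on (field names as used in this file):
 *   array->size      - bytes occupied by this array, used to advance 'off'
 *   array->type      - SND_SOC_TPLG_TUPLE_TYPE_* (string, uuid or value)
 *   array->num_elems - number of token elements following the header
 */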
2018static int skl_tplg_get_tokens(struct device *dev,
2019 char *pvt_data, struct skl *skl,
2020 struct skl_module_cfg *mconfig, int block_size)
2021{
2022 struct snd_soc_tplg_vendor_array *array;
2023 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2024 int tkn_count = 0, ret;
2025 int off = 0, tuple_size = 0;
2026
2027 if (block_size <= 0)
2028 return -EINVAL;
2029
2030 while (tuple_size < block_size) {
2031 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2032
2033 off += array->size;
2034
2035 switch (array->type) {
2036 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
ecd286a9 2037 dev_warn(dev, "no string tokens expected for skl tplg\n");
2038 continue;
2039
2040 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2041 ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
2042 if (ret < 0)
2043 return ret;
2044
2045 tuple_size += sizeof(*array->uuid);
2046
2047 continue;
2048
2049 default:
2050 tkn_elem = array->value;
2051 tkn_count = 0;
2052 break;
2053 }
2054
2055 while (tkn_count <= (array->num_elems - 1)) {
2056 ret = skl_tplg_get_token(dev, tkn_elem,
2057 skl, mconfig);
2058
2059 if (ret < 0)
2060 return ret;
2061
2062 tkn_count = tkn_count + ret;
2063 tkn_elem++;
2064 }
2065
2066 tuple_size += tkn_count * sizeof(*tkn_elem);
2067 }
2068
2069 return 0;
2070}
2071
2072/*
2073 * Every data block is preceded by a descriptor to read the number
2074 * of data blocks, the type of the block and its size
2075 */
2076static int skl_tplg_get_desc_blocks(struct device *dev,
2077 struct snd_soc_tplg_vendor_array *array)
2078{
2079 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2080
2081 tkn_elem = array->value;
2082
2083 switch (tkn_elem->token) {
2084 case SKL_TKN_U8_NUM_BLOCKS:
2085 case SKL_TKN_U8_BLOCK_TYPE:
2086 case SKL_TKN_U16_BLOCK_SIZE:
2087 return tkn_elem->value;
2088
2089 default:
ecd286a9 2090 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
2091 break;
2092 }
2093
2094 return -EINVAL;
2095}
2096
2097/*
2098 * Parse the private data for the token and corresponding value.
2099 * The private data can have multiple data blocks. So, a data block
2100 * is preceded by a descriptor for the number of blocks and a descriptor
2101 * for the type and size of the succeeding data block.
2102 */
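/*
 * A minimal sketch of the expected layout (illustrative only):
 *
 *   [descriptor: SKL_TKN_U8_NUM_BLOCKS  = N]
 *   then, per block:
 *   [descriptor: SKL_TKN_U8_BLOCK_TYPE  = SKL_TYPE_TUPLE or a binary block]
 *   [descriptor: SKL_TKN_U16_BLOCK_SIZE = payload size in bytes]
 *   [payload: token tuples, or binary caps copied to formats_config]
 */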
2103static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2104 struct skl *skl, struct device *dev,
2105 struct skl_module_cfg *mconfig)
2106{
2107 struct snd_soc_tplg_vendor_array *array;
2108 int num_blocks, block_size = 0, block_type, off = 0;
2109 char *data;
2110 int ret;
2111
2112 /* Read the NUM_DATA_BLOCKS descriptor */
2113 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2114 ret = skl_tplg_get_desc_blocks(dev, array);
2115 if (ret < 0)
2116 return ret;
2117 num_blocks = ret;
2118
2119 off += array->size;
2120 array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);
2121
2122 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2123 while (num_blocks > 0) {
2124 ret = skl_tplg_get_desc_blocks(dev, array);
2125
2126 if (ret < 0)
2127 return ret;
2128 block_type = ret;
2129 off += array->size;
2130
2131 array = (struct snd_soc_tplg_vendor_array *)
2132 (tplg_w->priv.data + off);
2133
2134 ret = skl_tplg_get_desc_blocks(dev, array);
2135
2136 if (ret < 0)
2137 return ret;
2138 block_size = ret;
2139 off += array->size;
2140
2141 array = (struct snd_soc_tplg_vendor_array *)
2142 (tplg_w->priv.data + off);
2143
2144 data = (tplg_w->priv.data + off);
2145
2146 if (block_type == SKL_TYPE_TUPLE) {
2147 ret = skl_tplg_get_tokens(dev, data,
2148 skl, mconfig, block_size);
2149
2150 if (ret < 0)
2151 return ret;
2152
2153 --num_blocks;
2154 } else {
2155 if (mconfig->formats_config.caps_size > 0)
2156 memcpy(mconfig->formats_config.caps, data,
2157 mconfig->formats_config.caps_size);
2158 --num_blocks;
2159 }
2160 }
2161
2162 return 0;
2163}
2164
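/*
 * Reset the pin state of a widget owned by this platform: mark every input
 * and output pin as unused and unbound, and flag the module and its pipe
 * as uninitialized.
 */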
2165static void skl_clear_pin_config(struct snd_soc_platform *platform,
2166 struct snd_soc_dapm_widget *w)
2167{
2168 int i;
2169 struct skl_module_cfg *mconfig;
2170 struct skl_pipe *pipe;
2171
2172 if (!strncmp(w->dapm->component->name, platform->component.name,
2173 strlen(platform->component.name))) {
2174 mconfig = w->priv;
2175 pipe = mconfig->pipe;
2176 for (i = 0; i < mconfig->max_in_queue; i++) {
2177 mconfig->m_in_pin[i].in_use = false;
2178 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2179 }
2180 for (i = 0; i < mconfig->max_out_queue; i++) {
2181 mconfig->m_out_pin[i].in_use = false;
2182 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2183 }
2184 pipe->state = SKL_PIPE_INVALID;
2185 mconfig->m_state = SKL_MODULE_UNINIT;
2186 }
2187}
2188
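/*
 * Reset the accounted DSP resources (memory and MCPS) and clear the pin
 * state of every SKL widget on the card, for example when the pipelines
 * are torn down on the suspend path.
 */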
2189void skl_cleanup_resources(struct skl *skl)
2190{
2191 struct skl_sst *ctx = skl->skl_sst;
2192 struct snd_soc_platform *soc_platform = skl->platform;
2193 struct snd_soc_dapm_widget *w;
2194 struct snd_soc_card *card;
2195
2196 if (soc_platform == NULL)
2197 return;
2198
2199 card = soc_platform->component.card;
2200 if (!card || !card->instantiated)
2201 return;
2202
2203 skl->resource.mem = 0;
2204 skl->resource.mcps = 0;
2205
2206 list_for_each_entry(w, &card->widgets, list) {
2207 if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
2208 skl_clear_pin_config(soc_platform, w);
2209 }
2210
2211 skl_clear_module_cnt(ctx->dsp);
2212}
2213
2214/*
2215 * Topology core widget load callback
2216 *
2217 * This is used to save the private data for each widget, which gives the
2218 * driver information about the module and pipeline parameters the DSP FW
2219 * expects, such as ids, resource values, formats etc.
2220 */
2221static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
2222 struct snd_soc_dapm_widget *w,
2223 struct snd_soc_tplg_dapm_widget *tplg_w)
2224{
2225 int ret;
2226 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2227 struct skl *skl = ebus_to_skl(ebus);
2228 struct hdac_bus *bus = ebus_to_hbus(ebus);
2229 struct skl_module_cfg *mconfig;
2230
2231 if (!tplg_w->priv.size)
2232 goto bind_event;
2233
2234 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2235
2236 if (!mconfig)
2237 return -ENOMEM;
2238
2239 w->priv = mconfig;
09305da9 2240
2241 /*
2242 * module binary can be loaded later, so set it to query when
2243 * the module is loaded for a use case
2244 */
2245 mconfig->id.module_id = -1;
3af36706 2246
2247 /* Parse private data for tuples */
2248 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2249 if (ret < 0)
2250 return ret;
2251bind_event:
2252 if (tplg_w->event_type == 0) {
3373f716 2253 dev_dbg(bus->dev, "ASoC: No event handler required\n");
2254 return 0;
2255 }
2256
2257 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
2258 ARRAY_SIZE(skl_tplg_widget_ops),
2259 tplg_w->event_type);
2260
2261 if (ret) {
2262 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2263 __func__, tplg_w->event_type);
2264 return -EINVAL;
2265 }
2266
2267 return 0;
2268}
2269
2270static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2271 struct snd_soc_tplg_bytes_control *bc)
2272{
2273 struct skl_algo_data *ac;
2274 struct skl_dfw_algo_data *dfw_ac =
2275 (struct skl_dfw_algo_data *)bc->priv.data;
2276
2277 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2278 if (!ac)
2279 return -ENOMEM;
2280
2281 /* Fill private data */
2282 ac->max = dfw_ac->max;
2283 ac->param_id = dfw_ac->param_id;
2284 ac->set_params = dfw_ac->set_params;
0d682104 2285 ac->size = dfw_ac->max;
2286
2287 if (ac->max) {
2288 ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2289 if (!ac->params)
2290 return -ENOMEM;
2291
edd7ea2d 2292 memcpy(ac->params, dfw_ac->params, ac->max);
2293 }
2294
2295 be->dobj.private = ac;
2296 return 0;
2297}
2298
2299static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2300 struct snd_kcontrol_new *kctl,
2301 struct snd_soc_tplg_ctl_hdr *hdr)
2302{
2303 struct soc_bytes_ext *sb;
2304 struct snd_soc_tplg_bytes_control *tplg_bc;
2305 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2306 struct hdac_bus *bus = ebus_to_hbus(ebus);
2307
2308 switch (hdr->ops.info) {
2309 case SND_SOC_TPLG_CTL_BYTES:
2310 tplg_bc = container_of(hdr,
2311 struct snd_soc_tplg_bytes_control, hdr);
2312 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2313 sb = (struct soc_bytes_ext *)kctl->private_value;
2314 if (tplg_bc->priv.size)
2315 return skl_init_algo_data(
2316 bus->dev, sb, tplg_bc);
2317 }
2318 break;
2319
2320 default:
2321 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
2322 hdr->ops.get, hdr->ops.put, hdr->ops.info);
2323 break;
2324 }
2325
2326 return 0;
2327}
2328
2329static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
2330 struct snd_soc_tplg_vendor_string_elem *str_elem,
eee0e16f 2331 struct skl *skl)
2332{
2333 int tkn_count = 0;
2334 static int ref_count;
2335
2336 switch (str_elem->token) {
2337 case SKL_TKN_STR_LIB_NAME:
eee0e16f 2338 if (ref_count > skl->skl_sst->lib_count - 1) {
2339 ref_count = 0;
2340 return -EINVAL;
2341 }
2342
2343 strncpy(skl->skl_sst->lib_info[ref_count].name,
2344 str_elem->string,
2345 ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
2346 ref_count++;
2347 tkn_count++;
2348 break;
2349
2350 default:
ecd286a9 2351 dev_err(dev, "Not a string token %d\n", str_elem->token);
2352 break;
2353 }
2354
2355 return tkn_count;
2356}
2357
2358static int skl_tplg_get_str_tkn(struct device *dev,
2359 struct snd_soc_tplg_vendor_array *array,
eee0e16f 2360 struct skl *skl)
2361{
2362 int tkn_count = 0, ret;
2363 struct snd_soc_tplg_vendor_string_elem *str_elem;
2364
2365 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
2366 while (tkn_count < array->num_elems) {
eee0e16f 2367 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
2368 str_elem++;
2369
2370 if (ret < 0)
2371 return ret;
2372
2373 tkn_count = tkn_count + ret;
2374 }
2375
2376 return tkn_count;
2377}
2378
2379static int skl_tplg_get_int_tkn(struct device *dev,
2380 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
eee0e16f 2381 struct skl *skl)
2382{
2383 int tkn_count = 0;
2384
2385 switch (tkn_elem->token) {
2386 case SKL_TKN_U32_LIB_COUNT:
eee0e16f 2387 skl->skl_sst->lib_count = tkn_elem->value;
2388 tkn_count++;
2389 break;
2390
2391 default:
ecd286a9 2392 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
2393 return -EINVAL;
2394 }
2395
2396 return tkn_count;
2397}
2398
2399/*
2400 * Fill the manifest structure by parsing the tokens based on the
2401 * type.
2402 */
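/*
 * For example (illustrative), a manifest may carry SKL_TKN_U32_LIB_COUNT = 2
 * followed by two SKL_TKN_STR_LIB_NAME string tokens naming the loadable
 * library binaries.
 */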
2403static int skl_tplg_get_manifest_tkn(struct device *dev,
eee0e16f 2404 char *pvt_data, struct skl *skl,
2405 int block_size)
2406{
2407 int tkn_count = 0, ret;
2408 int off = 0, tuple_size = 0;
2409 struct snd_soc_tplg_vendor_array *array;
2410 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2411
2412 if (block_size <= 0)
2413 return -EINVAL;
2414
2415 while (tuple_size < block_size) {
2416 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2417 off += array->size;
2418 switch (array->type) {
2419 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
eee0e16f 2420 ret = skl_tplg_get_str_tkn(dev, array, skl);
2421
2422 if (ret < 0)
2423 return ret;
2424 tkn_count += ret;
2425
2426 tuple_size += tkn_count *
2427 sizeof(struct snd_soc_tplg_vendor_string_elem);
2428 continue;
2429
2430 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
ecd286a9 2431 dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
2432 continue;
2433
2434 default:
2435 tkn_elem = array->value;
2436 tkn_count = 0;
2437 break;
2438 }
2439
2440 while (tkn_count <= array->num_elems - 1) {
2441 ret = skl_tplg_get_int_tkn(dev,
eee0e16f 2442 tkn_elem, skl);
2443 if (ret < 0)
2444 return ret;
2445
2446 tkn_count = tkn_count + ret;
2447 tkn_elem++;
2448 tuple_size += tkn_count *
2449 sizeof(struct snd_soc_tplg_vendor_value_elem);
2450 break;
2451 }
2452 tkn_count = 0;
2453 }
2454
2455 return 0;
2456}
2457
2458/*
2459 * Parse manifest private data for tokens. The private data block is
2460 * preceded by descriptors for the type and size of the data block.
2461 */
2462static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
eee0e16f 2463 struct device *dev, struct skl *skl)
2464{
2465 struct snd_soc_tplg_vendor_array *array;
2466 int num_blocks, block_size = 0, block_type, off = 0;
2467 char *data;
2468 int ret;
2469
2470 /* Read the NUM_DATA_BLOCKS descriptor */
2471 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
2472 ret = skl_tplg_get_desc_blocks(dev, array);
2473 if (ret < 0)
2474 return ret;
2475 num_blocks = ret;
2476
2477 off += array->size;
2478 array = (struct snd_soc_tplg_vendor_array *)
2479 (manifest->priv.data + off);
2480
2481 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2482 while (num_blocks > 0) {
2483 ret = skl_tplg_get_desc_blocks(dev, array);
2484
2485 if (ret < 0)
2486 return ret;
2487 block_type = ret;
2488 off += array->size;
2489
2490 array = (struct snd_soc_tplg_vendor_array *)
2491 (manifest->priv.data + off);
2492
2493 ret = skl_tplg_get_desc_blocks(dev, array);
2494
2495 if (ret < 0)
2496 return ret;
2497 block_size = ret;
2498 off += array->size;
2499
2500 array = (struct snd_soc_tplg_vendor_array *)
2501 (manifest->priv.data + off);
2502
2503 data = (manifest->priv.data + off);
2504
2505 if (block_type == SKL_TYPE_TUPLE) {
eee0e16f 2506 ret = skl_tplg_get_manifest_tkn(dev, data, skl,
2507 block_size);
2508
2509 if (ret < 0)
2510 return ret;
2511
2512 --num_blocks;
2513 } else {
2514 return -EINVAL;
2515 }
2516 }
2517
2518 return 0;
2519}
2520
2521static int skl_manifest_load(struct snd_soc_component *cmpnt,
2522 struct snd_soc_tplg_manifest *manifest)
2523{
2524 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2525 struct hdac_bus *bus = ebus_to_hbus(ebus);
2526 struct skl *skl = ebus_to_skl(ebus);
15ecaba9 2527
2528 /* proceed only if we have private data defined */
2529 if (manifest->priv.size == 0)
2530 return 0;
2531
eee0e16f 2532 skl_tplg_get_manifest_data(manifest, bus->dev, skl);
15ecaba9 2533
eee0e16f 2534 if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
15ecaba9 2535 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
2536 skl->skl_sst->lib_count);
2537 return -EINVAL;
2538 }
2539
eee0e16f 2540 return 0;
2541}
2542
2543static struct snd_soc_tplg_ops skl_tplg_ops = {
2544 .widget_load = skl_tplg_widget_load,
2545 .control_load = skl_tplg_control_load,
2546 .bytes_ext_ops = skl_tlv_ops,
2547 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
15ecaba9 2548 .manifest = skl_manifest_load,
2549};
2550
2551/*
2552 * A pipe can have multiple modules, each of which is also a DAPM widget.
2553 * While managing a pipeline we need the list of all the widgets in that
2554 * pipeline, so this helper - skl_tplg_create_pipe_widget_list() - collects
2555 * the SKL type widgets of each pipeline.
2556 */
2557static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
2558{
2559 struct snd_soc_dapm_widget *w;
2560 struct skl_module_cfg *mcfg = NULL;
2561 struct skl_pipe_module *p_module = NULL;
2562 struct skl_pipe *pipe;
2563
2564 list_for_each_entry(w, &platform->component.card->widgets, list) {
2565 if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
2566 mcfg = w->priv;
2567 pipe = mcfg->pipe;
2568
2569 p_module = devm_kzalloc(platform->dev,
2570 sizeof(*p_module), GFP_KERNEL);
2571 if (!p_module)
2572 return -ENOMEM;
2573
2574 p_module->w = w;
2575 list_add_tail(&p_module->node, &pipe->w_list);
2576 }
2577 }
2578
2579 return 0;
2580}
2581
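/*
 * A pipe that contains both a host DMA (HDA host) module and a link/device
 * module is a passthrough pipe; mark it so pipeline handling can treat it
 * accordingly.
 */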
2582static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
2583{
2584 struct skl_pipe_module *w_module;
2585 struct snd_soc_dapm_widget *w;
2586 struct skl_module_cfg *mconfig;
2587 bool host_found = false, link_found = false;
2588
2589 list_for_each_entry(w_module, &pipe->w_list, node) {
2590 w = w_module->w;
2591 mconfig = w->priv;
2592
2593 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
2594 host_found = true;
2595 else if (mconfig->dev_type != SKL_DEVICE_NONE)
2596 link_found = true;
2597 }
2598
2599 if (host_found && link_found)
2600 pipe->passthru = true;
2601 else
2602 pipe->passthru = false;
2603}
2604
2605/* This will be read from topology manifest, currently defined here */
2606#define SKL_MAX_MCPS 30000000
2607#define SKL_FW_MAX_MEM 1000000
2608
2609/*
2610 * SKL topology init routine
2611 */
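/*
 * A minimal usage sketch (assuming the caller, e.g. the platform component
 * probe, already holds the snd_soc_platform and the hdac_ext_bus):
 *
 *	ret = skl_tplg_init(platform, ebus);
 *	if (ret < 0)
 *		return ret;
 */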
2612int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
2613{
2614 int ret;
2615 const struct firmware *fw;
2616 struct hdac_bus *bus = ebus_to_hbus(ebus);
2617 struct skl *skl = ebus_to_skl(ebus);
f0aa94fa 2618 struct skl_pipeline *ppl;
3af36706 2619
4b235c43 2620 ret = request_firmware(&fw, skl->tplg_name, bus->dev);
3af36706 2621 if (ret < 0) {
b663a8c5 2622 dev_err(bus->dev, "tplg fw %s load failed with %d\n",
2623 skl->tplg_name, ret);
2624 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
2625 if (ret < 0) {
2626 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
2627 "dfw_sst.bin", ret);
2628 return ret;
2629 }
2630 }
2631
2632 /*
2633 * The complete tplg for SKL is loaded as index 0, we don't use
2634 * any other index
2635 */
2636 ret = snd_soc_tplg_component_load(&platform->component,
2637 &skl_tplg_ops, fw, 0);
2638 if (ret < 0) {
2639 dev_err(bus->dev, "tplg component load failed %d\n", ret);
c14a82c7 2640 release_firmware(fw);
2641 return -EINVAL;
2642 }
2643
2644 skl->resource.max_mcps = SKL_MAX_MCPS;
2645 skl->resource.max_mem = SKL_FW_MAX_MEM;
2646
d8018361 2647 skl->tplg = fw;
2648 ret = skl_tplg_create_pipe_widget_list(platform);
2649 if (ret < 0)
2650 return ret;
d8018361 2651
2652 list_for_each_entry(ppl, &skl->ppl_list, node)
2653 skl_tplg_set_pipe_type(skl, ppl->pipe);
d8018361 2654
2655 return 0;
2656}