/*
 * skl-topology.c - Implements Platform component ALSA controls/widget
 * handlers.
 *
 * Copyright (C) 2014-2015 Intel Corp
 * Author: Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "skl-tplg-interface.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK	(1 << 0)
#define SKL_RATE_FIXUP_MASK	(1 << 1)
#define SKL_FMT_FIXUP_MASK	(1 << 2)
#define SKL_IN_DIR_BIT_MASK	BIT(0)
#define SKL_PIN_COUNT_MASK	GENMASK(7, 4)

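/*
 * In the topology vendor tokens the module direction and pin count are
 * packed into a single value: bit 0 carries the in/out direction flag and
 * bits 7:4 carry the pin count; the two masks above extract those fields.
 */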
static const int mic_mono_list[] = {
0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
{0, 1, 2, 3},
};
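/*
 * The tables above list the selectable physical mic combinations for the
 * mic-select module; skl_tplg_mic_control_set() indexes them with the
 * channel-select enum value (enum index 0 means "no selection"), e.g. the
 * stereo entry {0, 2} routes physical mics 0 and 2 to the two output
 * channels.
 */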
51
a83e3b4c
VK
52void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
53{
54 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
55
56 switch (caps) {
57 case SKL_D0I3_NONE:
58 d0i3->non_d0i3++;
59 break;
60
61 case SKL_D0I3_STREAMING:
62 d0i3->streaming++;
63 break;
64
65 case SKL_D0I3_NON_STREAMING:
66 d0i3->non_streaming++;
67 break;
68 }
69}
70
71void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
72{
73 struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
74
75 switch (caps) {
76 case SKL_D0I3_NONE:
77 d0i3->non_d0i3--;
78 break;
79
80 case SKL_D0I3_STREAMING:
81 d0i3->streaming--;
82 break;
83
84 case SKL_D0I3_NON_STREAMING:
85 d0i3->non_streaming--;
86 break;
87 }
88}
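/*
 * The two helpers above keep simple per-capability reference counts of
 * active streams (non-d0i3, streaming, non-streaming); presumably the
 * power management code consults these counts when deciding whether the
 * DSP may enter D0i3 - the consumers live outside this file.
 */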
89
e4e2d2f4
JK
/*
 * The SKL DSP driver models only a few DAPM widget types and ignores the
 * rest. This helper checks whether the SKL driver handles a given widget
 * type.
 */
94static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
95{
96 switch (w->id) {
97 case snd_soc_dapm_dai_link:
98 case snd_soc_dapm_dai_in:
99 case snd_soc_dapm_aif_in:
100 case snd_soc_dapm_aif_out:
101 case snd_soc_dapm_dai_out:
102 case snd_soc_dapm_switch:
103 return false;
104 default:
105 return true;
106 }
107}
108
/*
 * Each pipeline needs memory to be allocated. Check if we have free memory
 * from the available pool.
 */
9ba8ffef 113static bool skl_is_pipe_mem_avail(struct skl *skl,
e4e2d2f4
JK
114 struct skl_module_cfg *mconfig)
115{
116 struct skl_sst *ctx = skl->skl_sst;
117
118 if (skl->resource.mem + mconfig->pipe->memory_pages >
119 skl->resource.max_mem) {
120 dev_err(ctx->dev,
121 "%s: module_id %d instance %d\n", __func__,
122 mconfig->id.module_id,
123 mconfig->id.instance_id);
124 dev_err(ctx->dev,
125 "exceeds ppl memory available %d mem %d\n",
126 skl->resource.max_mem, skl->resource.mem);
127 return false;
9ba8ffef
D
128 } else {
129 return true;
e4e2d2f4 130 }
9ba8ffef 131}
e4e2d2f4 132
9ba8ffef
D
/*
 * Add the mem to the mem pool. This is freed when the pipe is deleted.
 * Note: the DSP does the actual memory management; we only keep track of
 * the overall pool usage.
 */
138static void skl_tplg_alloc_pipe_mem(struct skl *skl,
139 struct skl_module_cfg *mconfig)
140{
e4e2d2f4 141 skl->resource.mem += mconfig->pipe->memory_pages;
e4e2d2f4
JK
142}
143
/*
 * A pipeline needs DSP CPU resources for computation; this is quantified
 * in MCPS (Million Clocks Per Second) required for the module/pipe.
 *
 * Each pipeline needs MCPS to be allocated. Check if we have enough MCPS
 * for this pipe.
 */
9ba8ffef
D
151
152static bool skl_is_pipe_mcps_avail(struct skl *skl,
e4e2d2f4
JK
153 struct skl_module_cfg *mconfig)
154{
155 struct skl_sst *ctx = skl->skl_sst;
156
157 if (skl->resource.mcps + mconfig->mcps > skl->resource.max_mcps) {
158 dev_err(ctx->dev,
159 "%s: module_id %d instance %d\n", __func__,
160 mconfig->id.module_id, mconfig->id.instance_id);
		dev_err(ctx->dev,
			"exceeds ppl mcps available %d > mcps %d\n",
			skl->resource.max_mcps, skl->resource.mcps);
164 return false;
9ba8ffef
D
165 } else {
166 return true;
e4e2d2f4 167 }
9ba8ffef 168}
e4e2d2f4 169
9ba8ffef
D
170static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
171 struct skl_module_cfg *mconfig)
172{
e4e2d2f4 173 skl->resource.mcps += mconfig->mcps;
e4e2d2f4
JK
174}
175
176/*
177 * Free the mcps when tearing down
178 */
179static void
180skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
181{
182 skl->resource.mcps -= mconfig->mcps;
183}
184
185/*
186 * Free the memory when tearing down
187 */
188static void
189skl_tplg_free_pipe_mem(struct skl *skl, struct skl_module_cfg *mconfig)
190{
191 skl->resource.mem -= mconfig->pipe->memory_pages;
192}
193
f7590d4f
JK
194
195static void skl_dump_mconfig(struct skl_sst *ctx,
196 struct skl_module_cfg *mcfg)
197{
198 dev_dbg(ctx->dev, "Dumping config\n");
199 dev_dbg(ctx->dev, "Input Format:\n");
4cd9899f
HS
200 dev_dbg(ctx->dev, "channels = %d\n", mcfg->in_fmt[0].channels);
201 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->in_fmt[0].s_freq);
202 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->in_fmt[0].ch_cfg);
203 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->in_fmt[0].valid_bit_depth);
f7590d4f 204 dev_dbg(ctx->dev, "Output Format:\n");
4cd9899f
HS
205 dev_dbg(ctx->dev, "channels = %d\n", mcfg->out_fmt[0].channels);
206 dev_dbg(ctx->dev, "s_freq = %d\n", mcfg->out_fmt[0].s_freq);
207 dev_dbg(ctx->dev, "valid bit depth = %d\n", mcfg->out_fmt[0].valid_bit_depth);
208 dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
f7590d4f
JK
209}
210
ea5a137d
SP
211static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
212{
213 int slot_map = 0xFFFFFFFF;
214 int start_slot = 0;
215 int i;
216
217 for (i = 0; i < chs; i++) {
218 /*
219 * For 2 channels with starting slot as 0, slot map will
220 * look like 0xFFFFFF10.
221 */
222 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
223 start_slot++;
224 }
225 fmt->ch_map = slot_map;
226}
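/*
 * Example: for chs = 4 and a start slot of 0 the loop above produces
 * ch_map = 0xFFFF3210 - slots 0..3 carry channels 0..3 and the unused
 * upper nibbles stay 0xF.
 */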
227
f7590d4f
JK
228static void skl_tplg_update_params(struct skl_module_fmt *fmt,
229 struct skl_pipe_params *params, int fixup)
230{
231 if (fixup & SKL_RATE_FIXUP_MASK)
232 fmt->s_freq = params->s_freq;
ea5a137d 233 if (fixup & SKL_CH_FIXUP_MASK) {
f7590d4f 234 fmt->channels = params->ch;
ea5a137d
SP
235 skl_tplg_update_chmap(fmt, fmt->channels);
236 }
98256f83
JK
237 if (fixup & SKL_FMT_FIXUP_MASK) {
238 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
239
240 /*
241 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
242 * container so update bit depth accordingly
243 */
244 switch (fmt->valid_bit_depth) {
245 case SKL_DEPTH_16BIT:
246 fmt->bit_depth = fmt->valid_bit_depth;
247 break;
248
249 default:
250 fmt->bit_depth = SKL_DEPTH_32BIT;
251 break;
252 }
253 }
254
f7590d4f
JK
255}
256
/*
 * A pipeline may have modules which impact the pcm parameters, like SRC,
 * channel converter, format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * Topology tells the driver which type of fixup is to be applied by
 * supplying the fixup mask, and based on that we calculate the output.
 *
 * For an FE the pcm hw_params is the source/target format; the same
 * applies to a BE when its hw_params is invoked.
 * Here, based on FE/BE pipeline and direction, we calculate the input and
 * output fixups and then apply them to the module.
 */
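/*
 * Illustration (hypothetical values, not from a real topology): for a
 * playback FE module with params_fixup = (CH | RATE) and converter = CH,
 * in_fixup = CH | RATE, so the input side follows hw_params completely,
 * while out_fixup = ~converter & params_fixup = RATE; the topology-defined
 * output channel count is left untouched because the module itself does
 * the channel conversion.
 */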
269static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
270 struct skl_pipe_params *params, bool is_fe)
271{
272 int in_fixup, out_fixup;
273 struct skl_module_fmt *in_fmt, *out_fmt;
274
4cd9899f
HS
275 /* Fixups will be applied to pin 0 only */
276 in_fmt = &m_cfg->in_fmt[0];
277 out_fmt = &m_cfg->out_fmt[0];
f7590d4f
JK
278
279 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
280 if (is_fe) {
281 in_fixup = m_cfg->params_fixup;
282 out_fixup = (~m_cfg->converter) &
283 m_cfg->params_fixup;
284 } else {
285 out_fixup = m_cfg->params_fixup;
286 in_fixup = (~m_cfg->converter) &
287 m_cfg->params_fixup;
288 }
289 } else {
290 if (is_fe) {
291 out_fixup = m_cfg->params_fixup;
292 in_fixup = (~m_cfg->converter) &
293 m_cfg->params_fixup;
294 } else {
295 in_fixup = m_cfg->params_fixup;
296 out_fixup = (~m_cfg->converter) &
297 m_cfg->params_fixup;
298 }
299 }
300
301 skl_tplg_update_params(in_fmt, params, in_fixup);
302 skl_tplg_update_params(out_fmt, params, out_fixup);
303}
304
/*
 * A module needs input and output buffers, which depend on the pcm
 * params, so once we have calculated the params we need to calculate the
 * buffer sizes as well.
 */
310static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
311 struct skl_module_cfg *mcfg)
312{
313 int multiplier = 1;
4cd9899f 314 struct skl_module_fmt *in_fmt, *out_fmt;
4cd9899f
HS
315
316 /* Since fixups is applied to pin 0 only, ibs, obs needs
317 * change for pin 0 only
318 */
319 in_fmt = &mcfg->in_fmt[0];
320 out_fmt = &mcfg->out_fmt[0];
f7590d4f
JK
321
322 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
323 multiplier = 5;
f0c8e1d9 324
8e15e762 325 mcfg->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
998d6fb5 326 in_fmt->channels * (in_fmt->bit_depth >> 3) *
f0c8e1d9
SP
327 multiplier;
328
998d6fb5
TS
329 mcfg->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
330 out_fmt->channels * (out_fmt->bit_depth >> 3) *
f0c8e1d9 331 multiplier;
f7590d4f
JK
332}
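/*
 * Illustration with hypothetical numbers: 48000 Hz, 2 channels and a
 * 32-bit container (4 bytes) with multiplier 1 give ibs = obs =
 * 48 * 2 * 4 = 384 bytes, i.e. one millisecond of audio;
 * SKL_MODULE_TYPE_SRCINT modules use a multiplier of 5.
 */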
333
db2f586b
SV
334static u8 skl_tplg_be_dev_type(int dev_type)
335{
336 int ret;
337
338 switch (dev_type) {
339 case SKL_DEVICE_BT:
340 ret = NHLT_DEVICE_BT;
341 break;
342
343 case SKL_DEVICE_DMIC:
344 ret = NHLT_DEVICE_DMIC;
345 break;
346
347 case SKL_DEVICE_I2S:
348 ret = NHLT_DEVICE_I2S;
349 break;
350
351 default:
352 ret = NHLT_DEVICE_INVALID;
353 break;
354 }
355
356 return ret;
357}
358
2d1419a3
JK
359static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
360 struct skl_sst *ctx)
361{
362 struct skl_module_cfg *m_cfg = w->priv;
363 int link_type, dir;
364 u32 ch, s_freq, s_fmt;
365 struct nhlt_specific_cfg *cfg;
366 struct skl *skl = get_skl_ctx(ctx->dev);
db2f586b 367 u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
2d1419a3
JK
368
369 /* check if we already have blob */
370 if (m_cfg->formats_config.caps_size > 0)
371 return 0;
372
c7c6c736 373 dev_dbg(ctx->dev, "Applying default cfg blob\n");
2d1419a3
JK
374 switch (m_cfg->dev_type) {
375 case SKL_DEVICE_DMIC:
376 link_type = NHLT_LINK_DMIC;
c7c6c736 377 dir = SNDRV_PCM_STREAM_CAPTURE;
2d1419a3
JK
378 s_freq = m_cfg->in_fmt[0].s_freq;
379 s_fmt = m_cfg->in_fmt[0].bit_depth;
380 ch = m_cfg->in_fmt[0].channels;
381 break;
382
383 case SKL_DEVICE_I2S:
384 link_type = NHLT_LINK_SSP;
385 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
c7c6c736 386 dir = SNDRV_PCM_STREAM_PLAYBACK;
2d1419a3
JK
387 s_freq = m_cfg->out_fmt[0].s_freq;
388 s_fmt = m_cfg->out_fmt[0].bit_depth;
389 ch = m_cfg->out_fmt[0].channels;
c7c6c736
JK
390 } else {
391 dir = SNDRV_PCM_STREAM_CAPTURE;
392 s_freq = m_cfg->in_fmt[0].s_freq;
393 s_fmt = m_cfg->in_fmt[0].bit_depth;
394 ch = m_cfg->in_fmt[0].channels;
2d1419a3
JK
395 }
396 break;
397
398 default:
399 return -EINVAL;
400 }
401
402 /* update the blob based on virtual bus_id and default params */
403 cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
db2f586b 404 s_fmt, ch, s_freq, dir, dev_type);
2d1419a3
JK
405 if (cfg) {
406 m_cfg->formats_config.caps_size = cfg->size;
407 m_cfg->formats_config.caps = (u32 *) &cfg->caps;
408 } else {
409 dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
410 m_cfg->vbus_id, link_type, dir);
411 dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
412 ch, s_freq, s_fmt);
413 return -EIO;
414 }
415
416 return 0;
417}
418
f7590d4f
JK
419static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
420 struct skl_sst *ctx)
421{
422 struct skl_module_cfg *m_cfg = w->priv;
423 struct skl_pipe_params *params = m_cfg->pipe->p_params;
424 int p_conn_type = m_cfg->pipe->conn_type;
425 bool is_fe;
426
427 if (!m_cfg->params_fixup)
428 return;
429
	dev_dbg(ctx->dev, "Mconfig for widget=%s BEFORE update\n",
			w->name);
432
433 skl_dump_mconfig(ctx, m_cfg);
434
435 if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
436 is_fe = true;
437 else
438 is_fe = false;
439
440 skl_tplg_update_params_fixup(m_cfg, params, is_fe);
441 skl_tplg_update_buffer_size(ctx, m_cfg);
442
	dev_dbg(ctx->dev, "Mconfig for widget=%s AFTER update\n",
			w->name);
445
446 skl_dump_mconfig(ctx, m_cfg);
447}
448
abb74003
JK
/*
 * Some modules can have multiple params set from user controls which need
 * to be applied after the module is initialized. If the set_params flag is
 * SKL_PARAM_SET, the module params are sent after the module has been
 * initialized.
 */
454static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
455 struct skl_sst *ctx)
456{
457 int i, ret;
458 struct skl_module_cfg *mconfig = w->priv;
459 const struct snd_kcontrol_new *k;
460 struct soc_bytes_ext *sb;
461 struct skl_algo_data *bc;
462 struct skl_specific_cfg *sp_cfg;
463
464 if (mconfig->formats_config.caps_size > 0 &&
4ced1827 465 mconfig->formats_config.set_params == SKL_PARAM_SET) {
abb74003
JK
466 sp_cfg = &mconfig->formats_config;
467 ret = skl_set_module_params(ctx, sp_cfg->caps,
468 sp_cfg->caps_size,
469 sp_cfg->param_id, mconfig);
470 if (ret < 0)
471 return ret;
472 }
473
474 for (i = 0; i < w->num_kcontrols; i++) {
475 k = &w->kcontrol_news[i];
476 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
477 sb = (void *) k->private_value;
478 bc = (struct skl_algo_data *)sb->dobj.private;
479
4ced1827 480 if (bc->set_params == SKL_PARAM_SET) {
abb74003 481 ret = skl_set_module_params(ctx,
0d682104 482 (u32 *)bc->params, bc->size,
abb74003
JK
483 bc->param_id, mconfig);
484 if (ret < 0)
485 return ret;
486 }
487 }
488 }
489
490 return 0;
491}
492
/*
 * Some module params can be set from user controls and are required at
 * module initialization time. Such params are identified by the set_params
 * flag being SKL_PARAM_INIT; they are copied into the module's format
 * config so that they are sent as part of module init.
 */
499static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
500{
501 const struct snd_kcontrol_new *k;
502 struct soc_bytes_ext *sb;
503 struct skl_algo_data *bc;
504 struct skl_module_cfg *mconfig = w->priv;
505 int i;
506
507 for (i = 0; i < w->num_kcontrols; i++) {
508 k = &w->kcontrol_news[i];
509 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
510 sb = (struct soc_bytes_ext *)k->private_value;
511 bc = (struct skl_algo_data *)sb->dobj.private;
512
4ced1827 513 if (bc->set_params != SKL_PARAM_INIT)
abb74003
JK
514 continue;
515
d1a6fe41 516 mconfig->formats_config.caps = (u32 *)bc->params;
0d682104 517 mconfig->formats_config.caps_size = bc->size;
abb74003
JK
518
519 break;
520 }
521 }
522
523 return 0;
524}
525
bb704a73
JK
526static int skl_tplg_module_prepare(struct skl_sst *ctx, struct skl_pipe *pipe,
527 struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
528{
529 switch (mcfg->dev_type) {
530 case SKL_DEVICE_HDAHOST:
531 return skl_pcm_host_dma_prepare(ctx->dev, pipe->p_params);
532
533 case SKL_DEVICE_HDALINK:
534 return skl_pcm_link_dma_prepare(ctx->dev, pipe->p_params);
535 }
536
537 return 0;
538}
539
e4e2d2f4
JK
/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke it for all modules in a
 * pipeline.
 */
545static int
546skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
547{
548 struct skl_pipe_module *w_module;
549 struct snd_soc_dapm_widget *w;
550 struct skl_module_cfg *mconfig;
551 struct skl_sst *ctx = skl->skl_sst;
552 int ret = 0;
553
554 list_for_each_entry(w_module, &pipe->w_list, node) {
b26199ea 555 uuid_le *uuid_mod;
e4e2d2f4
JK
556 w = w_module->w;
557 mconfig = w->priv;
558
b7c50555
VK
559 /* check if module ids are populated */
560 if (mconfig->id.module_id < 0) {
a657ae7e
VK
561 dev_err(skl->skl_sst->dev,
562 "module %pUL id not populated\n",
563 (uuid_le *)mconfig->guid);
564 return -EIO;
b7c50555
VK
565 }
566
e4e2d2f4 567 /* check resource available */
9ba8ffef 568 if (!skl_is_pipe_mcps_avail(skl, mconfig))
e4e2d2f4
JK
569 return -ENOMEM;
570
6c5768b3
D
571 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
572 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
573 mconfig->id.module_id, mconfig->guid);
574 if (ret < 0)
575 return ret;
d643678b
JK
576
577 mconfig->m_state = SKL_MODULE_LOADED;
6c5768b3
D
578 }
579
bb704a73
JK
580 /* prepare the DMA if the module is gateway cpr */
581 ret = skl_tplg_module_prepare(ctx, pipe, w, mconfig);
582 if (ret < 0)
583 return ret;
584
2d1419a3
JK
		/* if the BE blob is NULL, update it with default values */
586 skl_tplg_update_be_blob(w, ctx);
587
f7590d4f
JK
588 /*
589 * apply fix/conversion to module params based on
590 * FE/BE params
591 */
592 skl_tplg_update_module_params(w, ctx);
b26199ea
JK
593 uuid_mod = (uuid_le *)mconfig->guid;
594 mconfig->id.pvt_id = skl_get_pvt_id(ctx, uuid_mod,
595 mconfig->id.instance_id);
ef2a352c
D
596 if (mconfig->id.pvt_id < 0)
597 return ret;
abb74003 598 skl_tplg_set_module_init_data(w);
4147a6e5
PS
599
600 ret = skl_dsp_get_core(ctx->dsp, mconfig->core_id);
601 if (ret < 0) {
602 dev_err(ctx->dev, "Failed to wake up core %d ret=%d\n",
603 mconfig->core_id, ret);
604 return ret;
605 }
606
9939a9c3 607 ret = skl_init_module(ctx, mconfig);
ef2a352c 608 if (ret < 0) {
b26199ea 609 skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
4147a6e5 610 goto err;
ef2a352c 611 }
260eb73a 612 skl_tplg_alloc_pipe_mcps(skl, mconfig);
abb74003 613 ret = skl_tplg_set_module_params(w, ctx);
e4e2d2f4 614 if (ret < 0)
4147a6e5 615 goto err;
e4e2d2f4
JK
616 }
617
618 return 0;
4147a6e5
PS
619err:
620 skl_dsp_put_core(ctx->dsp, mconfig->core_id);
621 return ret;
e4e2d2f4 622}
d93f8e55 623
6c5768b3
D
624static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
625 struct skl_pipe *pipe)
626{
4147a6e5 627 int ret = 0;
6c5768b3
D
628 struct skl_pipe_module *w_module = NULL;
629 struct skl_module_cfg *mconfig = NULL;
630
631 list_for_each_entry(w_module, &pipe->w_list, node) {
b26199ea 632 uuid_le *uuid_mod;
6c5768b3 633 mconfig = w_module->w->priv;
b26199ea 634 uuid_mod = (uuid_le *)mconfig->guid;
6c5768b3 635
d643678b 636 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
b0fab9c6
D
637 mconfig->m_state > SKL_MODULE_UNINIT) {
638 ret = ctx->dsp->fw_ops.unload_mod(ctx->dsp,
6c5768b3 639 mconfig->id.module_id);
b0fab9c6
D
640 if (ret < 0)
641 return -EIO;
642 }
b26199ea 643 skl_put_pvt_id(ctx, uuid_mod, &mconfig->id.pvt_id);
4147a6e5
PS
644
645 ret = skl_dsp_put_core(ctx->dsp, mconfig->core_id);
646 if (ret < 0) {
647 /* don't return; continue with other modules */
648 dev_err(ctx->dev, "Failed to sleep core %d ret=%d\n",
649 mconfig->core_id, ret);
650 }
6c5768b3
D
651 }
652
653 /* no modules to unload in this path, so return */
4147a6e5 654 return ret;
6c5768b3
D
655}
656
d93f8e55
VK
/*
 * A mixer module represents a pipeline. So in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 * - check the resources
 * - create the pipeline
 * - initialize the modules in the pipeline
 * - finally bind all modules together
 */
665static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
666 struct skl *skl)
667{
668 int ret;
669 struct skl_module_cfg *mconfig = w->priv;
670 struct skl_pipe_module *w_module;
671 struct skl_pipe *s_pipe = mconfig->pipe;
b8c722dd 672 struct skl_module_cfg *src_module = NULL, *dst_module, *module;
d93f8e55 673 struct skl_sst *ctx = skl->skl_sst;
b8c722dd 674 struct skl_module_deferred_bind *modules;
d93f8e55
VK
675
676 /* check resource available */
9ba8ffef 677 if (!skl_is_pipe_mcps_avail(skl, mconfig))
d93f8e55
VK
678 return -EBUSY;
679
9ba8ffef 680 if (!skl_is_pipe_mem_avail(skl, mconfig))
d93f8e55
VK
681 return -ENOMEM;
682
683 /*
684 * Create a list of modules for pipe.
685 * This list contains modules from source to sink
686 */
687 ret = skl_create_pipeline(ctx, mconfig->pipe);
688 if (ret < 0)
689 return ret;
690
260eb73a
D
691 skl_tplg_alloc_pipe_mem(skl, mconfig);
692 skl_tplg_alloc_pipe_mcps(skl, mconfig);
d93f8e55
VK
693
694 /* Init all pipe modules from source to sink */
695 ret = skl_tplg_init_pipe_modules(skl, s_pipe);
696 if (ret < 0)
697 return ret;
698
699 /* Bind modules from source to sink */
700 list_for_each_entry(w_module, &s_pipe->w_list, node) {
701 dst_module = w_module->w->priv;
702
703 if (src_module == NULL) {
704 src_module = dst_module;
705 continue;
706 }
707
708 ret = skl_bind_modules(ctx, src_module, dst_module);
709 if (ret < 0)
710 return ret;
711
712 src_module = dst_module;
713 }
714
b8c722dd
JK
715 /*
716 * When the destination module is initialized, check for these modules
717 * in deferred bind list. If found, bind them.
718 */
719 list_for_each_entry(w_module, &s_pipe->w_list, node) {
720 if (list_empty(&skl->bind_list))
721 break;
722
723 list_for_each_entry(modules, &skl->bind_list, node) {
724 module = w_module->w->priv;
725 if (modules->dst == module)
726 skl_bind_modules(ctx, modules->src,
727 modules->dst);
728 }
729 }
730
d93f8e55
VK
731 return 0;
732}
733
bf3e5ef5
D
734static int skl_fill_sink_instance_id(struct skl_sst *ctx, u32 *params,
735 int size, struct skl_module_cfg *mcfg)
5e8f0ee4 736{
5e8f0ee4
D
737 int i, pvt_id;
738
bf3e5ef5
D
739 if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
740 struct skl_kpb_params *kpb_params =
741 (struct skl_kpb_params *)params;
742 struct skl_mod_inst_map *inst = kpb_params->map;
5e8f0ee4 743
bf3e5ef5
D
744 for (i = 0; i < kpb_params->num_modules; i++) {
745 pvt_id = skl_get_pvt_instance_id_map(ctx, inst->mod_id,
746 inst->inst_id);
747 if (pvt_id < 0)
748 return -EINVAL;
749
750 inst->inst_id = pvt_id;
751 inst++;
752 }
5e8f0ee4 753 }
bf3e5ef5 754
5e8f0ee4
D
755 return 0;
756}
cc6a4044
JK
/*
 * Some modules require params to be set after the module is bound on all
 * of its connected pins.
 *
 * The module provider sets the set_params flag (SKL_PARAM_BIND) for such
 * modules and we send the params after binding.
 */
764static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
765 struct skl_module_cfg *mcfg, struct skl_sst *ctx)
766{
767 int i, ret;
768 struct skl_module_cfg *mconfig = w->priv;
769 const struct snd_kcontrol_new *k;
770 struct soc_bytes_ext *sb;
771 struct skl_algo_data *bc;
772 struct skl_specific_cfg *sp_cfg;
bf3e5ef5 773 u32 *params;
cc6a4044
JK
774
775 /*
776 * check all out/in pins are in bind state.
777 * if so set the module param
778 */
779 for (i = 0; i < mcfg->max_out_queue; i++) {
780 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
781 return 0;
782 }
783
784 for (i = 0; i < mcfg->max_in_queue; i++) {
785 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
786 return 0;
787 }
788
789 if (mconfig->formats_config.caps_size > 0 &&
790 mconfig->formats_config.set_params == SKL_PARAM_BIND) {
791 sp_cfg = &mconfig->formats_config;
792 ret = skl_set_module_params(ctx, sp_cfg->caps,
793 sp_cfg->caps_size,
794 sp_cfg->param_id, mconfig);
795 if (ret < 0)
796 return ret;
797 }
798
799 for (i = 0; i < w->num_kcontrols; i++) {
800 k = &w->kcontrol_news[i];
801 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
802 sb = (void *) k->private_value;
803 bc = (struct skl_algo_data *)sb->dobj.private;
804
805 if (bc->set_params == SKL_PARAM_BIND) {
bf3e5ef5
D
806 params = kzalloc(bc->max, GFP_KERNEL);
807 if (!params)
808 return -ENOMEM;
809
810 memcpy(params, bc->params, bc->max);
811 skl_fill_sink_instance_id(ctx, params, bc->max,
812 mconfig);
813
814 ret = skl_set_module_params(ctx, params,
815 bc->max, bc->param_id, mconfig);
816 kfree(params);
817
cc6a4044
JK
818 if (ret < 0)
819 return ret;
820 }
821 }
822 }
823
824 return 0;
825}
826
b8c722dd
JK
827
828static int skl_tplg_module_add_deferred_bind(struct skl *skl,
829 struct skl_module_cfg *src, struct skl_module_cfg *dst)
830{
831 struct skl_module_deferred_bind *m_list, *modules;
832 int i;
833
834 /* only supported for module with static pin connection */
835 for (i = 0; i < dst->max_in_queue; i++) {
836 struct skl_module_pin *pin = &dst->m_in_pin[i];
837
838 if (pin->is_dynamic)
839 continue;
840
841 if ((pin->id.module_id == src->id.module_id) &&
842 (pin->id.instance_id == src->id.instance_id)) {
843
844 if (!list_empty(&skl->bind_list)) {
845 list_for_each_entry(modules, &skl->bind_list, node) {
846 if (modules->src == src && modules->dst == dst)
847 return 0;
848 }
849 }
850
851 m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
852 if (!m_list)
853 return -ENOMEM;
854
855 m_list->src = src;
856 m_list->dst = dst;
857
858 list_add(&m_list->node, &skl->bind_list);
859 }
860 }
861
862 return 0;
863}
864
8724ff17
JK
865static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
866 struct skl *skl,
6bd4cf85 867 struct snd_soc_dapm_widget *src_w,
8724ff17 868 struct skl_module_cfg *src_mconfig)
d93f8e55
VK
869{
870 struct snd_soc_dapm_path *p;
0ed95d76 871 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
8724ff17 872 struct skl_module_cfg *sink_mconfig;
d93f8e55 873 struct skl_sst *ctx = skl->skl_sst;
8724ff17 874 int ret;
d93f8e55 875
8724ff17 876 snd_soc_dapm_widget_for_each_sink_path(w, p) {
d93f8e55
VK
877 if (!p->connect)
878 continue;
879
880 dev_dbg(ctx->dev, "%s: src widget=%s\n", __func__, w->name);
881 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
882
0ed95d76 883 next_sink = p->sink;
6bd4cf85
JK
884
885 if (!is_skl_dsp_widget_type(p->sink))
886 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
887
d93f8e55
VK
		/*
		 * Here we check widgets in the sink pipelines; they can be of
		 * any widget type and we are only interested in the ones used
		 * by SKL, so check that first.
		 */
893 if ((p->sink->priv != NULL) &&
894 is_skl_dsp_widget_type(p->sink)) {
895
896 sink = p->sink;
d93f8e55
VK
897 sink_mconfig = sink->priv;
898
b8c722dd
JK
			/*
			 * Modules other than the PGA leaf can be connected
			 * directly or via a switch to a module in another
			 * pipeline, e.g. a reference path. When such a path
			 * is enabled, the dst module that needs to be bound
			 * may not be initialized yet. If it is not, add the
			 * pair to the deferred bind list; when the dst module
			 * is initialised later, it is bound to the src module
			 * from that list.
			 */
909 if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
910 && (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
911
912 ret = skl_tplg_module_add_deferred_bind(skl,
913 src_mconfig, sink_mconfig);
914
915 if (ret < 0)
916 return ret;
917
918 }
919
920
cc6a4044
JK
921 if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
922 sink_mconfig->m_state == SKL_MODULE_UNINIT)
923 continue;
924
d93f8e55
VK
925 /* Bind source to sink, mixin is always source */
926 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
927 if (ret)
928 return ret;
929
cc6a4044
JK
930 /* set module params after bind */
931 skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
932 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
933
d93f8e55
VK
934 /* Start sinks pipe first */
935 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
d1730c3d
JK
936 if (sink_mconfig->pipe->conn_type !=
937 SKL_PIPE_CONN_TYPE_FE)
938 ret = skl_run_pipe(ctx,
939 sink_mconfig->pipe);
d93f8e55
VK
940 if (ret)
941 return ret;
942 }
d93f8e55
VK
943 }
944 }
945
10a5439f 946 if (!sink && next_sink)
6bd4cf85 947 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
8724ff17
JK
948
949 return 0;
950}
951
/*
 * A PGA represents a module in a pipeline. So in the Pre-PMU event of the
 * PGA we need to do the following:
 * - bind to the sink pipeline
 *   Since the sink pipes can be running and we don't get a mixer event on
 *   connect for an already running mixer, we need to find the sink pipes
 *   here and bind to them. This way dynamic connect works.
 * - start the sink pipeline, if not running
 * - then run the current pipe
 */
962static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
963 struct skl *skl)
964{
965 struct skl_module_cfg *src_mconfig;
966 struct skl_sst *ctx = skl->skl_sst;
967 int ret = 0;
968
969 src_mconfig = w->priv;
970
971 /*
972 * find which sink it is connected to, bind with the sink,
973 * if sink is not started, start sink pipe first, then start
974 * this pipe
975 */
6bd4cf85 976 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
d93f8e55
VK
977 if (ret)
978 return ret;
979
d93f8e55 980 /* Start source pipe last after starting all sinks */
d1730c3d
JK
981 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
982 return skl_run_pipe(ctx, src_mconfig->pipe);
d93f8e55
VK
983
984 return 0;
985}
986
8724ff17
JK
987static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
988 struct snd_soc_dapm_widget *w, struct skl *skl)
989{
990 struct snd_soc_dapm_path *p;
991 struct snd_soc_dapm_widget *src_w = NULL;
992 struct skl_sst *ctx = skl->skl_sst;
993
994 snd_soc_dapm_widget_for_each_source_path(w, p) {
995 src_w = p->source;
996 if (!p->connect)
997 continue;
998
999 dev_dbg(ctx->dev, "sink widget=%s\n", w->name);
1000 dev_dbg(ctx->dev, "src widget=%s\n", p->source->name);
1001
		/*
		 * Here we check widgets in the source pipelines; they can be
		 * of any widget type and we are only interested in the ones
		 * used by SKL, so check that first.
		 */
1007 if ((p->source->priv != NULL) &&
1008 is_skl_dsp_widget_type(p->source)) {
1009 return p->source;
1010 }
1011 }
1012
1013 if (src_w != NULL)
1014 return skl_get_src_dsp_widget(src_w, skl);
1015
1016 return NULL;
1017}
1018
d93f8e55
VK
/*
 * In the Post-PMU event of the mixer we need to do the following:
 * - check if this pipe is running
 * - if not, then
 *	- bind this pipeline to its source pipeline
 *	  (if the source pipe is already running, this is a dynamic
 *	   connection and we only need to bind to that pipe)
 *	- start this pipeline
 */
1028static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
1029 struct skl *skl)
1030{
1031 int ret = 0;
d93f8e55
VK
1032 struct snd_soc_dapm_widget *source, *sink;
1033 struct skl_module_cfg *src_mconfig, *sink_mconfig;
1034 struct skl_sst *ctx = skl->skl_sst;
1035 int src_pipe_started = 0;
1036
1037 sink = w;
1038 sink_mconfig = sink->priv;
1039
1040 /*
1041 * If source pipe is already started, that means source is driving
1042 * one more sink before this sink got connected, Since source is
1043 * started, bind this sink to source and start this pipe.
1044 */
8724ff17
JK
1045 source = skl_get_src_dsp_widget(w, skl);
1046 if (source != NULL) {
1047 src_mconfig = source->priv;
1048 sink_mconfig = sink->priv;
1049 src_pipe_started = 1;
d93f8e55
VK
1050
1051 /*
8724ff17
JK
1052 * check pipe state, then no need to bind or start the
1053 * pipe
d93f8e55 1054 */
8724ff17
JK
1055 if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
1056 src_pipe_started = 0;
d93f8e55
VK
1057 }
1058
1059 if (src_pipe_started) {
1060 ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
1061 if (ret)
1062 return ret;
1063
cc6a4044
JK
1064 /* set module params after bind */
1065 skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
1066 skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
1067
d1730c3d
JK
1068 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1069 ret = skl_run_pipe(ctx, sink_mconfig->pipe);
d93f8e55
VK
1070 }
1071
1072 return ret;
1073}
1074
/*
 * In the Pre-PMD event of the mixer we need to do the following:
 * - stop the pipe
 * - find the source connections and remove them from the dapm_path_list
 * - unbind from the source pipelines if still connected
 */
1081static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
1082 struct skl *skl)
1083{
d93f8e55 1084 struct skl_module_cfg *src_mconfig, *sink_mconfig;
ce1b5551 1085 int ret = 0, i;
d93f8e55
VK
1086 struct skl_sst *ctx = skl->skl_sst;
1087
ce1b5551 1088 sink_mconfig = w->priv;
d93f8e55
VK
1089
1090 /* Stop the pipe */
1091 ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
1092 if (ret)
1093 return ret;
1094
ce1b5551
JK
1095 for (i = 0; i < sink_mconfig->max_in_queue; i++) {
1096 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1097 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
1098 if (!src_mconfig)
1099 continue;
d93f8e55 1100
ce1b5551
JK
1101 ret = skl_unbind_modules(ctx,
1102 src_mconfig, sink_mconfig);
d93f8e55 1103 }
d93f8e55
VK
1104 }
1105
1106 return ret;
1107}
1108
/*
 * In the Post-PMD event of the mixer we need to do the following:
 * - free the MCPS used
 * - free the memory used
 * - unbind the modules within the pipeline
 * - delete the pipeline (modules are not required to be explicitly
 *   deleted, deleting the pipeline is enough here)
 */
1117static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1118 struct skl *skl)
1119{
1120 struct skl_module_cfg *mconfig = w->priv;
1121 struct skl_pipe_module *w_module;
1122 struct skl_module_cfg *src_module = NULL, *dst_module;
1123 struct skl_sst *ctx = skl->skl_sst;
1124 struct skl_pipe *s_pipe = mconfig->pipe;
550b349a 1125 struct skl_module_deferred_bind *modules, *tmp;
d93f8e55 1126
260eb73a
D
1127 if (s_pipe->state == SKL_PIPE_INVALID)
1128 return -EINVAL;
1129
d93f8e55 1130 skl_tplg_free_pipe_mcps(skl, mconfig);
65976878 1131 skl_tplg_free_pipe_mem(skl, mconfig);
d93f8e55 1132
b8c722dd
JK
1133 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1134 if (list_empty(&skl->bind_list))
1135 break;
1136
1137 src_module = w_module->w->priv;
1138
550b349a 1139 list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
b8c722dd
JK
1140 /*
1141 * When the destination module is deleted, Unbind the
1142 * modules from deferred bind list.
1143 */
1144 if (modules->dst == src_module) {
1145 skl_unbind_modules(ctx, modules->src,
1146 modules->dst);
1147 }
1148
1149 /*
1150 * When the source module is deleted, remove this entry
1151 * from the deferred bind list.
1152 */
1153 if (modules->src == src_module) {
1154 list_del(&modules->node);
1155 modules->src = NULL;
1156 modules->dst = NULL;
1157 kfree(modules);
1158 }
1159 }
1160 }
1161
d93f8e55
VK
1162 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1163 dst_module = w_module->w->priv;
1164
260eb73a
D
1165 if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
1166 skl_tplg_free_pipe_mcps(skl, dst_module);
d93f8e55
VK
1167 if (src_module == NULL) {
1168 src_module = dst_module;
1169 continue;
1170 }
1171
7ca42f5a 1172 skl_unbind_modules(ctx, src_module, dst_module);
d93f8e55
VK
1173 src_module = dst_module;
1174 }
1175
547cafa3 1176 skl_delete_pipe(ctx, mconfig->pipe);
d93f8e55 1177
473a4d51
JK
1178 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1179 src_module = w_module->w->priv;
1180 src_module->m_state = SKL_MODULE_UNINIT;
1181 }
1182
6c5768b3 1183 return skl_tplg_unload_pipe_modules(ctx, s_pipe);
d93f8e55
VK
1184}
1185
/*
 * In the Post-PMD event of the PGA we need to do the following:
 * - free the MCPS used
 * - stop the pipeline
 * - if a source pipe is connected, unbind from the source pipelines
 */
1192static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1193 struct skl *skl)
1194{
d93f8e55 1195 struct skl_module_cfg *src_mconfig, *sink_mconfig;
ce1b5551 1196 int ret = 0, i;
d93f8e55
VK
1197 struct skl_sst *ctx = skl->skl_sst;
1198
ce1b5551 1199 src_mconfig = w->priv;
d93f8e55 1200
d93f8e55
VK
1201 /* Stop the pipe since this is a mixin module */
1202 ret = skl_stop_pipe(ctx, src_mconfig->pipe);
1203 if (ret)
1204 return ret;
1205
ce1b5551
JK
1206 for (i = 0; i < src_mconfig->max_out_queue; i++) {
1207 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1208 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1209 if (!sink_mconfig)
1210 continue;
			/*
			 * This is a connector; if a path is found it means
			 * the unbind between source and sink has not
			 * happened yet.
			 */
ce1b5551
JK
1215 ret = skl_unbind_modules(ctx, src_mconfig,
1216 sink_mconfig);
d93f8e55
VK
1217 }
1218 }
1219
d93f8e55
VK
1220 return ret;
1221}
1222
d93f8e55
VK
/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required, it is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
1229static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1230 struct snd_kcontrol *k, int event)
1231{
1232 struct snd_soc_dapm_context *dapm = w->dapm;
1233 struct skl *skl = get_skl_ctx(dapm->dev);
1234
1235 switch (event) {
1236 case SND_SOC_DAPM_PRE_PMU:
1237 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1238
1239 case SND_SOC_DAPM_POST_PMU:
1240 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1241
1242 case SND_SOC_DAPM_PRE_PMD:
1243 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1244
1245 case SND_SOC_DAPM_POST_PMD:
1246 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1247 }
1248
1249 return 0;
1250}
1251
/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * We are only interested in the last PGA (leaf PGA) in a pipeline, which
 * disconnects from the sink while it is running (two FEs to one BE or one
 * FE to two BEs scenarios).
 */
1258static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1259 struct snd_kcontrol *k, int event)
1260
1261{
1262 struct snd_soc_dapm_context *dapm = w->dapm;
1263 struct skl *skl = get_skl_ctx(dapm->dev);
1264
1265 switch (event) {
1266 case SND_SOC_DAPM_PRE_PMU:
1267 return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1268
1269 case SND_SOC_DAPM_POST_PMD:
1270 return skl_tplg_pga_dapm_post_pmd_event(w, skl);
1271 }
1272
1273 return 0;
1274}
cfb0a873 1275
140adfba
JK
1276static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
1277 unsigned int __user *data, unsigned int size)
1278{
1279 struct soc_bytes_ext *sb =
1280 (struct soc_bytes_ext *)kcontrol->private_value;
1281 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
7d9f2911
OA
1282 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1283 struct skl_module_cfg *mconfig = w->priv;
1284 struct skl *skl = get_skl_ctx(w->dapm->dev);
1285
1286 if (w->power)
1287 skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
0d682104 1288 bc->size, bc->param_id, mconfig);
140adfba 1289
41556f68
VK
1290 /* decrement size for TLV header */
1291 size -= 2 * sizeof(u32);
1292
1293 /* check size as we don't want to send kernel data */
1294 if (size > bc->max)
1295 size = bc->max;
1296
140adfba
JK
1297 if (bc->params) {
1298 if (copy_to_user(data, &bc->param_id, sizeof(u32)))
1299 return -EFAULT;
e8bc3c99 1300 if (copy_to_user(data + 1, &size, sizeof(u32)))
140adfba 1301 return -EFAULT;
e8bc3c99 1302 if (copy_to_user(data + 2, bc->params, size))
140adfba
JK
1303 return -EFAULT;
1304 }
1305
1306 return 0;
1307}
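/*
 * The TLV blob handed back to user space above is laid out as:
 * word 0 = param_id, word 1 = payload size in bytes, words 2.. = payload;
 * the two header words are why 'size' is first reduced by 2 * sizeof(u32).
 */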
1308
1309#define SKL_PARAM_VENDOR_ID 0xff
1310
1311static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
1312 const unsigned int __user *data, unsigned int size)
1313{
1314 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1315 struct skl_module_cfg *mconfig = w->priv;
1316 struct soc_bytes_ext *sb =
1317 (struct soc_bytes_ext *)kcontrol->private_value;
1318 struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
1319 struct skl *skl = get_skl_ctx(w->dapm->dev);
1320
1321 if (ac->params) {
0d682104
D
1322 if (size > ac->max)
1323 return -EINVAL;
1324
1325 ac->size = size;
140adfba
JK
		/*
		 * if the param_id is of type Vendor, the firmware expects the
		 * actual parameter id and size from the control.
		 */
1330 if (ac->param_id == SKL_PARAM_VENDOR_ID) {
1331 if (copy_from_user(ac->params, data, size))
1332 return -EFAULT;
1333 } else {
1334 if (copy_from_user(ac->params,
65b4bcb8 1335 data + 2, size))
140adfba
JK
1336 return -EFAULT;
1337 }
1338
1339 if (w->power)
1340 return skl_set_module_params(skl->skl_sst,
0d682104 1341 (u32 *)ac->params, ac->size,
140adfba
JK
1342 ac->param_id, mconfig);
1343 }
1344
1345 return 0;
1346}
1347
7a1b749b
D
1348static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
1349 struct snd_ctl_elem_value *ucontrol)
1350{
1351 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1352 struct skl_module_cfg *mconfig = w->priv;
1353 struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1354 u32 ch_type = *((u32 *)ec->dobj.private);
1355
1356 if (mconfig->dmic_ch_type == ch_type)
1357 ucontrol->value.enumerated.item[0] =
1358 mconfig->dmic_ch_combo_index;
1359 else
1360 ucontrol->value.enumerated.item[0] = 0;
1361
1362 return 0;
1363}
1364
1365static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
1366 struct skl_mic_sel_config *mic_cfg, struct device *dev)
1367{
1368 struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;
1369
1370 sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
1371 sp_cfg->set_params = SKL_PARAM_SET;
1372 sp_cfg->param_id = 0x00;
1373 if (!sp_cfg->caps) {
1374 sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
1375 if (!sp_cfg->caps)
1376 return -ENOMEM;
1377 }
1378
1379 mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
1380 mic_cfg->flags = 0;
1381 memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);
1382
1383 return 0;
1384}
1385
1386static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
1387 struct snd_ctl_elem_value *ucontrol)
1388{
1389 struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
1390 struct skl_module_cfg *mconfig = w->priv;
1391 struct skl_mic_sel_config mic_cfg = {0};
1392 struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
1393 u32 ch_type = *((u32 *)ec->dobj.private);
1394 const int *list;
1395 u8 in_ch, out_ch, index;
1396
1397 mconfig->dmic_ch_type = ch_type;
1398 mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];
1399
1400 /* enum control index 0 is INVALID, so no channels to be set */
1401 if (mconfig->dmic_ch_combo_index == 0)
1402 return 0;
1403
1404 /* No valid channel selection map for index 0, so offset by 1 */
1405 index = mconfig->dmic_ch_combo_index - 1;
1406
1407 switch (ch_type) {
1408 case SKL_CH_MONO:
1409 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
1410 return -EINVAL;
1411
1412 list = &mic_mono_list[index];
1413 break;
1414
1415 case SKL_CH_STEREO:
1416 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
1417 return -EINVAL;
1418
1419 list = mic_stereo_list[index];
1420 break;
1421
1422 case SKL_CH_TRIO:
1423 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
1424 return -EINVAL;
1425
1426 list = mic_trio_list[index];
1427 break;
1428
1429 case SKL_CH_QUATRO:
1430 if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
1431 return -EINVAL;
1432
1433 list = mic_quatro_list[index];
1434 break;
1435
1436 default:
1437 dev_err(w->dapm->dev,
1438 "Invalid channel %d for mic_select module\n",
1439 ch_type);
1440 return -EINVAL;
1441
1442 }
1443
	/* the channel type enum maps to the number of channels of that type */
1445 for (out_ch = 0; out_ch < ch_type; out_ch++) {
1446 in_ch = list[out_ch];
1447 mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
1448 }
1449
1450 return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
1451}
1452
8871dcb9
JK
/*
 * Fill the dma id for host and link. In the case of a passthrough
 * pipeline, both the host and the link copier sit in the same pipeline,
 * so copy the link or host params based on dev_type.
 */
1458static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
1459 struct skl_pipe_params *params)
1460{
1461 struct skl_pipe *pipe = mcfg->pipe;
1462
1463 if (pipe->passthru) {
1464 switch (mcfg->dev_type) {
1465 case SKL_DEVICE_HDALINK:
1466 pipe->p_params->link_dma_id = params->link_dma_id;
12c3be0e 1467 pipe->p_params->link_index = params->link_index;
7f975a38 1468 pipe->p_params->link_bps = params->link_bps;
8871dcb9
JK
1469 break;
1470
1471 case SKL_DEVICE_HDAHOST:
1472 pipe->p_params->host_dma_id = params->host_dma_id;
7f975a38 1473 pipe->p_params->host_bps = params->host_bps;
8871dcb9
JK
1474 break;
1475
1476 default:
1477 break;
1478 }
1479 pipe->p_params->s_fmt = params->s_fmt;
1480 pipe->p_params->ch = params->ch;
1481 pipe->p_params->s_freq = params->s_freq;
1482 pipe->p_params->stream = params->stream;
12c3be0e 1483 pipe->p_params->format = params->format;
8871dcb9
JK
1484
1485 } else {
1486 memcpy(pipe->p_params, params, sizeof(*params));
1487 }
1488}
1489
cfb0a873
VK
1490/*
1491 * The FE params are passed by hw_params of the DAI.
1492 * On hw_params, the params are stored in Gateway module of the FE and we
1493 * need to calculate the format in DSP module configuration, that
1494 * conversion is done here
1495 */
1496int skl_tplg_update_pipe_params(struct device *dev,
1497 struct skl_module_cfg *mconfig,
1498 struct skl_pipe_params *params)
1499{
cfb0a873
VK
1500 struct skl_module_fmt *format = NULL;
1501
8871dcb9 1502 skl_tplg_fill_dma_id(mconfig, params);
cfb0a873
VK
1503
1504 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
4cd9899f 1505 format = &mconfig->in_fmt[0];
cfb0a873 1506 else
4cd9899f 1507 format = &mconfig->out_fmt[0];
cfb0a873
VK
1508
1509 /* set the hw_params */
1510 format->s_freq = params->s_freq;
1511 format->channels = params->ch;
1512 format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
1513
1514 /*
1515 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
1516 * container so update bit depth accordingly
1517 */
1518 switch (format->valid_bit_depth) {
1519 case SKL_DEPTH_16BIT:
1520 format->bit_depth = format->valid_bit_depth;
1521 break;
1522
1523 case SKL_DEPTH_24BIT:
6654f39e 1524 case SKL_DEPTH_32BIT:
cfb0a873
VK
1525 format->bit_depth = SKL_DEPTH_32BIT;
1526 break;
1527
1528 default:
1529 dev_err(dev, "Invalid bit depth %x for pipe\n",
1530 format->valid_bit_depth);
1531 return -EINVAL;
1532 }
1533
1534 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1535 mconfig->ibs = (format->s_freq / 1000) *
1536 (format->channels) *
1537 (format->bit_depth >> 3);
1538 } else {
1539 mconfig->obs = (format->s_freq / 1000) *
1540 (format->channels) *
1541 (format->bit_depth >> 3);
1542 }
1543
1544 return 0;
1545}
1546
1547/*
1548 * Query the module config for the FE DAI
1549 * This is used to find the hw_params set for that DAI and apply to FE
1550 * pipeline
1551 */
1552struct skl_module_cfg *
1553skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
1554{
1555 struct snd_soc_dapm_widget *w;
1556 struct snd_soc_dapm_path *p = NULL;
1557
1558 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1559 w = dai->playback_widget;
f0900eb2 1560 snd_soc_dapm_widget_for_each_sink_path(w, p) {
cfb0a873 1561 if (p->connect && p->sink->power &&
a28f51db 1562 !is_skl_dsp_widget_type(p->sink))
cfb0a873
VK
1563 continue;
1564
1565 if (p->sink->priv) {
1566 dev_dbg(dai->dev, "set params for %s\n",
1567 p->sink->name);
1568 return p->sink->priv;
1569 }
1570 }
1571 } else {
1572 w = dai->capture_widget;
f0900eb2 1573 snd_soc_dapm_widget_for_each_source_path(w, p) {
cfb0a873 1574 if (p->connect && p->source->power &&
a28f51db 1575 !is_skl_dsp_widget_type(p->source))
cfb0a873
VK
1576 continue;
1577
1578 if (p->source->priv) {
1579 dev_dbg(dai->dev, "set params for %s\n",
1580 p->source->name);
1581 return p->source->priv;
1582 }
1583 }
1584 }
1585
1586 return NULL;
1587}
1588
718a42b5
D
1589static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
1590 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1591{
1592 struct snd_soc_dapm_path *p;
1593 struct skl_module_cfg *mconfig = NULL;
1594
1595 snd_soc_dapm_widget_for_each_source_path(w, p) {
1596 if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
1597 if (p->connect &&
1598 (p->sink->id == snd_soc_dapm_aif_out) &&
1599 p->source->priv) {
1600 mconfig = p->source->priv;
1601 return mconfig;
1602 }
1603 mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
1604 if (mconfig)
1605 return mconfig;
1606 }
1607 }
1608 return mconfig;
1609}
1610
1611static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
1612 struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
1613{
1614 struct snd_soc_dapm_path *p;
1615 struct skl_module_cfg *mconfig = NULL;
1616
1617 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1618 if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
1619 if (p->connect &&
1620 (p->source->id == snd_soc_dapm_aif_in) &&
1621 p->sink->priv) {
1622 mconfig = p->sink->priv;
1623 return mconfig;
1624 }
1625 mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
1626 if (mconfig)
1627 return mconfig;
1628 }
1629 }
1630 return mconfig;
1631}
1632
1633struct skl_module_cfg *
1634skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
1635{
1636 struct snd_soc_dapm_widget *w;
1637 struct skl_module_cfg *mconfig;
1638
1639 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
1640 w = dai->playback_widget;
1641 mconfig = skl_get_mconfig_pb_cpr(dai, w);
1642 } else {
1643 w = dai->capture_widget;
1644 mconfig = skl_get_mconfig_cap_cpr(dai, w);
1645 }
1646 return mconfig;
1647}
1648
cfb0a873
VK
1649static u8 skl_tplg_be_link_type(int dev_type)
1650{
1651 int ret;
1652
1653 switch (dev_type) {
1654 case SKL_DEVICE_BT:
1655 ret = NHLT_LINK_SSP;
1656 break;
1657
1658 case SKL_DEVICE_DMIC:
1659 ret = NHLT_LINK_DMIC;
1660 break;
1661
1662 case SKL_DEVICE_I2S:
1663 ret = NHLT_LINK_SSP;
1664 break;
1665
1666 case SKL_DEVICE_HDALINK:
1667 ret = NHLT_LINK_HDA;
1668 break;
1669
1670 default:
1671 ret = NHLT_LINK_INVALID;
1672 break;
1673 }
1674
1675 return ret;
1676}
1677
1678/*
1679 * Fill the BE gateway parameters
1680 * The BE gateway expects a blob of parameters which are kept in the ACPI
1681 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1682 * The port can have multiple settings so pick based on the PCM
1683 * parameters
1684 */
1685static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
1686 struct skl_module_cfg *mconfig,
1687 struct skl_pipe_params *params)
1688{
cfb0a873
VK
1689 struct nhlt_specific_cfg *cfg;
1690 struct skl *skl = get_skl_ctx(dai->dev);
1691 int link_type = skl_tplg_be_link_type(mconfig->dev_type);
db2f586b 1692 u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
cfb0a873 1693
8871dcb9 1694 skl_tplg_fill_dma_id(mconfig, params);
cfb0a873 1695
b30c275e
JK
1696 if (link_type == NHLT_LINK_HDA)
1697 return 0;
1698
cfb0a873
VK
1699 /* update the blob based on virtual bus_id*/
1700 cfg = skl_get_ep_blob(skl, mconfig->vbus_id, link_type,
1701 params->s_fmt, params->ch,
db2f586b
SV
1702 params->s_freq, params->stream,
1703 dev_type);
cfb0a873
VK
1704 if (cfg) {
1705 mconfig->formats_config.caps_size = cfg->size;
bc03281a 1706 mconfig->formats_config.caps = (u32 *) &cfg->caps;
cfb0a873
VK
1707 } else {
1708 dev_err(dai->dev, "Blob NULL for id %x type %d dirn %d\n",
1709 mconfig->vbus_id, link_type,
1710 params->stream);
1711 dev_err(dai->dev, "PCM: ch %d, freq %d, fmt %d\n",
1712 params->ch, params->s_freq, params->s_fmt);
1713 return -EINVAL;
1714 }
1715
1716 return 0;
1717}
1718
1719static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
1720 struct snd_soc_dapm_widget *w,
1721 struct skl_pipe_params *params)
1722{
1723 struct snd_soc_dapm_path *p;
4d8adccb 1724 int ret = -EIO;
cfb0a873 1725
f0900eb2 1726 snd_soc_dapm_widget_for_each_source_path(w, p) {
cfb0a873
VK
1727 if (p->connect && is_skl_dsp_widget_type(p->source) &&
1728 p->source->priv) {
1729
9a03cb49
JK
1730 ret = skl_tplg_be_fill_pipe_params(dai,
1731 p->source->priv, params);
1732 if (ret < 0)
1733 return ret;
cfb0a873 1734 } else {
9a03cb49
JK
1735 ret = skl_tplg_be_set_src_pipe_params(dai,
1736 p->source, params);
4d8adccb
SP
1737 if (ret < 0)
1738 return ret;
cfb0a873
VK
1739 }
1740 }
1741
4d8adccb 1742 return ret;
cfb0a873
VK
1743}
1744
1745static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
1746 struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
1747{
1748 struct snd_soc_dapm_path *p = NULL;
4d8adccb 1749 int ret = -EIO;
cfb0a873 1750
f0900eb2 1751 snd_soc_dapm_widget_for_each_sink_path(w, p) {
cfb0a873
VK
1752 if (p->connect && is_skl_dsp_widget_type(p->sink) &&
1753 p->sink->priv) {
1754
9a03cb49
JK
1755 ret = skl_tplg_be_fill_pipe_params(dai,
1756 p->sink->priv, params);
1757 if (ret < 0)
1758 return ret;
cfb0a873 1759 } else {
4d8adccb 1760 ret = skl_tplg_be_set_sink_pipe_params(
cfb0a873 1761 dai, p->sink, params);
4d8adccb
SP
1762 if (ret < 0)
1763 return ret;
cfb0a873
VK
1764 }
1765 }
1766
4d8adccb 1767 return ret;
cfb0a873
VK
1768}
1769
1770/*
1771 * BE hw_params can be a source parameters (capture) or sink parameters
1772 * (playback). Based on sink and source we need to either find the source
1773 * list or the sink list and set the pipeline parameters
1774 */
1775int skl_tplg_be_update_params(struct snd_soc_dai *dai,
1776 struct skl_pipe_params *params)
1777{
1778 struct snd_soc_dapm_widget *w;
1779
1780 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1781 w = dai->playback_widget;
1782
1783 return skl_tplg_be_set_src_pipe_params(dai, w, params);
1784
1785 } else {
1786 w = dai->capture_widget;
1787
1788 return skl_tplg_be_set_sink_pipe_params(dai, w, params);
1789 }
1790
1791 return 0;
1792}
3af36706
VK
1793
1794static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
1795 {SKL_MIXER_EVENT, skl_tplg_mixer_event},
9a1e3507 1796 {SKL_VMIXER_EVENT, skl_tplg_mixer_event},
3af36706
VK
1797 {SKL_PGA_EVENT, skl_tplg_pga_event},
1798};
1799
140adfba
JK
1800static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
1801 {SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
1802 skl_tplg_tlv_control_set},
1803};
1804
7a1b749b
D
1805static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
1806 {
1807 .id = SKL_CONTROL_TYPE_MIC_SELECT,
1808 .get = skl_tplg_mic_control_get,
1809 .put = skl_tplg_mic_control_set,
1810 },
1811};
1812
6277e832
SN
1813static int skl_tplg_fill_pipe_tkn(struct device *dev,
1814 struct skl_pipe *pipe, u32 tkn,
1815 u32 tkn_val)
3af36706 1816{
3af36706 1817
6277e832
SN
1818 switch (tkn) {
1819 case SKL_TKN_U32_PIPE_CONN_TYPE:
1820 pipe->conn_type = tkn_val;
1821 break;
1822
1823 case SKL_TKN_U32_PIPE_PRIORITY:
1824 pipe->pipe_priority = tkn_val;
1825 break;
1826
1827 case SKL_TKN_U32_PIPE_MEM_PGS:
1828 pipe->memory_pages = tkn_val;
1829 break;
1830
8a0cb236
VK
1831 case SKL_TKN_U32_PMODE:
1832 pipe->lp_mode = tkn_val;
1833 break;
1834
6277e832
SN
1835 default:
1836 dev_err(dev, "Token not handled %d\n", tkn);
1837 return -EINVAL;
3af36706 1838 }
6277e832
SN
1839
1840 return 0;
3af36706
VK
1841}
1842
1843/*
6277e832
SN
1844 * Add pipeline by parsing the relevant tokens
1845 * Return an existing pipe if the pipe already exists.
3af36706 1846 */
6277e832
SN
1847static int skl_tplg_add_pipe(struct device *dev,
1848 struct skl_module_cfg *mconfig, struct skl *skl,
1849 struct snd_soc_tplg_vendor_value_elem *tkn_elem)
3af36706
VK
1850{
1851 struct skl_pipeline *ppl;
1852 struct skl_pipe *pipe;
1853 struct skl_pipe_params *params;
1854
1855 list_for_each_entry(ppl, &skl->ppl_list, node) {
6277e832
SN
1856 if (ppl->pipe->ppl_id == tkn_elem->value) {
1857 mconfig->pipe = ppl->pipe;
081dc8ab 1858 return -EEXIST;
6277e832 1859 }
3af36706
VK
1860 }
1861
1862 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
1863 if (!ppl)
6277e832 1864 return -ENOMEM;
3af36706
VK
1865
1866 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
1867 if (!pipe)
6277e832 1868 return -ENOMEM;
3af36706
VK
1869
1870 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
1871 if (!params)
6277e832 1872 return -ENOMEM;
3af36706 1873
3af36706 1874 pipe->p_params = params;
6277e832 1875 pipe->ppl_id = tkn_elem->value;
3af36706
VK
1876 INIT_LIST_HEAD(&pipe->w_list);
1877
1878 ppl->pipe = pipe;
1879 list_add(&ppl->node, &skl->ppl_list);
1880
6277e832
SN
1881 mconfig->pipe = pipe;
1882 mconfig->pipe->state = SKL_PIPE_INVALID;
1883
1884 return 0;
1885}
1886
1887static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
1888 struct skl_module_pin *m_pin,
1889 int pin_index, u32 value)
1890{
1891 switch (tkn) {
1892 case SKL_TKN_U32_PIN_MOD_ID:
1893 m_pin[pin_index].id.module_id = value;
1894 break;
1895
1896 case SKL_TKN_U32_PIN_INST_ID:
1897 m_pin[pin_index].id.instance_id = value;
1898 break;
1899
1900 default:
1901 dev_err(dev, "%d is not a pin token\n", tkn);
1902 return -EINVAL;
1903 }
1904
1905 return 0;
1906}
1907
1908/*
1909 * Parse for pin config specific tokens to fill up the
1910 * module private data
1911 */
1912static int skl_tplg_fill_pins_info(struct device *dev,
1913 struct skl_module_cfg *mconfig,
1914 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
1915 int dir, int pin_count)
1916{
1917 int ret;
1918 struct skl_module_pin *m_pin;
1919
1920 switch (dir) {
1921 case SKL_DIR_IN:
1922 m_pin = mconfig->m_in_pin;
1923 break;
1924
1925 case SKL_DIR_OUT:
1926 m_pin = mconfig->m_out_pin;
1927 break;
1928
1929 default:
ecd286a9 1930 dev_err(dev, "Invalid direction value\n");
6277e832
SN
1931 return -EINVAL;
1932 }
1933
1934 ret = skl_tplg_fill_pin(dev, tkn_elem->token,
1935 m_pin, pin_count, tkn_elem->value);
1936
1937 if (ret < 0)
1938 return ret;
1939
1940 m_pin[pin_count].in_use = false;
1941 m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
1942
1943 return 0;
3af36706
VK
1944}
1945
6277e832
SN
1946/*
1947 * Fill up input/output module config format based
1948 * on the direction
1949 */
1950static int skl_tplg_fill_fmt(struct device *dev,
ca312fda
SN
1951 struct skl_module_fmt *dst_fmt,
1952 u32 tkn, u32 value)
6277e832 1953{
6277e832
SN
1954 switch (tkn) {
1955 case SKL_TKN_U32_FMT_CH:
1956 dst_fmt->channels = value;
1957 break;
1958
1959 case SKL_TKN_U32_FMT_FREQ:
1960 dst_fmt->s_freq = value;
1961 break;
1962
1963 case SKL_TKN_U32_FMT_BIT_DEPTH:
1964 dst_fmt->bit_depth = value;
1965 break;
1966
1967 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
1968 dst_fmt->valid_bit_depth = value;
1969 break;
1970
1971 case SKL_TKN_U32_FMT_CH_CONFIG:
1972 dst_fmt->ch_cfg = value;
1973 break;
1974
1975 case SKL_TKN_U32_FMT_INTERLEAVE:
1976 dst_fmt->interleaving_style = value;
1977 break;
1978
1979 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
1980 dst_fmt->sample_type = value;
1981 break;
1982
1983 case SKL_TKN_U32_FMT_CH_MAP:
1984 dst_fmt->ch_map = value;
1985 break;
1986
1987 default:
ecd286a9 1988 dev_err(dev, "Invalid token %d\n", tkn);
6277e832
SN
1989 return -EINVAL;
1990 }
1991
1992 return 0;
1993}
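/*
 * For illustration only (values assumed, not taken from any real topology):
 * a 48 kHz stereo stream in a 32-bit container with 24 valid bits would be
 * described by the tuples
 *   SKL_TKN_U32_FMT_CH = 2, SKL_TKN_U32_FMT_FREQ = 48000,
 *   SKL_TKN_U32_FMT_BIT_DEPTH = 32, SKL_TKN_U32_FMT_SAMPLE_SIZE = 24,
 * each of which lands in the matching skl_module_fmt field above.
 */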
1994
ca312fda
SN
1995static int skl_tplg_widget_fill_fmt(struct device *dev,
1996 struct skl_module_cfg *mconfig,
1997 u32 tkn, u32 val, u32 dir, int fmt_idx)
1998{
1999 struct skl_module_fmt *dst_fmt;
2000
2001 switch (dir) {
2002 case SKL_DIR_IN:
2003 dst_fmt = &mconfig->in_fmt[fmt_idx];
2004 break;
2005
2006 case SKL_DIR_OUT:
2007 dst_fmt = &mconfig->out_fmt[fmt_idx];
2008 break;
2009
2010 default:
2011 dev_err(dev, "Invalid direction: %d\n", dir);
2012 return -EINVAL;
2013 }
2014
2015 return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
2016}
2017
6277e832
SN
2018static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
2019 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
2020{
2021 if (uuid_tkn->token == SKL_TKN_UUID)
2022 memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
2023 else {
ecd286a9 2024 dev_err(dev, "Not a UUID token %d\n", uuid_tkn->token);
6277e832
SN
2025 return -EINVAL;
2026 }
2027
2028 return 0;
2029}
2030
2031static void skl_tplg_fill_pin_dynamic_val(
2032 struct skl_module_pin *mpin, u32 pin_count, u32 value)
4cd9899f
HS
2033{
2034 int i;
2035
6277e832
SN
2036 for (i = 0; i < pin_count; i++)
2037 mpin[i].is_dynamic = value;
2038}
2039
2040/*
2041 * Parse tokens to fill up the module private data
2042 */
2043static int skl_tplg_get_token(struct device *dev,
2044 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
2045 struct skl *skl, struct skl_module_cfg *mconfig)
2046{
2047 int tkn_count = 0;
2048 int ret;
2049 static int is_pipe_exists;
2050 static int pin_index, dir;
2051
2052 if (tkn_elem->token > SKL_TKN_MAX)
2053 return -EINVAL;
2054
2055 switch (tkn_elem->token) {
2056 case SKL_TKN_U8_IN_QUEUE_COUNT:
2057 mconfig->max_in_queue = tkn_elem->value;
2058 mconfig->m_in_pin = devm_kzalloc(dev, mconfig->max_in_queue *
2059 sizeof(*mconfig->m_in_pin),
2060 GFP_KERNEL);
2061 if (!mconfig->m_in_pin)
2062 return -ENOMEM;
2063
2064 break;
2065
2066 case SKL_TKN_U8_OUT_QUEUE_COUNT:
2067 mconfig->max_out_queue = tkn_elem->value;
2068 mconfig->m_out_pin = devm_kzalloc(dev, mconfig->max_out_queue *
2069 sizeof(*mconfig->m_out_pin),
2070 GFP_KERNEL);
2071
2072 if (!mconfig->m_out_pin)
2073 return -ENOMEM;
2074
2075 break;
2076
2077 case SKL_TKN_U8_DYN_IN_PIN:
2078 if (!mconfig->m_in_pin)
2079 return -ENOMEM;
2080
2081 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin,
2082 mconfig->max_in_queue, tkn_elem->value);
2083
2084 break;
2085
2086 case SKL_TKN_U8_DYN_OUT_PIN:
2087 if (!mconfig->m_out_pin)
2088 return -ENOMEM;
2089
2090 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin,
2091 mconfig->max_out_queue, tkn_elem->value);
2092
2093 break;
2094
2095 case SKL_TKN_U8_TIME_SLOT:
2096 mconfig->time_slot = tkn_elem->value;
2097 break;
2098
2099 case SKL_TKN_U8_CORE_ID:
2100 mconfig->core_id = tkn_elem->value;
2101 break;
2102 case SKL_TKN_U8_MOD_TYPE:
2103 mconfig->m_type = tkn_elem->value;
2104 break;
2105
2106 case SKL_TKN_U8_DEV_TYPE:
2107 mconfig->dev_type = tkn_elem->value;
2108 break;
2109
2110 case SKL_TKN_U8_HW_CONN_TYPE:
2111 mconfig->hw_conn_type = tkn_elem->value;
2112 break;
2113
2114 case SKL_TKN_U16_MOD_INST_ID:
2115 mconfig->id.instance_id =
2116 tkn_elem->value;
2117 break;
2118
2119 case SKL_TKN_U32_MEM_PAGES:
2120 mconfig->mem_pages = tkn_elem->value;
2121 break;
2122
2123 case SKL_TKN_U32_MAX_MCPS:
2124 mconfig->mcps = tkn_elem->value;
2125 break;
2126
2127 case SKL_TKN_U32_OBS:
2128 mconfig->obs = tkn_elem->value;
2129 break;
2130
2131 case SKL_TKN_U32_IBS:
2132 mconfig->ibs = tkn_elem->value;
2133 break;
2134
2135 case SKL_TKN_U32_VBUS_ID:
2136 mconfig->vbus_id = tkn_elem->value;
2137 break;
2138
2139 case SKL_TKN_U32_PARAMS_FIXUP:
2140 mconfig->params_fixup = tkn_elem->value;
2141 break;
2142
2143 case SKL_TKN_U32_CONVERTER:
2144 mconfig->converter = tkn_elem->value;
2145 break;
2146
c0116be3 2147 case SKL_TKN_U32_D0I3_CAPS:
6bd9dcf3
VK
2148 mconfig->d0i3_caps = tkn_elem->value;
2149 break;
2150
6277e832
SN
2151 case SKL_TKN_U32_PIPE_ID:
2152 ret = skl_tplg_add_pipe(dev,
2153 mconfig, skl, tkn_elem);
2154
081dc8ab
GS
2155 if (ret < 0) {
2156 if (ret == -EEXIST) {
2157 is_pipe_exists = 1;
2158 break;
2159 }
6277e832 2160 return ret;
081dc8ab 2161 }
6277e832
SN
2162
2163 break;
2164
2165 case SKL_TKN_U32_PIPE_CONN_TYPE:
2166 case SKL_TKN_U32_PIPE_PRIORITY:
2167 case SKL_TKN_U32_PIPE_MEM_PGS:
8a0cb236 2168 case SKL_TKN_U32_PMODE:
6277e832
SN
2169 if (is_pipe_exists) {
2170 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
2171 tkn_elem->token, tkn_elem->value);
2172 if (ret < 0)
2173 return ret;
2174 }
2175
2176 break;
2177
2178 /*
2179 * The SKL_TKN_U32_DIR_PIN_COUNT token carries both the direction and
2180 * the pin count. The lower four bits hold the direction (only bit 0
2181 * is used) and the next four bits the pin count.
2182 */
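	/*
	 * Worked example (value assumed purely for illustration, and assuming
	 * SKL_DIR_IN = 0 / SKL_DIR_OUT = 1): a token value of 0x41 decodes as
	 * dir = 0x41 & SKL_IN_DIR_BIT_MASK = 1 (SKL_DIR_OUT) and
	 * pin count = (0x41 & SKL_PIN_COUNT_MASK) >> 4 = 4.
	 */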
2183 case SKL_TKN_U32_DIR_PIN_COUNT:
2184 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
2185 pin_index = (tkn_elem->value &
2186 SKL_PIN_COUNT_MASK) >> 4;
2187
2188 break;
2189
2190 case SKL_TKN_U32_FMT_CH:
2191 case SKL_TKN_U32_FMT_FREQ:
2192 case SKL_TKN_U32_FMT_BIT_DEPTH:
2193 case SKL_TKN_U32_FMT_SAMPLE_SIZE:
2194 case SKL_TKN_U32_FMT_CH_CONFIG:
2195 case SKL_TKN_U32_FMT_INTERLEAVE:
2196 case SKL_TKN_U32_FMT_SAMPLE_TYPE:
2197 case SKL_TKN_U32_FMT_CH_MAP:
ca312fda 2198 ret = skl_tplg_widget_fill_fmt(dev, mconfig, tkn_elem->token,
6277e832
SN
2199 tkn_elem->value, dir, pin_index);
2200
2201 if (ret < 0)
2202 return ret;
2203
2204 break;
2205
2206 case SKL_TKN_U32_PIN_MOD_ID:
2207 case SKL_TKN_U32_PIN_INST_ID:
2208 ret = skl_tplg_fill_pins_info(dev,
2209 mconfig, tkn_elem, dir,
2210 pin_index);
2211 if (ret < 0)
2212 return ret;
2213
2214 break;
2215
2216 case SKL_TKN_U32_CAPS_SIZE:
2217 mconfig->formats_config.caps_size =
2218 tkn_elem->value;
2219
2220 break;
2221
133e6e5c
SN
2222 case SKL_TKN_U32_CAPS_SET_PARAMS:
2223 mconfig->formats_config.set_params =
2224 tkn_elem->value;
2225 break;
2226
2227 case SKL_TKN_U32_CAPS_PARAMS_ID:
2228 mconfig->formats_config.param_id =
2229 tkn_elem->value;
2230 break;
2231
6277e832
SN
2232 case SKL_TKN_U32_PROC_DOMAIN:
2233 mconfig->domain =
2234 tkn_elem->value;
2235
2236 break;
2237
939df3ad
RB
2238 case SKL_TKN_U32_DMA_BUF_SIZE:
2239 mconfig->dma_buffer_size = tkn_elem->value;
2240 break;
6277e832
SN
2241
2242 case SKL_TKN_U8_IN_PIN_TYPE:
2243 case SKL_TKN_U8_OUT_PIN_TYPE:
2244 case SKL_TKN_U8_CONN_TYPE:
2245 break;
2246
2247 default:
2248 dev_err(dev, "Token %d not handled\n",
2249 tkn_elem->token);
2250 return -EINVAL;
4cd9899f 2251 }
6277e832
SN
2252
2253 tkn_count++;
2254
2255 return tkn_count;
2256}
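/*
 * Note on token ordering, as implied by the parser above (not a documented
 * contract): SKL_TKN_U8_IN_QUEUE_COUNT/SKL_TKN_U8_OUT_QUEUE_COUNT must
 * precede the dynamic pin tokens so the pin arrays are already allocated,
 * and SKL_TKN_U32_DIR_PIN_COUNT must precede the SKL_TKN_U32_FMT_* and
 * SKL_TKN_U32_PIN_* tokens it qualifies, since dir and pin_index are cached
 * from it.
 */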
2257
2258/*
2259 * Parse the vendor array for specific tokens to construct
2260 * module private data
2261 */
2262static int skl_tplg_get_tokens(struct device *dev,
2263 char *pvt_data, struct skl *skl,
2264 struct skl_module_cfg *mconfig, int block_size)
2265{
2266 struct snd_soc_tplg_vendor_array *array;
2267 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2268 int tkn_count = 0, ret;
2269 int off = 0, tuple_size = 0;
2270
2271 if (block_size <= 0)
2272 return -EINVAL;
2273
2274 while (tuple_size < block_size) {
2275 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2276
2277 off += array->size;
2278
2279 switch (array->type) {
2280 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
ecd286a9 2281 dev_warn(dev, "no string tokens expected for skl tplg\n");
6277e832
SN
2282 continue;
2283
2284 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
2285 ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
2286 if (ret < 0)
2287 return ret;
2288
2289 tuple_size += sizeof(*array->uuid);
2290
2291 continue;
2292
2293 default:
2294 tkn_elem = array->value;
2295 tkn_count = 0;
2296 break;
2297 }
2298
2299 while (tkn_count <= (array->num_elems - 1)) {
2300 ret = skl_tplg_get_token(dev, tkn_elem,
2301 skl, mconfig);
2302
2303 if (ret < 0)
2304 return ret;
2305
2306 tkn_count = tkn_count + ret;
2307 tkn_elem++;
2308 }
2309
2310 tuple_size += tkn_count * sizeof(*tkn_elem);
2311 }
2312
133e6e5c 2313 return off;
6277e832
SN
2314}
2315
2316/*
2317 * Every data block is preceded by a descriptor giving the number
2318 * of data blocks, the type of the block and its size
2319 */
2320static int skl_tplg_get_desc_blocks(struct device *dev,
2321 struct snd_soc_tplg_vendor_array *array)
2322{
2323 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2324
2325 tkn_elem = array->value;
2326
2327 switch (tkn_elem->token) {
2328 case SKL_TKN_U8_NUM_BLOCKS:
2329 case SKL_TKN_U8_BLOCK_TYPE:
2330 case SKL_TKN_U16_BLOCK_SIZE:
2331 return tkn_elem->value;
2332
2333 default:
ecd286a9 2334 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
6277e832
SN
2335 break;
2336 }
2337
2338 return -EINVAL;
2339}
2340
2341/*
2342 * Parse the private data for the token and corresponding value.
2343 * The private data can have multiple data blocks. So, a data block
2344 * is preceded by a descriptor for the number of blocks and a descriptor
2345 * for the type and size of the succeeding data block.
2346 */
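/*
 * A sketch of the private data layout this parser expects; the block count
 * and sizes here are examples only:
 *
 *   vendor array: SKL_TKN_U8_NUM_BLOCKS  = 2
 *   vendor array: SKL_TKN_U8_BLOCK_TYPE  = SKL_TYPE_TUPLE
 *   vendor array: SKL_TKN_U16_BLOCK_SIZE = <n0>
 *   <n0 bytes of token/value tuples parsed by skl_tplg_get_tokens()>
 *   vendor array: SKL_TKN_U8_BLOCK_TYPE  = <non-tuple type>
 *   vendor array: SKL_TKN_U16_BLOCK_SIZE = <n1>
 *   <n1 bytes of raw caps copied into formats_config.caps>
 */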
2347static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
2348 struct skl *skl, struct device *dev,
2349 struct skl_module_cfg *mconfig)
2350{
2351 struct snd_soc_tplg_vendor_array *array;
2352 int num_blocks, block_size = 0, block_type, off = 0;
2353 char *data;
2354 int ret;
2355
2356 /* Read the NUM_DATA_BLOCKS descriptor */
2357 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
2358 ret = skl_tplg_get_desc_blocks(dev, array);
2359 if (ret < 0)
2360 return ret;
2361 num_blocks = ret;
2362
2363 off += array->size;
6277e832
SN
2364 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2365 while (num_blocks > 0) {
133e6e5c
SN
2366 array = (struct snd_soc_tplg_vendor_array *)
2367 (tplg_w->priv.data + off);
2368
6277e832
SN
2369 ret = skl_tplg_get_desc_blocks(dev, array);
2370
2371 if (ret < 0)
2372 return ret;
2373 block_type = ret;
2374 off += array->size;
2375
2376 array = (struct snd_soc_tplg_vendor_array *)
2377 (tplg_w->priv.data + off);
2378
2379 ret = skl_tplg_get_desc_blocks(dev, array);
2380
2381 if (ret < 0)
2382 return ret;
2383 block_size = ret;
2384 off += array->size;
2385
2386 array = (struct snd_soc_tplg_vendor_array *)
2387 (tplg_w->priv.data + off);
2388
2389 data = (tplg_w->priv.data + off);
2390
2391 if (block_type == SKL_TYPE_TUPLE) {
2392 ret = skl_tplg_get_tokens(dev, data,
2393 skl, mconfig, block_size);
2394
2395 if (ret < 0)
2396 return ret;
2397
2398 --num_blocks;
2399 } else {
2400 if (mconfig->formats_config.caps_size > 0)
2401 memcpy(mconfig->formats_config.caps, data,
2402 mconfig->formats_config.caps_size);
2403 --num_blocks;
133e6e5c 2404 ret = mconfig->formats_config.caps_size;
6277e832 2405 }
133e6e5c 2406 off += ret;
6277e832
SN
2407 }
2408
2409 return 0;
4cd9899f
HS
2410}
2411
fe3f4442
D
2412static void skl_clear_pin_config(struct snd_soc_platform *platform,
2413 struct snd_soc_dapm_widget *w)
2414{
2415 int i;
2416 struct skl_module_cfg *mconfig;
2417 struct skl_pipe *pipe;
2418
2419 if (!strncmp(w->dapm->component->name, platform->component.name,
2420 strlen(platform->component.name))) {
2421 mconfig = w->priv;
2422 pipe = mconfig->pipe;
2423 for (i = 0; i < mconfig->max_in_queue; i++) {
2424 mconfig->m_in_pin[i].in_use = false;
2425 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
2426 }
2427 for (i = 0; i < mconfig->max_out_queue; i++) {
2428 mconfig->m_out_pin[i].in_use = false;
2429 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
2430 }
2431 pipe->state = SKL_PIPE_INVALID;
2432 mconfig->m_state = SKL_MODULE_UNINIT;
2433 }
2434}
2435
2436void skl_cleanup_resources(struct skl *skl)
2437{
2438 struct skl_sst *ctx = skl->skl_sst;
2439 struct snd_soc_platform *soc_platform = skl->platform;
2440 struct snd_soc_dapm_widget *w;
2441 struct snd_soc_card *card;
2442
2443 if (soc_platform == NULL)
2444 return;
2445
2446 card = soc_platform->component.card;
2447 if (!card || !card->instantiated)
2448 return;
2449
2450 skl->resource.mem = 0;
2451 skl->resource.mcps = 0;
2452
2453 list_for_each_entry(w, &card->widgets, list) {
2454 if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
2455 skl_clear_pin_config(soc_platform, w);
2456 }
2457
2458 skl_clear_module_cnt(ctx->dsp);
2459}
2460
3af36706
VK
2461/*
2462 * Topology core widget load callback
2463 *
2464 * This is used to save the private data for each widget, which gives
2465 * the driver information about the module and pipeline parameters the DSP
2466 * FW expects, such as ids, resource values, formats, etc.
2467 */
2468static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
b663a8c5
JK
2469 struct snd_soc_dapm_widget *w,
2470 struct snd_soc_tplg_dapm_widget *tplg_w)
3af36706
VK
2471{
2472 int ret;
2473 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2474 struct skl *skl = ebus_to_skl(ebus);
2475 struct hdac_bus *bus = ebus_to_hbus(ebus);
2476 struct skl_module_cfg *mconfig;
3af36706
VK
2477
2478 if (!tplg_w->priv.size)
2479 goto bind_event;
2480
2481 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
2482
2483 if (!mconfig)
2484 return -ENOMEM;
2485
2486 w->priv = mconfig;
09305da9 2487
b7c50555
VK
2488 /*
2489 * module binary can be loaded later, so set it to be queried when the
2490 * module is loaded for a use case
2491 */
2492 mconfig->id.module_id = -1;
3af36706 2493
6277e832
SN
2494 /* Parse private data for tuples */
2495 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
2496 if (ret < 0)
2497 return ret;
d14700a0
VK
2498
2499 skl_debug_init_module(skl->debugfs, w, mconfig);
2500
3af36706
VK
2501bind_event:
2502 if (tplg_w->event_type == 0) {
3373f716 2503 dev_dbg(bus->dev, "ASoC: No event handler required\n");
3af36706
VK
2504 return 0;
2505 }
2506
2507 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
b663a8c5
JK
2508 ARRAY_SIZE(skl_tplg_widget_ops),
2509 tplg_w->event_type);
3af36706
VK
2510
2511 if (ret) {
2512 dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
2513 __func__, tplg_w->event_type);
2514 return -EINVAL;
2515 }
2516
2517 return 0;
2518}
2519
140adfba
JK
2520static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
2521 struct snd_soc_tplg_bytes_control *bc)
2522{
2523 struct skl_algo_data *ac;
2524 struct skl_dfw_algo_data *dfw_ac =
2525 (struct skl_dfw_algo_data *)bc->priv.data;
2526
2527 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
2528 if (!ac)
2529 return -ENOMEM;
2530
2531 /* Fill private data */
2532 ac->max = dfw_ac->max;
2533 ac->param_id = dfw_ac->param_id;
2534 ac->set_params = dfw_ac->set_params;
0d682104 2535 ac->size = dfw_ac->max;
140adfba
JK
2536
2537 if (ac->max) {
2538 ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
2539 if (!ac->params)
2540 return -ENOMEM;
2541
edd7ea2d 2542 memcpy(ac->params, dfw_ac->params, ac->max);
140adfba
JK
2543 }
2544
2545 be->dobj.private = ac;
2546 return 0;
2547}
2548
7a1b749b
D
2549static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
2550 struct snd_soc_tplg_enum_control *ec)
2551{
2552
2553 void *data;
2554
2555 if (ec->priv.size) {
2556 data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
2557 if (!data)
2558 return -ENOMEM;
2559 memcpy(data, ec->priv.data, ec->priv.size);
2560 se->dobj.private = data;
2561 }
2562
2563 return 0;
2564
2565}
2566
140adfba
JK
2567static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
2568 struct snd_kcontrol_new *kctl,
2569 struct snd_soc_tplg_ctl_hdr *hdr)
2570{
2571 struct soc_bytes_ext *sb;
2572 struct snd_soc_tplg_bytes_control *tplg_bc;
7a1b749b 2573 struct snd_soc_tplg_enum_control *tplg_ec;
140adfba
JK
2574 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2575 struct hdac_bus *bus = ebus_to_hbus(ebus);
7a1b749b 2576 struct soc_enum *se;
140adfba
JK
2577
2578 switch (hdr->ops.info) {
2579 case SND_SOC_TPLG_CTL_BYTES:
2580 tplg_bc = container_of(hdr,
2581 struct snd_soc_tplg_bytes_control, hdr);
2582 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
2583 sb = (struct soc_bytes_ext *)kctl->private_value;
2584 if (tplg_bc->priv.size)
2585 return skl_init_algo_data(
2586 bus->dev, sb, tplg_bc);
2587 }
2588 break;
2589
7a1b749b
D
2590 case SND_SOC_TPLG_CTL_ENUM:
2591 tplg_ec = container_of(hdr,
2592 struct snd_soc_tplg_enum_control, hdr);
2593 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
2594 se = (struct soc_enum *)kctl->private_value;
2595 if (tplg_ec->priv.size)
2596 return skl_init_enum_data(bus->dev, se,
2597 tplg_ec);
2598 }
2599 break;
2600
140adfba
JK
2601 default:
2602 dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
2603 hdr->ops.get, hdr->ops.put, hdr->ops.info);
2604 break;
2605 }
2606
2607 return 0;
2608}
2609
541070ce
SN
2610static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
2611 struct snd_soc_tplg_vendor_string_elem *str_elem,
eee0e16f 2612 struct skl *skl)
541070ce
SN
2613{
2614 int tkn_count = 0;
2615 static int ref_count;
2616
2617 switch (str_elem->token) {
2618 case SKL_TKN_STR_LIB_NAME:
eee0e16f 2619 if (ref_count > skl->skl_sst->lib_count - 1) {
541070ce
SN
2620 ref_count = 0;
2621 return -EINVAL;
2622 }
2623
eee0e16f
JK
2624 strncpy(skl->skl_sst->lib_info[ref_count].name,
2625 str_elem->string,
2626 ARRAY_SIZE(skl->skl_sst->lib_info[ref_count].name));
541070ce
SN
2627 ref_count++;
2628 tkn_count++;
2629 break;
2630
2631 default:
ecd286a9 2632 dev_err(dev, "Not a string token %d\n", str_elem->token);
541070ce
SN
2633 break;
2634 }
2635
2636 return tkn_count;
2637}
2638
2639static int skl_tplg_get_str_tkn(struct device *dev,
2640 struct snd_soc_tplg_vendor_array *array,
eee0e16f 2641 struct skl *skl)
541070ce
SN
2642{
2643 int tkn_count = 0, ret;
2644 struct snd_soc_tplg_vendor_string_elem *str_elem;
2645
2646 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
2647 while (tkn_count < array->num_elems) {
eee0e16f 2648 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
541070ce
SN
2649 str_elem++;
2650
2651 if (ret < 0)
2652 return ret;
2653
2654 tkn_count = tkn_count + ret;
2655 }
2656
2657 return tkn_count;
2658}
2659
2660static int skl_tplg_get_int_tkn(struct device *dev,
2661 struct snd_soc_tplg_vendor_value_elem *tkn_elem,
eee0e16f 2662 struct skl *skl)
541070ce
SN
2663{
2664 int tkn_count = 0;
2665
2666 switch (tkn_elem->token) {
2667 case SKL_TKN_U32_LIB_COUNT:
eee0e16f 2668 skl->skl_sst->lib_count = tkn_elem->value;
541070ce
SN
2669 tkn_count++;
2670 break;
2671
2672 default:
ecd286a9 2673 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
541070ce
SN
2674 return -EINVAL;
2675 }
2676
2677 return tkn_count;
2678}
2679
2680/*
2681 * Fill the manifest structure by parsing the tokens based on the
2682 * type.
2683 */
2684static int skl_tplg_get_manifest_tkn(struct device *dev,
eee0e16f 2685 char *pvt_data, struct skl *skl,
541070ce
SN
2686 int block_size)
2687{
2688 int tkn_count = 0, ret;
2689 int off = 0, tuple_size = 0;
2690 struct snd_soc_tplg_vendor_array *array;
2691 struct snd_soc_tplg_vendor_value_elem *tkn_elem;
2692
2693 if (block_size <= 0)
2694 return -EINVAL;
2695
2696 while (tuple_size < block_size) {
2697 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
2698 off += array->size;
2699 switch (array->type) {
2700 case SND_SOC_TPLG_TUPLE_TYPE_STRING:
eee0e16f 2701 ret = skl_tplg_get_str_tkn(dev, array, skl);
541070ce
SN
2702
2703 if (ret < 0)
2704 return ret;
0a716776 2705 tkn_count = ret;
541070ce
SN
2706
2707 tuple_size += tkn_count *
2708 sizeof(struct snd_soc_tplg_vendor_string_elem);
2709 continue;
2710
2711 case SND_SOC_TPLG_TUPLE_TYPE_UUID:
ecd286a9 2712 dev_warn(dev, "no uuid tokens for skl tplg manifest\n");
541070ce
SN
2713 continue;
2714
2715 default:
2716 tkn_elem = array->value;
2717 tkn_count = 0;
2718 break;
2719 }
2720
2721 while (tkn_count <= array->num_elems - 1) {
2722 ret = skl_tplg_get_int_tkn(dev,
eee0e16f 2723 tkn_elem, skl);
541070ce
SN
2724 if (ret < 0)
2725 return ret;
2726
2727 tkn_count = tkn_count + ret;
2728 tkn_elem++;
541070ce 2729 }
9fc129f6 2730 tuple_size += (tkn_count * sizeof(*tkn_elem));
541070ce
SN
2731 tkn_count = 0;
2732 }
2733
9fc129f6 2734 return off;
541070ce
SN
2735}
2736
2737/*
2738 * Parse manifest private data for tokens. The private data block is
2739 * preceded by descriptors for type and size of data block.
2740 */
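/*
 * Illustrative manifest tuple block (library count and names are examples
 * only): after the NUM_BLOCKS/BLOCK_TYPE/BLOCK_SIZE descriptors, a tuple
 * block could carry
 *   SKL_TKN_U32_LIB_COUNT = 2
 *   SKL_TKN_STR_LIB_NAME  = "lib_a.bin", "lib_b.bin"
 * which fills skl_sst->lib_count and lib_info[] via the handlers above.
 */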
2741static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
eee0e16f 2742 struct device *dev, struct skl *skl)
541070ce
SN
2743{
2744 struct snd_soc_tplg_vendor_array *array;
2745 int num_blocks, block_size = 0, block_type, off = 0;
2746 char *data;
2747 int ret;
2748
2749 /* Read the NUM_DATA_BLOCKS descriptor */
2750 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
2751 ret = skl_tplg_get_desc_blocks(dev, array);
2752 if (ret < 0)
2753 return ret;
2754 num_blocks = ret;
2755
2756 off += array->size;
541070ce
SN
2757 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
2758 while (num_blocks > 0) {
9fc129f6
SN
2759 array = (struct snd_soc_tplg_vendor_array *)
2760 (manifest->priv.data + off);
541070ce
SN
2761 ret = skl_tplg_get_desc_blocks(dev, array);
2762
2763 if (ret < 0)
2764 return ret;
2765 block_type = ret;
2766 off += array->size;
2767
2768 array = (struct snd_soc_tplg_vendor_array *)
2769 (manifest->priv.data + off);
2770
2771 ret = skl_tplg_get_desc_blocks(dev, array);
2772
2773 if (ret < 0)
2774 return ret;
2775 block_size = ret;
2776 off += array->size;
2777
2778 array = (struct snd_soc_tplg_vendor_array *)
2779 (manifest->priv.data + off);
2780
2781 data = (manifest->priv.data + off);
2782
2783 if (block_type == SKL_TYPE_TUPLE) {
eee0e16f 2784 ret = skl_tplg_get_manifest_tkn(dev, data, skl,
541070ce
SN
2785 block_size);
2786
2787 if (ret < 0)
2788 return ret;
2789
2790 --num_blocks;
2791 } else {
2792 return -EINVAL;
2793 }
9fc129f6 2794 off += ret;
541070ce
SN
2795 }
2796
2797 return 0;
2798}
2799
15ecaba9
K
2800static int skl_manifest_load(struct snd_soc_component *cmpnt,
2801 struct snd_soc_tplg_manifest *manifest)
2802{
15ecaba9
K
2803 struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
2804 struct hdac_bus *bus = ebus_to_hbus(ebus);
2805 struct skl *skl = ebus_to_skl(ebus);
15ecaba9 2806
c15ad605
VK
2807 /* proceed only if we have private data defined */
2808 if (manifest->priv.size == 0)
2809 return 0;
2810
eee0e16f 2811 skl_tplg_get_manifest_data(manifest, bus->dev, skl);
15ecaba9 2812
eee0e16f 2813 if (skl->skl_sst->lib_count > SKL_MAX_LIB) {
15ecaba9 2814 dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
eee0e16f
JK
2815 skl->skl_sst->lib_count);
2816 return -EINVAL;
15ecaba9
K
2817 }
2818
eee0e16f 2819 return 0;
15ecaba9
K
2820}
2821
3af36706
VK
2822static struct snd_soc_tplg_ops skl_tplg_ops = {
2823 .widget_load = skl_tplg_widget_load,
140adfba
JK
2824 .control_load = skl_tplg_control_load,
2825 .bytes_ext_ops = skl_tlv_ops,
2826 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
7a1b749b
D
2827 .io_ops = skl_tplg_kcontrol_ops,
2828 .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
15ecaba9 2829 .manifest = skl_manifest_load,
3af36706
VK
2830};
2831
287af4f9
JK
2832/*
2833 * A pipe can have multiple modules, each of which will be a DAPM widget as
2834 * well. While managing a pipeline we need the list of all the
2835 * widgets in a pipeline, so this helper - skl_tplg_create_pipe_widget_list()
2836 * - collects the SKL type widgets in that pipeline
2837 */
2838static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
2839{
2840 struct snd_soc_dapm_widget *w;
2841 struct skl_module_cfg *mcfg = NULL;
2842 struct skl_pipe_module *p_module = NULL;
2843 struct skl_pipe *pipe;
2844
2845 list_for_each_entry(w, &platform->component.card->widgets, list) {
2846 if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
2847 mcfg = w->priv;
2848 pipe = mcfg->pipe;
2849
2850 p_module = devm_kzalloc(platform->dev,
2851 sizeof(*p_module), GFP_KERNEL);
2852 if (!p_module)
2853 return -ENOMEM;
2854
2855 p_module->w = w;
2856 list_add_tail(&p_module->node, &pipe->w_list);
2857 }
2858 }
2859
2860 return 0;
2861}
2862
f0aa94fa
JK
2863static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
2864{
2865 struct skl_pipe_module *w_module;
2866 struct snd_soc_dapm_widget *w;
2867 struct skl_module_cfg *mconfig;
2868 bool host_found = false, link_found = false;
2869
2870 list_for_each_entry(w_module, &pipe->w_list, node) {
2871 w = w_module->w;
2872 mconfig = w->priv;
2873
2874 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
2875 host_found = true;
2876 else if (mconfig->dev_type != SKL_DEVICE_NONE)
2877 link_found = true;
2878 }
2879
2880 if (host_found && link_found)
2881 pipe->passthru = true;
2882 else
2883 pipe->passthru = false;
2884}
2885
3af36706
VK
2886/* This will be read from topology manifest, currently defined here */
2887#define SKL_MAX_MCPS 30000000
2888#define SKL_FW_MAX_MEM 1000000
2889
2890/*
2891 * SKL topology init routine
2892 */
2893int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
2894{
2895 int ret;
2896 const struct firmware *fw;
2897 struct hdac_bus *bus = ebus_to_hbus(ebus);
2898 struct skl *skl = ebus_to_skl(ebus);
f0aa94fa 2899 struct skl_pipeline *ppl;
3af36706 2900
4b235c43 2901 ret = request_firmware(&fw, skl->tplg_name, bus->dev);
3af36706 2902 if (ret < 0) {
b663a8c5 2903 dev_err(bus->dev, "tplg fw %s load failed with %d\n",
4b235c43
VK
2904 skl->tplg_name, ret);
2905 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
2906 if (ret < 0) {
2907 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
2908 "dfw_sst.bin", ret);
2909 return ret;
2910 }
3af36706
VK
2911 }
2912
2913 /*
2914 * The complete tplg for SKL is loaded as index 0; we don't use
2915 * any other index
2916 */
b663a8c5
JK
2917 ret = snd_soc_tplg_component_load(&platform->component,
2918 &skl_tplg_ops, fw, 0);
3af36706
VK
2919 if (ret < 0) {
2920 dev_err(bus->dev, "tplg component load failed %d\n", ret);
c14a82c7 2921 release_firmware(fw);
3af36706
VK
2922 return -EINVAL;
2923 }
2924
2925 skl->resource.max_mcps = SKL_MAX_MCPS;
2926 skl->resource.max_mem = SKL_FW_MAX_MEM;
2927
d8018361 2928 skl->tplg = fw;
287af4f9
JK
2929 ret = skl_tplg_create_pipe_widget_list(platform);
2930 if (ret < 0)
2931 return ret;
d8018361 2932
f0aa94fa
JK
2933 list_for_each_entry(ppl, &skl->ppl_list, node)
2934 skl_tplg_set_pipe_type(skl, ppl->pipe);
d8018361 2935
3af36706
VK
2936 return 0;
2937}