1 /*
2 * skl-messages.c - HDA DSP interface for FW registration, Pipe and Module
3 * configurations
4 *
5 * Copyright (C) 2015 Intel Corp
6 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
7 * Jeeja KP <jeeja.kp@intel.com>
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as version 2, as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19
20 #include <linux/slab.h>
21 #include <linux/pci.h>
22 #include <sound/core.h>
23 #include <sound/pcm.h>
24 #include "skl-sst-dsp.h"
25 #include "skl-sst-ipc.h"
26 #include "skl.h"
27 #include "../common/sst-dsp.h"
28 #include "../common/sst-dsp-priv.h"
29 #include "skl-topology.h"
30 #include "skl-tplg-interface.h"
31
32 static int skl_alloc_dma_buf(struct device *dev,
33 struct snd_dma_buffer *dmab, size_t size)
34 {
35 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
36 struct hdac_bus *bus = ebus_to_hbus(ebus);
37
38 if (!bus)
39 return -ENODEV;
40
41 return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
42 }
43
44 static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
45 {
46 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
47 struct hdac_bus *bus = ebus_to_hbus(ebus);
48
49 if (!bus)
50 return -ENODEV;
51
52 bus->io_ops->dma_free_pages(bus, dmab);
53
54 return 0;
55 }
56
57 #define NOTIFICATION_PARAM_ID 3
58 #define NOTIFICATION_MASK 0xf
59
60 /* enable/disable underrun/overrun notifications from the firmware module */
61 void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
62 {
63 struct notification_mask mask;
64 struct skl_ipc_large_config_msg msg = {0};
65
66 mask.notify = NOTIFICATION_MASK;
67 mask.enable = enable;
68
69 msg.large_param_id = NOTIFICATION_PARAM_ID;
70 msg.param_data_size = sizeof(mask);
71
72 skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)&mask);
73 }
74
75 static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
76 int stream_tag, int enable)
77 {
78 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
79 struct hdac_bus *bus = ebus_to_hbus(ebus);
80 struct hdac_stream *stream = snd_hdac_get_stream(bus,
81 SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
82 struct hdac_ext_stream *estream;
83
84 if (!stream)
85 return -EINVAL;
86
87 estream = stream_to_hdac_ext_stream(stream);
88 /* enable/disable SPIB for this hdac stream */
89 snd_hdac_ext_stream_spbcap_enable(ebus, enable, stream->index);
90
91 /* set the spib value */
92 snd_hdac_ext_stream_set_spib(ebus, estream, size);
93
94 return 0;
95 }
96
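/*
 * Prepare a host DMA stream for a DSP code/firmware download: assign a
 * playback host stream, program it for the given format/size and buffer,
 * and enable SPIB so the transfer position can be tracked. Returns the
 * stream tag used for the download.
 */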
97 static int skl_dsp_prepare(struct device *dev, unsigned int format,
98 unsigned int size, struct snd_dma_buffer *dmab)
99 {
100 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
101 struct hdac_bus *bus = ebus_to_hbus(ebus);
102 struct hdac_ext_stream *estream;
103 struct hdac_stream *stream;
104 struct snd_pcm_substream substream;
105 int ret;
106
107 if (!bus)
108 return -ENODEV;
109
110 memset(&substream, 0, sizeof(substream));
111 substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
112
113 estream = snd_hdac_ext_stream_assign(ebus, &substream,
114 HDAC_EXT_STREAM_TYPE_HOST);
115 if (!estream)
116 return -ENODEV;
117
118 stream = hdac_stream(estream);
119
120 /* prepare the decoupled host DMA channel */
121 ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
122 if (ret < 0)
123 return ret;
124
125 skl_dsp_setup_spib(dev, size, stream->stream_tag, true);
126
127 return stream->stream_tag;
128 }
129
130 static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
131 {
132 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
133 struct hdac_stream *stream;
134 struct hdac_bus *bus = ebus_to_hbus(ebus);
135
136 if (!bus)
137 return -ENODEV;
138
139 stream = snd_hdac_get_stream(bus,
140 SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
141 if (!stream)
142 return -EINVAL;
143
144 snd_hdac_dsp_trigger(stream, start);
145
146 return 0;
147 }
148
149 static int skl_dsp_cleanup(struct device *dev,
150 struct snd_dma_buffer *dmab, int stream_tag)
151 {
152 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
153 struct hdac_stream *stream;
154 struct hdac_ext_stream *estream;
155 struct hdac_bus *bus = ebus_to_hbus(ebus);
156
157 if (!bus)
158 return -ENODEV;
159
160 stream = snd_hdac_get_stream(bus,
161 SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
162 if (!stream)
163 return -EINVAL;
164
165 estream = stream_to_hdac_ext_stream(stream);
166 skl_dsp_setup_spib(dev, 0, stream_tag, false);
167 snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);
168
169 snd_hdac_dsp_cleanup(stream, dmab);
170
171 return 0;
172 }
173
174 static struct skl_dsp_loader_ops skl_get_loader_ops(void)
175 {
176 struct skl_dsp_loader_ops loader_ops;
177
178 memset(&loader_ops, 0, sizeof(struct skl_dsp_loader_ops));
179
180 loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
181 loader_ops.free_dma_buf = skl_free_dma_buf;
182
183 return loader_ops;
184 }
185
186 static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
187 {
188 struct skl_dsp_loader_ops loader_ops;
189
190 memset(&loader_ops, 0, sizeof(loader_ops));
191
192 loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
193 loader_ops.free_dma_buf = skl_free_dma_buf;
194 loader_ops.prepare = skl_dsp_prepare;
195 loader_ops.trigger = skl_dsp_trigger;
196 loader_ops.cleanup = skl_dsp_cleanup;
197
198 return loader_ops;
199 }
200
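/*
 * Per-platform DSP ops, keyed by the PCI device ID of the HDA controller:
 * 0x9d70 (Sunrise Point-LP/Skylake), 0x9d71 (Kabylake-LP),
 * 0x5a98 (Broxton-P/Apollo Lake) and 0x3198 (Gemini Lake).
 */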
201 static const struct skl_dsp_ops dsp_ops[] = {
202 {
203 .id = 0x9d70,
204 .loader_ops = skl_get_loader_ops,
205 .init = skl_sst_dsp_init,
206 .init_fw = skl_sst_init_fw,
207 .cleanup = skl_sst_dsp_cleanup
208 },
209 {
210 .id = 0x9d71,
211 .loader_ops = skl_get_loader_ops,
212 .init = kbl_sst_dsp_init,
213 .init_fw = skl_sst_init_fw,
214 .cleanup = skl_sst_dsp_cleanup
215 },
216 {
217 .id = 0x5a98,
218 .loader_ops = bxt_get_loader_ops,
219 .init = bxt_sst_dsp_init,
220 .init_fw = bxt_sst_init_fw,
221 .cleanup = bxt_sst_dsp_cleanup
222 },
223 {
224 .id = 0x3198,
225 .loader_ops = bxt_get_loader_ops,
226 .init = bxt_sst_dsp_init,
227 .init_fw = bxt_sst_init_fw,
228 .cleanup = bxt_sst_dsp_cleanup
229 },
230 };
231
232 const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
233 {
234 int i;
235
236 for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
237 if (dsp_ops[i].id == pci_id)
238 return &dsp_ops[i];
239 }
240
241 return NULL;
242 }
243
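/*
 * Probe-time DSP init: enable the processing-pipe (PPCAP) capability and
 * its interrupt, map the ADSP MMIO BAR, look up the per-platform ops by
 * PCI device ID and call its init handler to register the DSP.
 */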
244 int skl_init_dsp(struct skl *skl)
245 {
246 void __iomem *mmio_base;
247 struct hdac_ext_bus *ebus = &skl->ebus;
248 struct hdac_bus *bus = ebus_to_hbus(ebus);
249 struct skl_dsp_loader_ops loader_ops;
250 int irq = bus->irq;
251 const struct skl_dsp_ops *ops;
252 int ret;
253
254 /* enable ppcap interrupt */
255 snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
256 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
257
258 /* read the BAR of the ADSP MMIO */
259 mmio_base = pci_ioremap_bar(skl->pci, 4);
260 if (mmio_base == NULL) {
261 dev_err(bus->dev, "ioremap error\n");
262 return -ENXIO;
263 }
264
265 ops = skl_get_dsp_ops(skl->pci->device);
266 if (!ops) {
iounmap(mmio_base);
return -EIO;
}
268
269 loader_ops = ops->loader_ops();
270 ret = ops->init(bus->dev, mmio_base, irq,
271 skl->fw_name, loader_ops,
272 &skl->skl_sst);
273
274 if (ret < 0)
275 return ret;
276
277 skl->skl_sst->dsp_ops = ops;
278 dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
279
280 return ret;
281 }
282
283 int skl_free_dsp(struct skl *skl)
284 {
285 struct hdac_ext_bus *ebus = &skl->ebus;
286 struct hdac_bus *bus = ebus_to_hbus(ebus);
287 struct skl_sst *ctx = skl->skl_sst;
288
289 /* disable ppcap interrupt */
290 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
291
292 ctx->dsp_ops->cleanup(bus->dev, ctx);
293
294 if (ctx->dsp->addr.lpe)
295 iounmap(ctx->dsp->addr.lpe);
296
297 return 0;
298 }
299
300 /*
301 * In the case of "suspend_active", i.e. the audio IP being active
302 * during system suspend, immediately execute any pending D0i3 work
303 * before suspending. This is needed for the IP to work in low power
304 * mode during system suspend. In the case of normal suspend, cancel
305 * any pending D0i3 work.
306 */
307 int skl_suspend_late_dsp(struct skl *skl)
308 {
309 struct skl_sst *ctx = skl->skl_sst;
310 struct delayed_work *dwork;
311
312 if (!ctx)
313 return 0;
314
315 dwork = &ctx->d0i3.work;
316
317 if (dwork->work.func) {
318 if (skl->supend_active)
319 flush_delayed_work(dwork);
320 else
321 cancel_delayed_work_sync(dwork);
322 }
323
324 return 0;
325 }
326
327 int skl_suspend_dsp(struct skl *skl)
328 {
329 struct skl_sst *ctx = skl->skl_sst;
330 int ret;
331
332 /* if ppcap is not supported return 0 */
333 if (!skl->ebus.bus.ppcap)
334 return 0;
335
336 ret = skl_dsp_sleep(ctx->dsp);
337 if (ret < 0)
338 return ret;
339
340 /* disable ppcap interrupt */
341 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
342 snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);
343
344 return 0;
345 }
346
347 int skl_resume_dsp(struct skl *skl)
348 {
349 struct skl_sst *ctx = skl->skl_sst;
350 int ret;
351
352 /* if ppcap is not supported return 0 */
353 if (!skl->ebus.bus.ppcap)
354 return 0;
355
356 /* enable ppcap interrupt */
357 snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
358 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
359
360 /* check if DSP 1st boot is done */
361 if (skl->skl_sst->is_first_boot)
362 return 0;
363
364 ret = skl_dsp_wake(ctx->dsp);
365 if (ret < 0)
366 return ret;
367
368 skl_dsp_enable_notification(skl->skl_sst, false);
369 return ret;
370 }
371
372 enum skl_bitdepth skl_get_bit_depth(int params)
373 {
374 switch (params) {
375 case 8:
376 return SKL_DEPTH_8BIT;
377
378 case 16:
379 return SKL_DEPTH_16BIT;
380
381 case 24:
382 return SKL_DEPTH_24BIT;
383
384 case 32:
385 return SKL_DEPTH_32BIT;
386
387 default:
388 return SKL_DEPTH_INVALID;
389
390 }
391 }
392
393 /*
394 * Each module in the DSP expects a base module configuration, which consists
395 * of PCM format information, calculated in the driver, and resource values,
396 * read from the widget information passed through the topology binary.
397 * This is sent when we create a module with the INIT_INSTANCE IPC message.
398 */
399 static void skl_set_base_module_format(struct skl_sst *ctx,
400 struct skl_module_cfg *mconfig,
401 struct skl_base_cfg *base_cfg)
402 {
403 struct skl_module_fmt *format = &mconfig->in_fmt[0];
404
405 base_cfg->audio_fmt.number_of_channels = (u8)format->channels;
406
407 base_cfg->audio_fmt.s_freq = format->s_freq;
408 base_cfg->audio_fmt.bit_depth = format->bit_depth;
409 base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
410 base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
411
412 dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
413 format->bit_depth, format->valid_bit_depth,
414 format->ch_cfg);
415
416 base_cfg->audio_fmt.channel_map = format->ch_map;
417
418 base_cfg->audio_fmt.interleaving = format->interleaving_style;
419
420 base_cfg->cps = mconfig->mcps;
421 base_cfg->ibs = mconfig->ibs;
422 base_cfg->obs = mconfig->obs;
423 base_cfg->is_pages = mconfig->mem_pages;
424 }
425
426 /*
427 * Copies copier capabilities into copier module and updates copier module
428 * config size.
429 */
430 static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
431 struct skl_cpr_cfg *cpr_mconfig)
432 {
433 if (mconfig->formats_config.caps_size == 0)
434 return;
435
436 memcpy(cpr_mconfig->gtw_cfg.config_data,
437 mconfig->formats_config.caps,
438 mconfig->formats_config.caps_size);
439
440 cpr_mconfig->gtw_cfg.config_length =
441 (mconfig->formats_config.caps_size) / 4;
442 }
443
444 #define SKL_NON_GATEWAY_CPR_NODE_ID 0xFFFFFFFF
445 /*
446 * Calculate the gateway settings required for the copier module: the type
447 * of gateway and the index of the gateway to use.
448 */
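/*
 * For example (illustrative only): for an SSP/I2S copier with vbus_id 1
 * and time_slot 0, dma_type selects the I2S link input or output class
 * based on the connection direction, and vindex carries the
 * skl_ssp_dma_node value { .i2s_instance = 1, .time_slot_index = 0 }.
 */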
449 static u32 skl_get_node_id(struct skl_sst *ctx,
450 struct skl_module_cfg *mconfig)
451 {
452 union skl_connector_node_id node_id = {0};
453 union skl_ssp_dma_node ssp_node = {0};
454 struct skl_pipe_params *params = mconfig->pipe->p_params;
455
456 switch (mconfig->dev_type) {
457 case SKL_DEVICE_BT:
458 node_id.node.dma_type =
459 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
460 SKL_DMA_I2S_LINK_OUTPUT_CLASS :
461 SKL_DMA_I2S_LINK_INPUT_CLASS;
462 node_id.node.vindex = params->host_dma_id +
463 (mconfig->vbus_id << 3);
464 break;
465
466 case SKL_DEVICE_I2S:
467 node_id.node.dma_type =
468 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
469 SKL_DMA_I2S_LINK_OUTPUT_CLASS :
470 SKL_DMA_I2S_LINK_INPUT_CLASS;
471 ssp_node.dma_node.time_slot_index = mconfig->time_slot;
472 ssp_node.dma_node.i2s_instance = mconfig->vbus_id;
473 node_id.node.vindex = ssp_node.val;
474 break;
475
476 case SKL_DEVICE_DMIC:
477 node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
478 node_id.node.vindex = mconfig->vbus_id +
479 (mconfig->time_slot);
480 break;
481
482 case SKL_DEVICE_HDALINK:
483 node_id.node.dma_type =
484 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
485 SKL_DMA_HDA_LINK_OUTPUT_CLASS :
486 SKL_DMA_HDA_LINK_INPUT_CLASS;
487 node_id.node.vindex = params->link_dma_id;
488 break;
489
490 case SKL_DEVICE_HDAHOST:
491 node_id.node.dma_type =
492 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
493 SKL_DMA_HDA_HOST_OUTPUT_CLASS :
494 SKL_DMA_HDA_HOST_INPUT_CLASS;
495 node_id.node.vindex = params->host_dma_id;
496 break;
497
498 default:
499 node_id.val = 0xFFFFFFFF;
500 break;
501 }
502
503 return node_id.val;
504 }
505
506 static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
507 struct skl_module_cfg *mconfig,
508 struct skl_cpr_cfg *cpr_mconfig)
509 {
510 u32 dma_io_buf;
511
512 cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);
513
514 if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
515 cpr_mconfig->cpr_feature_mask = 0;
516 return;
517 }
518
519 switch (mconfig->hw_conn_type) {
520 case SKL_CONN_SOURCE:
521 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
522 dma_io_buf = mconfig->ibs;
523 else
524 dma_io_buf = mconfig->obs;
525 break;
526
527 case SKL_CONN_SINK:
528 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
529 dma_io_buf = mconfig->obs;
530 else
531 dma_io_buf = mconfig->ibs;
532 break;
533
534 default:
535 dev_warn(ctx->dev, "wrong connection type: %d\n",
536 mconfig->hw_conn_type);
537 return;
538 }
539
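/*
 * dma_buffer_size from the topology is a duration in milliseconds; it is
 * scaled by the module's ibs/obs (bytes, nominally 1 ms of audio, see the
 * 2 ms fallback below) to get the gateway DMA buffer size in bytes.
 */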
540 cpr_mconfig->gtw_cfg.dma_buffer_size =
541 mconfig->dma_buffer_size * dma_io_buf;
542
543 /* fallback to 2ms default value */
544 if (!cpr_mconfig->gtw_cfg.dma_buffer_size) {
545 if (mconfig->hw_conn_type == SKL_CONN_SOURCE)
546 cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
547 else
548 cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->ibs;
549 }
550
551 cpr_mconfig->cpr_feature_mask = 0;
552 cpr_mconfig->gtw_cfg.config_length = 0;
553
554 skl_copy_copier_caps(mconfig, cpr_mconfig);
555 }
556
557 #define DMA_CONTROL_ID 5
558
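/*
 * Send a DMA_CONTROL large-config set IPC carrying the gateway node id
 * and the raw capability blob from the topology, so the firmware can
 * program the DMA gateway (e.g. SSP/I2S settings) for this copier.
 */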
559 int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
560 {
561 struct skl_dma_control *dma_ctrl;
562 struct skl_ipc_large_config_msg msg = {0};
563 int err = 0;
564
565
566 /*
567 * if the blob size is zero, there is nothing to send
568 */
569 if (mconfig->formats_config.caps_size == 0)
570 return 0;
571
572 msg.large_param_id = DMA_CONTROL_ID;
573 msg.param_data_size = sizeof(struct skl_dma_control) +
574 mconfig->formats_config.caps_size;
575
576 dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
577 if (dma_ctrl == NULL)
578 return -ENOMEM;
579
580 dma_ctrl->node_id = skl_get_node_id(ctx, mconfig);
581
582 /* size in dwords */
583 dma_ctrl->config_length = mconfig->formats_config.caps_size / 4;
584
585 memcpy(dma_ctrl->config_data, mconfig->formats_config.caps,
586 mconfig->formats_config.caps_size);
587
588 err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);
589
590 kfree(dma_ctrl);
591 return err;
592 }
593
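/*
 * Fill the output audio format from the module's first output pin format;
 * this is embedded in the copier/base-outfmt INIT_INSTANCE payload.
 */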
594 static void skl_setup_out_format(struct skl_sst *ctx,
595 struct skl_module_cfg *mconfig,
596 struct skl_audio_data_format *out_fmt)
597 {
598 struct skl_module_fmt *format = &mconfig->out_fmt[0];
599
600 out_fmt->number_of_channels = (u8)format->channels;
601 out_fmt->s_freq = format->s_freq;
602 out_fmt->bit_depth = format->bit_depth;
603 out_fmt->valid_bit_depth = format->valid_bit_depth;
604 out_fmt->ch_cfg = format->ch_cfg;
605
606 out_fmt->channel_map = format->ch_map;
607 out_fmt->interleaving = format->interleaving_style;
608 out_fmt->sample_type = format->sample_type;
609
610 dev_dbg(ctx->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
611 out_fmt->number_of_channels, format->s_freq, format->bit_depth);
612 }
613
614 /*
615 * The DSP needs an SRC module for frequency conversion. SRC takes the base
616 * module configuration plus the target frequency, passed as an extra
617 * parameter in the SRC config.
618 */
619 static void skl_set_src_format(struct skl_sst *ctx,
620 struct skl_module_cfg *mconfig,
621 struct skl_src_module_cfg *src_mconfig)
622 {
623 struct skl_module_fmt *fmt = &mconfig->out_fmt[0];
624
625 skl_set_base_module_format(ctx, mconfig,
626 (struct skl_base_cfg *)src_mconfig);
627
628 src_mconfig->src_cfg = fmt->s_freq;
629 }
630
631 /*
632 * The DSP needs an updown mixer module for channel conversion. The updown
633 * module takes the base module configuration plus the channel configuration.
634 * It also takes coefficients; for now the firmware defaults are applied here.
635 */
636 static void skl_set_updown_mixer_format(struct skl_sst *ctx,
637 struct skl_module_cfg *mconfig,
638 struct skl_up_down_mixer_cfg *mixer_mconfig)
639 {
640 struct skl_module_fmt *fmt = &mconfig->out_fmt[0];
641 int i = 0;
642
643 skl_set_base_module_format(ctx, mconfig,
644 (struct skl_base_cfg *)mixer_mconfig);
645 mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
646
647 /* Select F/W default coefficient */
648 mixer_mconfig->coeff_sel = 0x0;
649
650 /* User coeff, don't care since we are selecting F/W defaults */
651 for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
652 mixer_mconfig->coeff[i] = 0xDEADBEEF;
653 }
654
655 /*
656 * 'copier' is a DSP-internal module which copies data from host DMA (HDA
657 * host DMA) or a link (HDA link, SSP, PDM).
658 * Here we calculate the copier module parameters, such as the PCM format,
659 * output format and gateway settings.
660 * The copier module config is sent as the input buffer with INIT_INSTANCE.
661 */
662 static void skl_set_copier_format(struct skl_sst *ctx,
663 struct skl_module_cfg *mconfig,
664 struct skl_cpr_cfg *cpr_mconfig)
665 {
666 struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
667 struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;
668
669 skl_set_base_module_format(ctx, mconfig, base_cfg);
670
671 skl_setup_out_format(ctx, mconfig, out_fmt);
672 skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
673 }
674
675 /*
676 * Algo modules are DSP pre-processing modules. An algo module takes the base
677 * module configuration plus the algorithm parameters.
678 */
679
680 static void skl_set_algo_format(struct skl_sst *ctx,
681 struct skl_module_cfg *mconfig,
682 struct skl_algo_cfg *algo_mcfg)
683 {
684 struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)algo_mcfg;
685
686 skl_set_base_module_format(ctx, mconfig, base_cfg);
687
688 if (mconfig->formats_config.caps_size == 0)
689 return;
690
691 memcpy(algo_mcfg->params,
692 mconfig->formats_config.caps,
693 mconfig->formats_config.caps_size);
694
695 }
696
697 /*
698 * The mic select module allows selecting one or more input channels, thus
699 * acting as a demux.
700 *
701 * The mic select module takes the base module configuration plus the
702 * output format configuration.
703 */
704 static void skl_set_base_outfmt_format(struct skl_sst *ctx,
705 struct skl_module_cfg *mconfig,
706 struct skl_base_outfmt_cfg *base_outfmt_mcfg)
707 {
708 struct skl_audio_data_format *out_fmt = &base_outfmt_mcfg->out_fmt;
709 struct skl_base_cfg *base_cfg =
710 (struct skl_base_cfg *)base_outfmt_mcfg;
711
712 skl_set_base_module_format(ctx, mconfig, base_cfg);
713 skl_setup_out_format(ctx, mconfig, out_fmt);
714 }
715
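/*
 * Size of the INIT_INSTANCE payload for a module: the type-specific config
 * structure plus, for copier and algo modules, the capability blob taken
 * from the topology.
 */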
716 static u16 skl_get_module_param_size(struct skl_sst *ctx,
717 struct skl_module_cfg *mconfig)
718 {
719 u16 param_size;
720
721 switch (mconfig->m_type) {
722 case SKL_MODULE_TYPE_COPIER:
723 param_size = sizeof(struct skl_cpr_cfg);
724 param_size += mconfig->formats_config.caps_size;
725 return param_size;
726
727 case SKL_MODULE_TYPE_SRCINT:
728 return sizeof(struct skl_src_module_cfg);
729
730 case SKL_MODULE_TYPE_UPDWMIX:
731 return sizeof(struct skl_up_down_mixer_cfg);
732
733 case SKL_MODULE_TYPE_ALGO:
734 param_size = sizeof(struct skl_base_cfg);
735 param_size += mconfig->formats_config.caps_size;
736 return param_size;
737
738 case SKL_MODULE_TYPE_BASE_OUTFMT:
739 case SKL_MODULE_TYPE_MIC_SELECT:
740 case SKL_MODULE_TYPE_KPB:
741 return sizeof(struct skl_base_outfmt_cfg);
742
743 default:
744 /*
745 * return only base cfg when no specific module type is
746 * specified
747 */
748 return sizeof(struct skl_base_cfg);
749 }
750
751 return 0;
752 }
753
754 /*
755 * The DSP firmware supports various modules such as copier, SRC, updown etc.
756 * These modules require various parameters to be calculated and sent to the
757 * DSP for module initialization. By default a generic module needs only the
758 * base module format configuration.
759 */
760
761 static int skl_set_module_format(struct skl_sst *ctx,
762 struct skl_module_cfg *module_config,
763 u16 *module_config_size,
764 void **param_data)
765 {
766 u16 param_size;
767
768 param_size = skl_get_module_param_size(ctx, module_config);
769
770 *param_data = kzalloc(param_size, GFP_KERNEL);
771 if (!*param_data)
772 return -ENOMEM;
773
774 *module_config_size = param_size;
775
776 switch (module_config->m_type) {
777 case SKL_MODULE_TYPE_COPIER:
778 skl_set_copier_format(ctx, module_config, *param_data);
779 break;
780
781 case SKL_MODULE_TYPE_SRCINT:
782 skl_set_src_format(ctx, module_config, *param_data);
783 break;
784
785 case SKL_MODULE_TYPE_UPDWMIX:
786 skl_set_updown_mixer_format(ctx, module_config, *param_data);
787 break;
788
789 case SKL_MODULE_TYPE_ALGO:
790 skl_set_algo_format(ctx, module_config, *param_data);
791 break;
792
793 case SKL_MODULE_TYPE_BASE_OUTFMT:
794 case SKL_MODULE_TYPE_MIC_SELECT:
795 case SKL_MODULE_TYPE_KPB:
796 skl_set_base_outfmt_format(ctx, module_config, *param_data);
797 break;
798
799 default:
800 skl_set_base_module_format(ctx, module_config, *param_data);
801 break;
802
803 }
804
805 dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
806 module_config->id.module_id, param_size);
807 print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
808 *param_data, param_size, false);
809 return 0;
810 }
811
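/*
 * Find the queue (pin index) on @mpin that is bound to the module
 * instance @id; used on unbind to locate the previously allocated pin.
 */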
812 static int skl_get_queue_index(struct skl_module_pin *mpin,
813 struct skl_module_inst_id id, int max)
814 {
815 int i;
816
817 for (i = 0; i < max; i++) {
818 if (mpin[i].id.module_id == id.module_id &&
819 mpin[i].id.instance_id == id.instance_id)
820 return i;
821 }
822
823 return -EINVAL;
824 }
825
826 /*
827 * Allocates a queue (pin index) for a module.
828 * If the pin is dynamic, the pin_index is allocated from 0 to max_pin.
829 * If static, the pin_index is fixed, based on module_id and instance_id.
830 */
831 static int skl_alloc_queue(struct skl_module_pin *mpin,
832 struct skl_module_cfg *tgt_cfg, int max)
833 {
834 int i;
835 struct skl_module_inst_id id = tgt_cfg->id;
836 /*
837 * if the pin is dynamic, find the first free pin;
838 * otherwise find the pin matching the module and instance id, as the
839 * topology will ensure a unique pin is assigned to it, so there is no
840 * need to allocate/free
841 */
842 for (i = 0; i < max; i++) {
843 if (mpin[i].is_dynamic) {
844 if (!mpin[i].in_use &&
845 mpin[i].pin_state == SKL_PIN_UNBIND) {
846
847 mpin[i].in_use = true;
848 mpin[i].id.module_id = id.module_id;
849 mpin[i].id.instance_id = id.instance_id;
850 mpin[i].id.pvt_id = id.pvt_id;
851 mpin[i].tgt_mcfg = tgt_cfg;
852 return i;
853 }
854 } else {
855 if (mpin[i].id.module_id == id.module_id &&
856 mpin[i].id.instance_id == id.instance_id &&
857 mpin[i].pin_state == SKL_PIN_UNBIND) {
858
859 mpin[i].tgt_mcfg = tgt_cfg;
860 return i;
861 }
862 }
863 }
864
865 return -EINVAL;
866 }
867
868 static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
869 {
870 if (mpin[q_index].is_dynamic) {
871 mpin[q_index].in_use = false;
872 mpin[q_index].id.module_id = 0;
873 mpin[q_index].id.instance_id = 0;
874 mpin[q_index].id.pvt_id = 0;
875 }
876 mpin[q_index].pin_state = SKL_PIN_UNBIND;
877 mpin[q_index].tgt_mcfg = NULL;
878 }
879
880 /* Module state falls back to INIT_DONE once all of its output pins are UNBIND */
881
882 static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
883 struct skl_module_cfg *mcfg)
884 {
885 int i;
886 bool found = false;
887
888 for (i = 0; i < max; i++) {
889 if (mpin[i].pin_state == SKL_PIN_UNBIND)
890 continue;
891 found = true;
892 break;
893 }
894
895 if (!found)
896 mcfg->m_state = SKL_MODULE_INIT_DONE;
897 return;
898 }
899
900 /*
901 * A module needs to be instantiated in the DSP. A module is part of a
902 * collection of modules referred to as a PIPE.
903 * We first calculate the module format, based on the module type, and then
904 * invoke the DSP by sending the INIT_INSTANCE IPC using the IPC helper.
905 */
906 int skl_init_module(struct skl_sst *ctx,
907 struct skl_module_cfg *mconfig)
908 {
909 u16 module_config_size = 0;
910 void *param_data = NULL;
911 int ret;
912 struct skl_ipc_init_instance_msg msg;
913
914 dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
915 mconfig->id.module_id, mconfig->id.pvt_id);
916
917 if (mconfig->pipe->state != SKL_PIPE_CREATED) {
918 dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
919 mconfig->pipe->state, mconfig->pipe->ppl_id);
920 return -EIO;
921 }
922
923 ret = skl_set_module_format(ctx, mconfig,
924 &module_config_size, &param_data);
925 if (ret < 0) {
926 dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
927 return ret;
928 }
929
930 msg.module_id = mconfig->id.module_id;
931 msg.instance_id = mconfig->id.pvt_id;
932 msg.ppl_instance_id = mconfig->pipe->ppl_id;
933 msg.param_data_size = module_config_size;
934 msg.core_id = mconfig->core_id;
935 msg.domain = mconfig->domain;
936
937 ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
938 if (ret < 0) {
939 dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
940 kfree(param_data);
941 return ret;
942 }
943 mconfig->m_state = SKL_MODULE_INIT_DONE;
944 kfree(param_data);
945 return ret;
946 }
947
948 static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
949 *src_module, struct skl_module_cfg *dst_module)
950 {
951 dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
952 __func__, src_module->id.module_id, src_module->id.pvt_id);
953 dev_dbg(ctx->dev, "%s: dst_module=%d dst_instacne=%d\n", __func__,
954 dst_module->id.module_id, dst_module->id.pvt_id);
955
956 dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
957 src_module->m_state, dst_module->m_state);
958 }
959
960 /*
961 * On module free-up, we need to unbind the module from the modules it is
962 * already bound to.
963 * Find the allocated pins and unbind them using the bind_unbind IPC.
964 */
965 int skl_unbind_modules(struct skl_sst *ctx,
966 struct skl_module_cfg *src_mcfg,
967 struct skl_module_cfg *dst_mcfg)
968 {
969 int ret;
970 struct skl_ipc_bind_unbind_msg msg;
971 struct skl_module_inst_id src_id = src_mcfg->id;
972 struct skl_module_inst_id dst_id = dst_mcfg->id;
973 int in_max = dst_mcfg->max_in_queue;
974 int out_max = src_mcfg->max_out_queue;
975 int src_index, dst_index, src_pin_state, dst_pin_state;
976
977 skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
978
979 /* get src queue index */
980 src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
981 if (src_index < 0)
982 return 0;
983
984 msg.src_queue = src_index;
985
986 /* get dst queue index */
987 dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
988 if (dst_index < 0)
989 return 0;
990
991 msg.dst_queue = dst_index;
992
993 src_pin_state = src_mcfg->m_out_pin[src_index].pin_state;
994 dst_pin_state = dst_mcfg->m_in_pin[dst_index].pin_state;
995
996 if (src_pin_state != SKL_PIN_BIND_DONE ||
997 dst_pin_state != SKL_PIN_BIND_DONE)
998 return 0;
999
1000 msg.module_id = src_mcfg->id.module_id;
1001 msg.instance_id = src_mcfg->id.pvt_id;
1002 msg.dst_module_id = dst_mcfg->id.module_id;
1003 msg.dst_instance_id = dst_mcfg->id.pvt_id;
1004 msg.bind = false;
1005
1006 ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
1007 if (!ret) {
1008 /* free queue only if unbind is success */
1009 skl_free_queue(src_mcfg->m_out_pin, src_index);
1010 skl_free_queue(dst_mcfg->m_in_pin, dst_index);
1011
1012 /*
1013 * check only the src module's bind state, since bind is
1014 * always from src -> sink
1015 */
1016 skl_clear_module_state(src_mcfg->m_out_pin, out_max, src_mcfg);
1017 }
1018
1019 return ret;
1020 }
1021
1022 /*
1023 * Once a module is instantiated it needs to be 'bound' to other modules in
1024 * the pipeline. For binding we need to find the module pins which are to be
1025 * bound together.
1026 * This function finds the pins and then sends the bind_unbind IPC message to
1027 * the DSP using the IPC helper.
1028 */
1029 int skl_bind_modules(struct skl_sst *ctx,
1030 struct skl_module_cfg *src_mcfg,
1031 struct skl_module_cfg *dst_mcfg)
1032 {
1033 int ret;
1034 struct skl_ipc_bind_unbind_msg msg;
1035 int in_max = dst_mcfg->max_in_queue;
1036 int out_max = src_mcfg->max_out_queue;
1037 int src_index, dst_index;
1038
1039 skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
1040
1041 if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
1042 dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
1043 return 0;
1044
1045 src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_mcfg, out_max);
1046 if (src_index < 0)
1047 return -EINVAL;
1048
1049 msg.src_queue = src_index;
1050 dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_mcfg, in_max);
1051 if (dst_index < 0) {
1052 skl_free_queue(src_mcfg->m_out_pin, src_index);
1053 return -EINVAL;
1054 }
1055
1056 msg.dst_queue = dst_index;
1057
1058 dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
1059 msg.src_queue, msg.dst_queue);
1060
1061 msg.module_id = src_mcfg->id.module_id;
1062 msg.instance_id = src_mcfg->id.pvt_id;
1063 msg.dst_module_id = dst_mcfg->id.module_id;
1064 msg.dst_instance_id = dst_mcfg->id.pvt_id;
1065 msg.bind = true;
1066
1067 ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
1068
1069 if (!ret) {
1070 src_mcfg->m_state = SKL_MODULE_BIND_DONE;
1071 src_mcfg->m_out_pin[src_index].pin_state = SKL_PIN_BIND_DONE;
1072 dst_mcfg->m_in_pin[dst_index].pin_state = SKL_PIN_BIND_DONE;
1073 } else {
1074 /* error case: if the IPC fails, clear the queue indexes */
1075 skl_free_queue(src_mcfg->m_out_pin, src_index);
1076 skl_free_queue(dst_mcfg->m_in_pin, dst_index);
1077 }
1078
1079 return ret;
1080 }
1081
1082 static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
1083 enum skl_ipc_pipeline_state state)
1084 {
1085 dev_dbg(ctx->dev, "%s: pipe_satate = %d\n", __func__, state);
1086
1087 return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
1088 }
1089
1090 /*
1091 * A pipeline is a collection of modules. Before a module is instantiated, a
1092 * pipeline needs to be created for it.
1093 * This function creates the pipeline by sending the create pipeline IPC
1094 * message to the FW.
1095 */
1096 int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
1097 {
1098 int ret;
1099
1100 dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
1101
1102 ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
1103 pipe->pipe_priority, pipe->ppl_id,
1104 pipe->lp_mode);
1105 if (ret < 0) {
1106 dev_err(ctx->dev, "Failed to create pipeline\n");
1107 return ret;
1108 }
1109
1110 pipe->state = SKL_PIPE_CREATED;
1111
1112 return 0;
1113 }
1114
1115 /*
1116 * A pipeline needs to be deleted on cleanup. If the pipeline is running,
1117 * pause it first and then delete it.
1118 * The pipe delete is done by sending the delete pipeline IPC; the DSP will
1119 * stop the DMA engines and release the resources.
1120 */
1121 int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1122 {
1123 int ret;
1124
1125 dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
1126
1127 /* If the pipe is started, stop it in the FW first. */
1128 if (pipe->state >= SKL_PIPE_STARTED) {
1129 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
1130 if (ret < 0) {
1131 dev_err(ctx->dev, "Failed to stop pipeline\n");
1132 return ret;
1133 }
1134
1135 pipe->state = SKL_PIPE_PAUSED;
1136 }
1137
1138 /* If pipe was not created in FW, do not try to delete it */
1139 if (pipe->state < SKL_PIPE_CREATED)
1140 return 0;
1141
1142 ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
1143 if (ret < 0) {
1144 dev_err(ctx->dev, "Failed to delete pipeline\n");
1145 return ret;
1146 }
1147
1148 pipe->state = SKL_PIPE_INVALID;
1149
1150 return ret;
1151 }
1152
1153 /*
1154 * A pipeline is also a scheduling entity in the DSP, which can be run or
1155 * stopped. To process data, the pipe needs to be run by sending the set pipe
1156 * state IPC to the DSP.
1157 */
1158 int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1159 {
1160 int ret;
1161
1162 dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
1163
1164 /* If the pipe was not created in FW, do not try to pause or run it */
1165 if (pipe->state < SKL_PIPE_CREATED)
1166 return 0;
1167
1168 /* Pipe has to be paused before it is started */
1169 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
1170 if (ret < 0) {
1171 dev_err(ctx->dev, "Failed to pause pipe\n");
1172 return ret;
1173 }
1174
1175 pipe->state = SKL_PIPE_PAUSED;
1176
1177 ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
1178 if (ret < 0) {
1179 dev_err(ctx->dev, "Failed to start pipe\n");
1180 return ret;
1181 }
1182
1183 pipe->state = SKL_PIPE_STARTED;
1184
1185 return 0;
1186 }
1187
1188 /*
1189 * Stop the pipeline by sending the set pipe state IPC.
1190 * The DSP doesn't implement stop, so we always send the pause message.
1191 */
1192 int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1193 {
1194 int ret;
1195
1196 dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);
1197
1198 /* If the pipe was never started in FW, there is nothing to stop */
1199 if (pipe->state < SKL_PIPE_PAUSED)
1200 return 0;
1201
1202 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
1203 if (ret < 0) {
1204 dev_dbg(ctx->dev, "Failed to stop pipe\n");
1205 return ret;
1206 }
1207
1208 pipe->state = SKL_PIPE_PAUSED;
1209
1210 return 0;
1211 }
1212
1213 /*
1214 * Reset the pipeline by sending the set pipe state IPC; this will reset the
1215 * DMA from the DSP side.
1216 */
1217 int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
1218 {
1219 int ret;
1220
1221 /* If the pipe was never started in FW, there is nothing to reset */
1222 if (pipe->state < SKL_PIPE_PAUSED)
1223 return 0;
1224
1225 ret = skl_set_pipe_state(ctx, pipe, PPL_RESET);
1226 if (ret < 0) {
1227 dev_dbg(ctx->dev, "Failed to reset pipe ret=%d\n", ret);
1228 return ret;
1229 }
1230
1231 pipe->state = SKL_PIPE_RESET;
1232
1233 return 0;
1234 }
1235
1236 /* Algo parameter set helper function */
1237 int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
1238 u32 param_id, struct skl_module_cfg *mcfg)
1239 {
1240 struct skl_ipc_large_config_msg msg;
1241
1242 msg.module_id = mcfg->id.module_id;
1243 msg.instance_id = mcfg->id.pvt_id;
1244 msg.param_data_size = size;
1245 msg.large_param_id = param_id;
1246
1247 return skl_ipc_set_large_config(&ctx->ipc, &msg, params);
1248 }
1249
1250 int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
1251 u32 param_id, struct skl_module_cfg *mcfg)
1252 {
1253 struct skl_ipc_large_config_msg msg;
1254
1255 msg.module_id = mcfg->id.module_id;
1256 msg.instance_id = mcfg->id.pvt_id;
1257 msg.param_data_size = size;
1258 msg.large_param_id = param_id;
1259
1260 return skl_ipc_get_large_config(&ctx->ipc, &msg, params);
1261 }