git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge remote-tracking branch 'asoc/topic/intel' into asoc-next
author	Mark Brown <broonie@kernel.org>
	Sun, 12 Apr 2015 18:48:33 +0000 (19:48 +0100)
committer	Mark Brown <broonie@kernel.org>
	Sun, 12 Apr 2015 18:48:33 +0000 (19:48 +0100)
sound/soc/intel/atom/sst/sst_acpi.c
sound/soc/intel/common/sst-acpi.c
sound/soc/intel/common/sst-dsp.h
sound/soc/intel/common/sst-firmware.c

index 0000000000000000000000000000000000000000,678f36ed97a5e9cbe05c8f8baa76147806251bc1..05f69308391114c5d76333c8818c44b66b276f03
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,384 +1,384 @@@
+ /*
+  * sst_acpi.c - SST (LPE) driver init file for ACPI enumeration.
+  *
+  * Copyright (c) 2013, Intel Corporation.
+  *
+  *  Authors:  Ramesh Babu K V <Ramesh.Babu@intel.com>
+  *  Authors:  Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms and conditions of the GNU General Public License,
+  * version 2, as published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope it will be useful, but WITHOUT
+  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  * more details.
+  *
+  *
+  */
+ #include <linux/module.h>
+ #include <linux/fs.h>
+ #include <linux/interrupt.h>
+ #include <linux/slab.h>
+ #include <linux/io.h>
+ #include <linux/miscdevice.h>
+ #include <linux/platform_device.h>
+ #include <linux/firmware.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/pm_qos.h>
+ #include <linux/acpi.h>
+ #include <asm/platform_sst_audio.h>
+ #include <sound/core.h>
+ #include <sound/soc.h>
+ #include <sound/compress_driver.h>
+ #include <acpi/acbuffer.h>
+ #include <acpi/platform/acenv.h>
+ #include <acpi/platform/aclinux.h>
+ #include <acpi/actypes.h>
+ #include <acpi/acpi_bus.h>
+ #include "../sst-mfld-platform.h"
+ #include "../../common/sst-dsp.h"
+ #include "sst.h"
+ struct sst_machines {
+       char *codec_id;
+       char board[32];
+       char machine[32];
+       void (*machine_quirk)(void);
+       char firmware[FW_NAME_SIZE];
+       struct sst_platform_info *pdata;
+ };
+ /* LPE viewpoint addresses */
+ #define SST_BYT_IRAM_PHY_START        0xff2c0000
+ #define SST_BYT_IRAM_PHY_END  0xff2d4000
+ #define SST_BYT_DRAM_PHY_START        0xff300000
+ #define SST_BYT_DRAM_PHY_END  0xff320000
+ #define SST_BYT_IMR_VIRT_START        0xc0000000 /* virtual addr in LPE */
+ #define SST_BYT_IMR_VIRT_END  0xc01fffff
+ #define SST_BYT_SHIM_PHY_ADDR 0xff340000
+ #define SST_BYT_MBOX_PHY_ADDR 0xff344000
+ #define SST_BYT_DMA0_PHY_ADDR 0xff298000
+ #define SST_BYT_DMA1_PHY_ADDR 0xff29c000
+ #define SST_BYT_SSP0_PHY_ADDR 0xff2a0000
+ #define SST_BYT_SSP2_PHY_ADDR 0xff2a2000
+ #define BYT_FW_MOD_TABLE_OFFSET       0x80000
+ #define BYT_FW_MOD_TABLE_SIZE 0x100
+ #define BYT_FW_MOD_OFFSET     (BYT_FW_MOD_TABLE_OFFSET + BYT_FW_MOD_TABLE_SIZE)
+ static const struct sst_info byt_fwparse_info = {
+       .use_elf        = false,
+       .max_streams    = 25,
+       .iram_start     = SST_BYT_IRAM_PHY_START,
+       .iram_end       = SST_BYT_IRAM_PHY_END,
+       .iram_use       = true,
+       .dram_start     = SST_BYT_DRAM_PHY_START,
+       .dram_end       = SST_BYT_DRAM_PHY_END,
+       .dram_use       = true,
+       .imr_start      = SST_BYT_IMR_VIRT_START,
+       .imr_end        = SST_BYT_IMR_VIRT_END,
+       .imr_use        = true,
+       .mailbox_start  = SST_BYT_MBOX_PHY_ADDR,
+       .num_probes     = 0,
+       .lpe_viewpt_rqd  = true,
+ };
+ static const struct sst_ipc_info byt_ipc_info = {
+       .ipc_offset = 0,
+       .mbox_recv_off = 0x400,
+ };
+ static const struct sst_lib_dnld_info  byt_lib_dnld_info = {
+       .mod_base           = SST_BYT_IMR_VIRT_START,
+       .mod_end            = SST_BYT_IMR_VIRT_END,
+       .mod_table_offset   = BYT_FW_MOD_TABLE_OFFSET,
+       .mod_table_size     = BYT_FW_MOD_TABLE_SIZE,
+       .mod_ddr_dnld       = false,
+ };
+ static const struct sst_res_info byt_rvp_res_info = {
+       .shim_offset = 0x140000,
+       .shim_size = 0x000100,
+       .shim_phy_addr = SST_BYT_SHIM_PHY_ADDR,
+       .ssp0_offset = 0xa0000,
+       .ssp0_size = 0x1000,
+       .dma0_offset = 0x98000,
+       .dma0_size = 0x4000,
+       .dma1_offset = 0x9c000,
+       .dma1_size = 0x4000,
+       .iram_offset = 0x0c0000,
+       .iram_size = 0x14000,
+       .dram_offset = 0x100000,
+       .dram_size = 0x28000,
+       .mbox_offset = 0x144000,
+       .mbox_size = 0x1000,
+       .acpi_lpe_res_index = 0,
+       .acpi_ddr_index = 2,
+       .acpi_ipc_irq_index = 5,
+ };
+ static struct sst_platform_info byt_rvp_platform_data = {
+       .probe_data = &byt_fwparse_info,
+       .ipc_info = &byt_ipc_info,
+       .lib_info = &byt_lib_dnld_info,
+       .res_info = &byt_rvp_res_info,
+       .platform = "sst-mfld-platform",
+ };
+ /* Cherryview (Cherrytrail and Braswell) uses the same mrfld dpcm firmware as
+  * Baytrail, so the pdata is the same as Baytrail's.
+  */
+ static struct sst_platform_info chv_platform_data = {
+       .probe_data = &byt_fwparse_info,
+       .ipc_info = &byt_ipc_info,
+       .lib_info = &byt_lib_dnld_info,
+       .res_info = &byt_rvp_res_info,
+       .platform = "sst-mfld-platform",
+ };
+ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
+ {
+       struct resource *rsrc;
+       struct platform_device *pdev = to_platform_device(ctx->dev);
+       /* All ACPI resource request here */
+       /* Get Shim addr */
+       rsrc = platform_get_resource(pdev, IORESOURCE_MEM,
+                                       ctx->pdata->res_info->acpi_lpe_res_index);
+       if (!rsrc) {
+               dev_err(ctx->dev, "Invalid SHIM base from IFWI");
+               return -EIO;
+       }
+       dev_info(ctx->dev, "LPE base: %#x size:%#x", (unsigned int) rsrc->start,
+                                       (unsigned int)resource_size(rsrc));
+       ctx->iram_base = rsrc->start + ctx->pdata->res_info->iram_offset;
+       ctx->iram_end =  ctx->iram_base + ctx->pdata->res_info->iram_size - 1;
+       dev_info(ctx->dev, "IRAM base: %#x", ctx->iram_base);
+       ctx->iram = devm_ioremap_nocache(ctx->dev, ctx->iram_base,
+                                        ctx->pdata->res_info->iram_size);
+       if (!ctx->iram) {
+               dev_err(ctx->dev, "unable to map IRAM");
+               return -EIO;
+       }
+       ctx->dram_base = rsrc->start + ctx->pdata->res_info->dram_offset;
+       ctx->dram_end = ctx->dram_base + ctx->pdata->res_info->dram_size - 1;
+       dev_info(ctx->dev, "DRAM base: %#x", ctx->dram_base);
+       ctx->dram = devm_ioremap_nocache(ctx->dev, ctx->dram_base,
+                                        ctx->pdata->res_info->dram_size);
+       if (!ctx->dram) {
+               dev_err(ctx->dev, "unable to map DRAM");
+               return -EIO;
+       }
+       ctx->shim_phy_add = rsrc->start + ctx->pdata->res_info->shim_offset;
+       dev_info(ctx->dev, "SHIM base: %#x", ctx->shim_phy_add);
+       ctx->shim = devm_ioremap_nocache(ctx->dev, ctx->shim_phy_add,
+                                       ctx->pdata->res_info->shim_size);
+       if (!ctx->shim) {
+               dev_err(ctx->dev, "unable to map SHIM");
+               return -EIO;
+       }
+       /* reassign physical address to LPE viewpoint address */
+       ctx->shim_phy_add = ctx->pdata->res_info->shim_phy_addr;
+       /* Get mailbox addr */
+       ctx->mailbox_add = rsrc->start + ctx->pdata->res_info->mbox_offset;
+       dev_info(ctx->dev, "Mailbox base: %#x", ctx->mailbox_add);
+       ctx->mailbox = devm_ioremap_nocache(ctx->dev, ctx->mailbox_add,
+                                           ctx->pdata->res_info->mbox_size);
+       if (!ctx->mailbox) {
+               dev_err(ctx->dev, "unable to map mailbox");
+               return -EIO;
+       }
+       /* reassign physical address to LPE viewpoint address */
+       ctx->mailbox_add = ctx->info.mailbox_start;
+       rsrc = platform_get_resource(pdev, IORESOURCE_MEM,
+                                       ctx->pdata->res_info->acpi_ddr_index);
+       if (!rsrc) {
+               dev_err(ctx->dev, "Invalid DDR base from IFWI");
+               return -EIO;
+       }
+       ctx->ddr_base = rsrc->start;
+       ctx->ddr_end = rsrc->end;
+       dev_info(ctx->dev, "DDR base: %#x", ctx->ddr_base);
+       ctx->ddr = devm_ioremap_nocache(ctx->dev, ctx->ddr_base,
+                                       resource_size(rsrc));
+       if (!ctx->ddr) {
+               dev_err(ctx->dev, "unable to map DDR");
+               return -EIO;
+       }
+       /* Find the IRQ */
+       ctx->irq_num = platform_get_irq(pdev,
+                               ctx->pdata->res_info->acpi_ipc_irq_index);
+       return 0;
+ }
+ static acpi_status sst_acpi_mach_match(acpi_handle handle, u32 level,
+                                      void *context, void **ret)
+ {
+       *(bool *)context = true;
+       return AE_OK;
+ }
+ static struct sst_machines *sst_acpi_find_machine(
+       struct sst_machines *machines)
+ {
+       struct sst_machines *mach;
+       bool found = false;
+       for (mach = machines; mach->codec_id; mach++)
+               if (ACPI_SUCCESS(acpi_get_devices(mach->codec_id,
+                                                 sst_acpi_mach_match,
+                                                 &found, NULL)) && found)
+                       return mach;
+       return NULL;
+ }
+ static int sst_acpi_probe(struct platform_device *pdev)
+ {
+       struct device *dev = &pdev->dev;
+       int ret = 0;
+       struct intel_sst_drv *ctx;
+       const struct acpi_device_id *id;
+       struct sst_machines *mach;
+       struct platform_device *mdev;
+       struct platform_device *plat_dev;
+       unsigned int dev_id;
+       id = acpi_match_device(dev->driver->acpi_match_table, dev);
+       if (!id)
+               return -ENODEV;
+       dev_dbg(dev, "for %s", id->id);
+       mach = (struct sst_machines *)id->driver_data;
+       mach = sst_acpi_find_machine(mach);
+       if (mach == NULL) {
+               dev_err(dev, "No matching machine driver found\n");
+               return -ENODEV;
+       }
+       ret = kstrtouint(id->id, 16, &dev_id);
+       if (ret < 0) {
+               dev_err(dev, "Unique device id conversion error: %d\n", ret);
+               return ret;
+       }
+       dev_dbg(dev, "ACPI device id: %x\n", dev_id);
+       plat_dev = platform_device_register_data(dev, mach->pdata->platform, -1, NULL, 0);
+       if (IS_ERR(plat_dev)) {
+               dev_err(dev, "Failed to create platform device: %s\n", mach->pdata->platform);
+               return PTR_ERR(plat_dev);
+       }
+       /* Create platform device for sst machine driver */
+       mdev = platform_device_register_data(dev, mach->machine, -1, NULL, 0);
+       if (IS_ERR(mdev)) {
+               dev_err(dev, "Failed to create machine device: %s\n", mach->machine);
+               return PTR_ERR(mdev);
+       }
+       ret = sst_alloc_drv_context(&ctx, dev, dev_id);
+       if (ret < 0)
+               return ret;
+       /* Fill sst platform data */
+       ctx->pdata = mach->pdata;
+       strcpy(ctx->firmware_name, mach->firmware);
+       ret = sst_platform_get_resources(ctx);
+       if (ret)
+               return ret;
+       ret = sst_context_init(ctx);
+       if (ret < 0)
+               return ret;
+       /* need to save shim registers in BYT */
+       ctx->shim_regs64 = devm_kzalloc(ctx->dev, sizeof(*ctx->shim_regs64),
+                                       GFP_KERNEL);
+       if (!ctx->shim_regs64) {
 -              return -ENOMEM;
++              ret = -ENOMEM;
+               goto do_sst_cleanup;
+       }
+       sst_configure_runtime_pm(ctx);
+       platform_set_drvdata(pdev, ctx);
+       return ret;
+ do_sst_cleanup:
+       sst_context_cleanup(ctx);
+       platform_set_drvdata(pdev, NULL);
+       dev_err(ctx->dev, "failed with %d\n", ret);
+       return ret;
+ }
+ /**
+ * sst_acpi_remove - remove function
+ *
+ * @pdev:      platform device structure
+ *
+ * This function is called by the OS when the device is removed.
+ * It frees the interrupt, etc.
+ */
+ static int sst_acpi_remove(struct platform_device *pdev)
+ {
+       struct intel_sst_drv *ctx;
+       ctx = platform_get_drvdata(pdev);
+       sst_context_cleanup(ctx);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+ }
+ static struct sst_machines sst_acpi_bytcr[] = {
+       {"10EC5640", "T100", "bytt100_rt5640", NULL, "intel/fw_sst_0f28.bin",
+                                               &byt_rvp_platform_data },
+       {},
+ };
+ /* Cherryview-based platforms: CherryTrail and Braswell */
+ static struct sst_machines sst_acpi_chv[] = {
+       {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "intel/fw_sst_22a8.bin",
+                                               &chv_platform_data },
+       {"10EC5645", "cht-bsw", "cht-bsw-rt5645", NULL, "intel/fw_sst_22a8.bin",
+                                               &chv_platform_data },
+       {},
+ };
+ static const struct acpi_device_id sst_acpi_ids[] = {
+       { "80860F28", (unsigned long)&sst_acpi_bytcr},
+       { "808622A8", (unsigned long) &sst_acpi_chv},
+       { },
+ };
+ MODULE_DEVICE_TABLE(acpi, sst_acpi_ids);
+ static struct platform_driver sst_acpi_driver = {
+       .driver = {
+               .name                   = "intel_sst_acpi",
+               .acpi_match_table       = ACPI_PTR(sst_acpi_ids),
+               .pm                     = &intel_sst_pm,
+       },
+       .probe  = sst_acpi_probe,
+       .remove = sst_acpi_remove,
+ };
+ module_platform_driver(sst_acpi_driver);
+ MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine ACPI Driver");
+ MODULE_AUTHOR("Ramesh Babu K V");
+ MODULE_AUTHOR("Omair Mohammed Abdullah");
+ MODULE_LICENSE("GPL v2");
+ MODULE_ALIAS("sst");
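Editor's note: the atom driver above is entirely table driven — each struct sst_machines entry binds a codec ACPI HID to a machine driver, a firmware image and the SST platform data, and sst_acpi_find_machine() walks the table until acpi_get_devices() reports the codec as present. A hedged sketch of what enabling one more Cherryview board would look like follows; the codec HID "10EC5650" and machine driver name "cht-bsw-rt5650" are illustrative assumptions, not part of this commit:

/* Hypothetical sketch: enabling an additional Cherryview board would only
 * need another sst_machines entry. HID and machine driver name below are
 * assumed for illustration only. */
static struct sst_machines sst_acpi_example[] = {
	{"10EC5650", "cht-bsw", "cht-bsw-rt5650", NULL, "intel/fw_sst_22a8.bin",
						&chv_platform_data },
	{},
};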
index 0000000000000000000000000000000000000000,b3d84560fbb5da2f6ce908eb1803b9031da71d78..42f293f9c6e2dcfbdf5da2f451a6cf196fc296dc
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,285 +1,286 @@@
+ /*
+  * Intel SST loader on ACPI systems
+  *
+  * Copyright (C) 2013, Intel Corporation. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License version
+  * 2 as published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  */
+ #include <linux/acpi.h>
+ #include <linux/device.h>
+ #include <linux/firmware.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include "sst-dsp.h"
+ #define SST_LPT_DSP_DMA_ADDR_OFFSET   0x0F0000
+ #define SST_WPT_DSP_DMA_ADDR_OFFSET   0x0FE000
+ #define SST_LPT_DSP_DMA_SIZE          (1024 - 1)
+ /* Descriptor for SST ASoC machine driver */
+ struct sst_acpi_mach {
+       /* ACPI ID for the matching machine driver. Audio codec for instance */
+       const u8 id[ACPI_ID_LEN];
+       /* machine driver name */
+       const char *drv_name;
+       /* firmware file name */
+       const char *fw_filename;
+ };
+ /* Descriptor for setting up SST platform data */
+ struct sst_acpi_desc {
+       const char *drv_name;
+       struct sst_acpi_mach *machines;
+       /* Platform resource indexes. Must be set to -1 if not used */
+       int resindex_lpe_base;
+       int resindex_pcicfg_base;
+       int resindex_fw_base;
+       int irqindex_host_ipc;
+       int resindex_dma_base;
+       /* Unique number identifying the SST core on platform */
+       int sst_id;
+       /* DMA only valid when resindex_dma_base != -1 */
+       int dma_engine;
+       int dma_size;
+ };
+ struct sst_acpi_priv {
+       struct platform_device *pdev_mach;
+       struct platform_device *pdev_pcm;
+       struct sst_pdata sst_pdata;
+       struct sst_acpi_desc *desc;
+       struct sst_acpi_mach *mach;
+ };
+ static void sst_acpi_fw_cb(const struct firmware *fw, void *context)
+ {
+       struct platform_device *pdev = context;
+       struct device *dev = &pdev->dev;
+       struct sst_acpi_priv *sst_acpi = platform_get_drvdata(pdev);
+       struct sst_pdata *sst_pdata = &sst_acpi->sst_pdata;
+       struct sst_acpi_desc *desc = sst_acpi->desc;
+       struct sst_acpi_mach *mach = sst_acpi->mach;
+       sst_pdata->fw = fw;
+       if (!fw) {
+               dev_err(dev, "Cannot load firmware %s\n", mach->fw_filename);
+               return;
+       }
+       /* register PCM and DAI driver */
+       sst_acpi->pdev_pcm =
+               platform_device_register_data(dev, desc->drv_name, -1,
+                                             sst_pdata, sizeof(*sst_pdata));
+       if (IS_ERR(sst_acpi->pdev_pcm)) {
+               dev_err(dev, "Cannot register device %s. Error %d\n",
+                       desc->drv_name, (int)PTR_ERR(sst_acpi->pdev_pcm));
+       }
+       return;
+ }
+ static acpi_status sst_acpi_mach_match(acpi_handle handle, u32 level,
+                                      void *context, void **ret)
+ {
+       *(bool *)context = true;
+       return AE_OK;
+ }
+ static struct sst_acpi_mach *sst_acpi_find_machine(
+       struct sst_acpi_mach *machines)
+ {
+       struct sst_acpi_mach *mach;
+       bool found = false;
+       for (mach = machines; mach->id[0]; mach++)
+               if (ACPI_SUCCESS(acpi_get_devices(mach->id,
+                                                 sst_acpi_mach_match,
+                                                 &found, NULL)) && found)
+                       return mach;
+       return NULL;
+ }
+ static int sst_acpi_probe(struct platform_device *pdev)
+ {
+       const struct acpi_device_id *id;
+       struct device *dev = &pdev->dev;
+       struct sst_acpi_priv *sst_acpi;
+       struct sst_pdata *sst_pdata;
+       struct sst_acpi_mach *mach;
+       struct sst_acpi_desc *desc;
+       struct resource *mmio;
+       int ret = 0;
+       sst_acpi = devm_kzalloc(dev, sizeof(*sst_acpi), GFP_KERNEL);
+       if (sst_acpi == NULL)
+               return -ENOMEM;
+       id = acpi_match_device(dev->driver->acpi_match_table, dev);
+       if (!id)
+               return -ENODEV;
+       desc = (struct sst_acpi_desc *)id->driver_data;
+       mach = sst_acpi_find_machine(desc->machines);
+       if (mach == NULL) {
+               dev_err(dev, "No matching ASoC machine driver found\n");
+               return -ENODEV;
+       }
+       sst_pdata = &sst_acpi->sst_pdata;
+       sst_pdata->id = desc->sst_id;
+       sst_pdata->dma_dev = dev;
+       sst_acpi->desc = desc;
+       sst_acpi->mach = mach;
++      sst_pdata->resindex_dma_base = desc->resindex_dma_base;
+       if (desc->resindex_dma_base >= 0) {
+               sst_pdata->dma_engine = desc->dma_engine;
+               sst_pdata->dma_base = desc->resindex_dma_base;
+               sst_pdata->dma_size = desc->dma_size;
+       }
+       if (desc->irqindex_host_ipc >= 0)
+               sst_pdata->irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
+       if (desc->resindex_lpe_base >= 0) {
+               mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+                                            desc->resindex_lpe_base);
+               if (mmio) {
+                       sst_pdata->lpe_base = mmio->start;
+                       sst_pdata->lpe_size = resource_size(mmio);
+               }
+       }
+       if (desc->resindex_pcicfg_base >= 0) {
+               mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+                                            desc->resindex_pcicfg_base);
+               if (mmio) {
+                       sst_pdata->pcicfg_base = mmio->start;
+                       sst_pdata->pcicfg_size = resource_size(mmio);
+               }
+       }
+       if (desc->resindex_fw_base >= 0) {
+               mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+                                            desc->resindex_fw_base);
+               if (mmio) {
+                       sst_pdata->fw_base = mmio->start;
+                       sst_pdata->fw_size = resource_size(mmio);
+               }
+       }
+       platform_set_drvdata(pdev, sst_acpi);
+       /* register machine driver */
+       sst_acpi->pdev_mach =
+               platform_device_register_data(dev, mach->drv_name, -1,
+                                             sst_pdata, sizeof(*sst_pdata));
+       if (IS_ERR(sst_acpi->pdev_mach))
+               return PTR_ERR(sst_acpi->pdev_mach);
+       /* continue SST probing after firmware is loaded */
+       ret = request_firmware_nowait(THIS_MODULE, true, mach->fw_filename,
+                                     dev, GFP_KERNEL, pdev, sst_acpi_fw_cb);
+       if (ret)
+               platform_device_unregister(sst_acpi->pdev_mach);
+       return ret;
+ }
+ static int sst_acpi_remove(struct platform_device *pdev)
+ {
+       struct sst_acpi_priv *sst_acpi = platform_get_drvdata(pdev);
+       struct sst_pdata *sst_pdata = &sst_acpi->sst_pdata;
+       platform_device_unregister(sst_acpi->pdev_mach);
+       if (!IS_ERR_OR_NULL(sst_acpi->pdev_pcm))
+               platform_device_unregister(sst_acpi->pdev_pcm);
+       release_firmware(sst_pdata->fw);
+       return 0;
+ }
+ static struct sst_acpi_mach haswell_machines[] = {
+       { "INT33CA", "haswell-audio", "intel/IntcSST1.bin" },
+       {}
+ };
+ static struct sst_acpi_desc sst_acpi_haswell_desc = {
+       .drv_name = "haswell-pcm-audio",
+       .machines = haswell_machines,
+       .resindex_lpe_base = 0,
+       .resindex_pcicfg_base = 1,
+       .resindex_fw_base = -1,
+       .irqindex_host_ipc = 0,
+       .sst_id = SST_DEV_ID_LYNX_POINT,
+       .dma_engine = SST_DMA_TYPE_DW,
+       .resindex_dma_base = SST_LPT_DSP_DMA_ADDR_OFFSET,
+       .dma_size = SST_LPT_DSP_DMA_SIZE,
+ };
+ static struct sst_acpi_mach broadwell_machines[] = {
+       { "INT343A", "broadwell-audio", "intel/IntcSST2.bin" },
+       {}
+ };
+ static struct sst_acpi_desc sst_acpi_broadwell_desc = {
+       .drv_name = "haswell-pcm-audio",
+       .machines = broadwell_machines,
+       .resindex_lpe_base = 0,
+       .resindex_pcicfg_base = 1,
+       .resindex_fw_base = -1,
+       .irqindex_host_ipc = 0,
+       .sst_id = SST_DEV_ID_WILDCAT_POINT,
+       .dma_engine = SST_DMA_TYPE_DW,
+       .resindex_dma_base = SST_WPT_DSP_DMA_ADDR_OFFSET,
+       .dma_size = SST_LPT_DSP_DMA_SIZE,
+ };
+ static struct sst_acpi_mach baytrail_machines[] = {
+       { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master" },
+       { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master" },
+       {}
+ };
+ static struct sst_acpi_desc sst_acpi_baytrail_desc = {
+       .drv_name = "baytrail-pcm-audio",
+       .machines = baytrail_machines,
+       .resindex_lpe_base = 0,
+       .resindex_pcicfg_base = 1,
+       .resindex_fw_base = 2,
+       .irqindex_host_ipc = 5,
+       .sst_id = SST_DEV_ID_BYT,
+       .resindex_dma_base = -1,
+ };
+ static struct acpi_device_id sst_acpi_match[] = {
+       { "INT33C8", (unsigned long)&sst_acpi_haswell_desc },
+       { "INT3438", (unsigned long)&sst_acpi_broadwell_desc },
+       { "80860F28", (unsigned long)&sst_acpi_baytrail_desc },
+       { }
+ };
+ MODULE_DEVICE_TABLE(acpi, sst_acpi_match);
+ static struct platform_driver sst_acpi_driver = {
+       .probe = sst_acpi_probe,
+       .remove = sst_acpi_remove,
+       .driver = {
+               .name = "sst-acpi",
+               .acpi_match_table = ACPI_PTR(sst_acpi_match),
+       },
+ };
+ module_platform_driver(sst_acpi_driver);
+ MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+ MODULE_DESCRIPTION("Intel SST loader on ACPI systems");
+ MODULE_LICENSE("GPL v2");
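Editor's note: this loader keeps all platform knowledge in data as well — sst_acpi_probe() pulls struct sst_acpi_desc out of the ACPI match table entry, uses the resindex_*/irqindex_* fields to pick platform resources, selects a machine from the table, and defers the rest to the firmware callback. A minimal hedged sketch of a descriptor for an imaginary platform (all names and values are assumptions, not from this commit):

/* Hedged sketch only: a descriptor for a hypothetical platform, showing how
 * the index fields select entries from the ACPI-provided resources.
 * "EXMPL001", "example-audio" and the firmware path are made-up names. */
static struct sst_acpi_mach example_machines[] = {
	{ "EXMPL001", "example-audio", "intel/example-fw.bin" },
	{}
};

static struct sst_acpi_desc sst_acpi_example_desc = {
	.drv_name = "example-pcm-audio",
	.machines = example_machines,
	.resindex_lpe_base = 0,         /* IORESOURCE_MEM 0: LPE MMIO window */
	.resindex_pcicfg_base = 1,      /* IORESOURCE_MEM 1: PCI config shadow */
	.resindex_fw_base = -1,         /* no dedicated firmware window */
	.irqindex_host_ipc = 0,         /* IRQ 0: host IPC interrupt */
	.sst_id = SST_DEV_ID_LYNX_POINT,
	.resindex_dma_base = -1,        /* -1: DMA fields are ignored */
};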
index 0000000000000000000000000000000000000000,3412474083ffdfeba79598433f345fa13b40dc72..96aeb2556ad40d3d4e8bc35f9e180555d8d72873
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,284 +1,285 @@@
+ /*
+  * Intel Smart Sound Technology (SST) Core
+  *
+  * Copyright (C) 2013, Intel Corporation. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License version
+  * 2 as published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  */
+ #ifndef __SOUND_SOC_SST_DSP_H
+ #define __SOUND_SOC_SST_DSP_H
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include <linux/interrupt.h>
+ /* SST Device IDs  */
+ #define SST_DEV_ID_LYNX_POINT         0x33C8
+ #define SST_DEV_ID_WILDCAT_POINT      0x3438
+ #define SST_DEV_ID_BYT                        0x0F28
+ /* Supported SST DMA Devices */
+ #define SST_DMA_TYPE_DW               1
+ /* autosuspend delay 5s */
+ #define SST_RUNTIME_SUSPEND_DELAY     (5 * 1000)
+ /* SST Shim register map
+  * The register naming can differ between products. Some products also
+  * contain extra functionality.
+  */
+ #define SST_CSR                       0x00
+ #define SST_PISR              0x08
+ #define SST_PIMR              0x10
+ #define SST_ISRX              0x18
+ #define SST_ISRD              0x20
+ #define SST_IMRX              0x28
+ #define SST_IMRD              0x30
+ #define SST_IPCX              0x38 /* IPC IA -> SST */
+ #define SST_IPCD              0x40 /* IPC SST -> IA */
+ #define SST_ISRSC             0x48
+ #define SST_ISRLPESC          0x50
+ #define SST_IMRSC             0x58
+ #define SST_IMRLPESC          0x60
+ #define SST_IPCSC             0x68
+ #define SST_IPCLPESC          0x70
+ #define SST_CLKCTL            0x78
+ #define SST_CSR2              0x80
+ #define SST_LTRC              0xE0
+ #define SST_HMDC              0xE8
+ #define SST_SHIM_BEGIN                SST_CSR
+ #define SST_SHIM_END          SST_HMDC
+ #define SST_DBGO              0xF0
+ #define SST_SHIM_SIZE         0x100
+ #define SST_PWMCTRL             0x1000
+ /* SST Shim Register bits
+  * The register bit naming can differ between products. Some products also
+  * contain extra functionality.
+  */
+ /* CSR / CS */
+ #define SST_CSR_RST           (0x1 << 1)
+ #define SST_CSR_SBCS0         (0x1 << 2)
+ #define SST_CSR_SBCS1         (0x1 << 3)
+ #define SST_CSR_DCS(x)                (x << 4)
+ #define SST_CSR_DCS_MASK      (0x7 << 4)
+ #define SST_CSR_STALL         (0x1 << 10)
+ #define SST_CSR_S0IOCS                (0x1 << 21)
+ #define SST_CSR_S1IOCS                (0x1 << 23)
+ #define SST_CSR_LPCS          (0x1 << 31)
+ #define SST_CSR_24MHZ_LPCS    (SST_CSR_SBCS0 | SST_CSR_SBCS1 | SST_CSR_LPCS)
+ #define SST_CSR_24MHZ_NO_LPCS (SST_CSR_SBCS0 | SST_CSR_SBCS1)
+ #define SST_BYT_CSR_RST               (0x1 << 0)
+ #define SST_BYT_CSR_VECTOR_SEL        (0x1 << 1)
+ #define SST_BYT_CSR_STALL     (0x1 << 2)
+ #define SST_BYT_CSR_PWAITMODE (0x1 << 3)
+ /*  ISRX / ISC */
+ #define SST_ISRX_BUSY         (0x1 << 1)
+ #define SST_ISRX_DONE         (0x1 << 0)
+ #define SST_BYT_ISRX_REQUEST  (0x1 << 1)
+ /*  ISRD / ISD */
+ #define SST_ISRD_BUSY         (0x1 << 1)
+ #define SST_ISRD_DONE         (0x1 << 0)
+ /* IMRX / IMC */
+ #define SST_IMRX_BUSY         (0x1 << 1)
+ #define SST_IMRX_DONE         (0x1 << 0)
+ #define SST_BYT_IMRX_REQUEST  (0x1 << 1)
+ /* IMRD / IMD */
+ #define SST_IMRD_DONE         (0x1 << 0)
+ #define SST_IMRD_BUSY         (0x1 << 1)
+ #define SST_IMRD_SSP0         (0x1 << 16)
+ #define SST_IMRD_DMAC0                (0x1 << 21)
+ #define SST_IMRD_DMAC1                (0x1 << 22)
+ #define SST_IMRD_DMAC         (SST_IMRD_DMAC0 | SST_IMRD_DMAC1)
+ /*  IPCX / IPCC */
+ #define       SST_IPCX_DONE           (0x1 << 30)
+ #define       SST_IPCX_BUSY           (0x1 << 31)
+ #define SST_BYT_IPCX_DONE     ((u64)0x1 << 62)
+ #define SST_BYT_IPCX_BUSY     ((u64)0x1 << 63)
+ /*  IPCD */
+ #define       SST_IPCD_DONE           (0x1 << 30)
+ #define       SST_IPCD_BUSY           (0x1 << 31)
+ #define SST_BYT_IPCD_DONE     ((u64)0x1 << 62)
+ #define SST_BYT_IPCD_BUSY     ((u64)0x1 << 63)
+ /* CLKCTL */
+ #define SST_CLKCTL_SMOS(x)    (x << 24)
+ #define SST_CLKCTL_MASK               (3 << 24)
+ #define SST_CLKCTL_DCPLCG     (1 << 18)
+ #define SST_CLKCTL_SCOE1      (1 << 17)
+ #define SST_CLKCTL_SCOE0      (1 << 16)
+ /* CSR2 / CS2 */
+ #define SST_CSR2_SDFD_SSP0    (1 << 1)
+ #define SST_CSR2_SDFD_SSP1    (1 << 2)
+ /* LTRC */
+ #define SST_LTRC_VAL(x)               (x << 0)
+ /* HMDC */
+ #define SST_HMDC_HDDA0(x)     (x << 0)
+ #define SST_HMDC_HDDA1(x)     (x << 7)
+ #define SST_HMDC_HDDA_E0_CH0  1
+ #define SST_HMDC_HDDA_E0_CH1  2
+ #define SST_HMDC_HDDA_E0_CH2  4
+ #define SST_HMDC_HDDA_E0_CH3  8
+ #define SST_HMDC_HDDA_E1_CH0  SST_HMDC_HDDA1(SST_HMDC_HDDA_E0_CH0)
+ #define SST_HMDC_HDDA_E1_CH1  SST_HMDC_HDDA1(SST_HMDC_HDDA_E0_CH1)
+ #define SST_HMDC_HDDA_E1_CH2  SST_HMDC_HDDA1(SST_HMDC_HDDA_E0_CH2)
+ #define SST_HMDC_HDDA_E1_CH3  SST_HMDC_HDDA1(SST_HMDC_HDDA_E0_CH3)
+ #define SST_HMDC_HDDA_E0_ALLCH        (SST_HMDC_HDDA_E0_CH0 | SST_HMDC_HDDA_E0_CH1 | \
+                                SST_HMDC_HDDA_E0_CH2 | SST_HMDC_HDDA_E0_CH3)
+ #define SST_HMDC_HDDA_E1_ALLCH        (SST_HMDC_HDDA_E1_CH0 | SST_HMDC_HDDA_E1_CH1 | \
+                                SST_HMDC_HDDA_E1_CH2 | SST_HMDC_HDDA_E1_CH3)
+ /* SST Vendor Defined Registers and bits */
+ #define SST_VDRTCTL0          0xa0
+ #define SST_VDRTCTL1          0xa4
+ #define SST_VDRTCTL2          0xa8
+ #define SST_VDRTCTL3          0xaC
+ /* VDRTCTL0 */
+ #define SST_VDRTCL0_D3PGD             (1 << 0)
+ #define SST_VDRTCL0_D3SRAMPGD         (1 << 1)
+ #define SST_VDRTCL0_DSRAMPGE_SHIFT    12
+ #define SST_VDRTCL0_DSRAMPGE_MASK     (0xfffff << SST_VDRTCL0_DSRAMPGE_SHIFT)
+ #define SST_VDRTCL0_ISRAMPGE_SHIFT    2
+ #define SST_VDRTCL0_ISRAMPGE_MASK     (0x3ff << SST_VDRTCL0_ISRAMPGE_SHIFT)
+ /* VDRTCTL2 */
+ #define SST_VDRTCL2_DCLCGE            (1 << 1)
+ #define SST_VDRTCL2_DTCGE             (1 << 10)
+ #define SST_VDRTCL2_APLLSE_MASK               (1 << 31)
+ /* PMCS */
+ #define SST_PMCS              0x84
+ #define SST_PMCS_PS_MASK      0x3
+ struct sst_dsp;
+ /*
+  * SST Device.
+  *
+  * This structure is populated by the SST core driver.
+  */
+ struct sst_dsp_device {
+       /* Mandatory fields */
+       struct sst_ops *ops;
+       irqreturn_t (*thread)(int irq, void *context);
+       void *thread_context;
+ };
+ /*
+  * SST Platform Data.
+  */
+ struct sst_pdata {
+       /* ACPI data */
+       u32 lpe_base;
+       u32 lpe_size;
+       u32 pcicfg_base;
+       u32 pcicfg_size;
+       u32 fw_base;
+       u32 fw_size;
+       int irq;
+       /* Firmware */
+       const struct firmware *fw;
+       /* DMA */
++      int resindex_dma_base; /* other DMA fields are invalid if this is -1 */
+       u32 dma_base;
+       u32 dma_size;
+       int dma_engine;
+       struct device *dma_dev;
+       /* DSP */
+       u32 id;
+       void *dsp;
+ };
+ /* Initialization */
+ struct sst_dsp *sst_dsp_new(struct device *dev,
+       struct sst_dsp_device *sst_dev, struct sst_pdata *pdata);
+ void sst_dsp_free(struct sst_dsp *sst);
+ /* SHIM Read / Write */
+ void sst_dsp_shim_write(struct sst_dsp *sst, u32 offset, u32 value);
+ u32 sst_dsp_shim_read(struct sst_dsp *sst, u32 offset);
+ int sst_dsp_shim_update_bits(struct sst_dsp *sst, u32 offset,
+                               u32 mask, u32 value);
+ void sst_dsp_shim_write64(struct sst_dsp *sst, u32 offset, u64 value);
+ u64 sst_dsp_shim_read64(struct sst_dsp *sst, u32 offset);
+ int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset,
+                               u64 mask, u64 value);
+ /* SHIM Read / Write Unlocked for callers already holding sst lock */
+ void sst_dsp_shim_write_unlocked(struct sst_dsp *sst, u32 offset, u32 value);
+ u32 sst_dsp_shim_read_unlocked(struct sst_dsp *sst, u32 offset);
+ int sst_dsp_shim_update_bits_unlocked(struct sst_dsp *sst, u32 offset,
+                               u32 mask, u32 value);
+ void sst_dsp_shim_write64_unlocked(struct sst_dsp *sst, u32 offset, u64 value);
+ u64 sst_dsp_shim_read64_unlocked(struct sst_dsp *sst, u32 offset);
+ int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset,
+                                       u64 mask, u64 value);
+ /* Internal generic low-level SST IO functions - can be overridden */
+ void sst_shim32_write(void __iomem *addr, u32 offset, u32 value);
+ u32 sst_shim32_read(void __iomem *addr, u32 offset);
+ void sst_shim32_write64(void __iomem *addr, u32 offset, u64 value);
+ u64 sst_shim32_read64(void __iomem *addr, u32 offset);
+ void sst_memcpy_toio_32(struct sst_dsp *sst,
+                       void __iomem *dest, void *src, size_t bytes);
+ void sst_memcpy_fromio_32(struct sst_dsp *sst,
+                         void *dest, void __iomem *src, size_t bytes);
+ /* DSP reset & boot */
+ void sst_dsp_reset(struct sst_dsp *sst);
+ int sst_dsp_boot(struct sst_dsp *sst);
+ int sst_dsp_wake(struct sst_dsp *sst);
+ void sst_dsp_sleep(struct sst_dsp *sst);
+ void sst_dsp_stall(struct sst_dsp *sst);
+ /* DMA */
+ int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id);
+ void sst_dsp_dma_put_channel(struct sst_dsp *dsp);
+ int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
+       dma_addr_t src_addr, size_t size);
+ int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
+       dma_addr_t src_addr, size_t size);
+ /* Msg IO */
+ void sst_dsp_ipc_msg_tx(struct sst_dsp *dsp, u32 msg);
+ u32 sst_dsp_ipc_msg_rx(struct sst_dsp *dsp);
+ /* Mailbox management */
+ int sst_dsp_mailbox_init(struct sst_dsp *dsp, u32 inbox_offset,
+       size_t inbox_size, u32 outbox_offset, size_t outbox_size);
+ void sst_dsp_inbox_write(struct sst_dsp *dsp, void *message, size_t bytes);
+ void sst_dsp_inbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
+ void sst_dsp_outbox_write(struct sst_dsp *dsp, void *message, size_t bytes);
+ void sst_dsp_outbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
+ void sst_dsp_mailbox_dump(struct sst_dsp *dsp, size_t bytes);
+ /* Debug */
+ void sst_dsp_dump(struct sst_dsp *sst);
+ #endif
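Editor's note: the shim accessors declared above are meant to be combined with the register and bit definitions earlier in this header. A hedged usage sketch (not from this commit) of how a platform driver could stall the DSP and put it into reset; real reset sequencing is platform specific, this only shows the API shape:

/* Hedged sketch: stall the DSP core, then assert reset, touching no other
 * CSR bits. Uses only declarations from sst-dsp.h above. */
static inline void example_sst_stall_and_reset(struct sst_dsp *sst)
{
	sst_dsp_shim_update_bits(sst, SST_CSR, SST_CSR_STALL, SST_CSR_STALL);
	sst_dsp_shim_update_bits(sst, SST_CSR, SST_CSR_RST, SST_CSR_RST);
}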
index 0000000000000000000000000000000000000000,b5659ecb80de6f9fd8f7cfab454b7a04276efc39..ebcca6dc48d189eb16b8903683db5460a3955357
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1201 +1,1205 @@@
+ /*
+  * Intel SST Firmware Loader
+  *
+  * Copyright (C) 2013, Intel Corporation. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License version
+  * 2 as published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+ #include <linux/sched.h>
+ #include <linux/firmware.h>
+ #include <linux/export.h>
+ #include <linux/platform_device.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmaengine.h>
+ #include <linux/pci.h>
+ #include <linux/acpi.h>
+ /* supported DMA engine drivers */
+ #include <linux/platform_data/dma-dw.h>
+ #include <linux/dma/dw.h>
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include "sst-dsp.h"
+ #include "sst-dsp-priv.h"
+ #define SST_DMA_RESOURCES     2
+ #define SST_DSP_DMA_MAX_BURST 0x3
+ #define SST_HSW_BLOCK_ANY     0xffffffff
+ #define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000
+ struct sst_dma {
+       struct sst_dsp *sst;
+       struct dw_dma_chip *chip;
+       struct dma_async_tx_descriptor *desc;
+       struct dma_chan *ch;
+ };
+ static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
+ {
+       /* __iowrite32_copy uses 32-bit size values, so divide by 4 */
+       __iowrite32_copy((void *)dest, src, bytes/4);
+ }
+ static void sst_dma_transfer_complete(void *arg)
+ {
+       struct sst_dsp *sst = (struct sst_dsp *)arg;
+       dev_dbg(sst->dev, "DMA: callback\n");
+ }
+ static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
+       dma_addr_t src_addr, size_t size)
+ {
+       struct dma_async_tx_descriptor *desc;
+       struct sst_dma *dma = sst->dma;
+       if (dma->ch == NULL) {
+               dev_err(sst->dev, "error: no DMA channel\n");
+               return -ENODEV;
+       }
+       dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
+               (unsigned long)src_addr, (unsigned long)dest_addr, size);
+       desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
+               src_addr, size, DMA_CTRL_ACK);
+       if (!desc) {
+               dev_err(sst->dev, "error: dma prep memcpy failed\n");
+               return -EINVAL;
+       }
+       desc->callback = sst_dma_transfer_complete;
+       desc->callback_param = sst;
+       desc->tx_submit(desc);
+       dma_wait_for_async_tx(desc);
+       return 0;
+ }
+ /* copy to DSP */
+ int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
+       dma_addr_t src_addr, size_t size)
+ {
+       return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
+                       src_addr, size);
+ }
+ EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);
+ /* copy from DSP */
+ int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
+       dma_addr_t src_addr, size_t size)
+ {
+       return sst_dsp_dma_copy(sst, dest_addr,
+               src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
+ }
+ EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
+ /* remove module from memory - callers hold locks */
+ static void block_list_remove(struct sst_dsp *dsp,
+       struct list_head *block_list)
+ {
+       struct sst_mem_block *block, *tmp;
+       int err;
+       /* disable each block  */
+       list_for_each_entry(block, block_list, module_list) {
+               if (block->ops && block->ops->disable) {
+                       err = block->ops->disable(block);
+                       if (err < 0)
+                               dev_err(dsp->dev,
+                                       "error: can't disable block %d:%d\n",
+                                       block->type, block->index);
+               }
+       }
+       /* mark each block as free */
+       list_for_each_entry_safe(block, tmp, block_list, module_list) {
+               list_del(&block->module_list);
+               list_move(&block->list, &dsp->free_block_list);
+               dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
+                       block->type, block->index, block->offset);
+       }
+ }
+ /* prepare the memory block to receive data from host - callers hold locks */
+ static int block_list_prepare(struct sst_dsp *dsp,
+       struct list_head *block_list)
+ {
+       struct sst_mem_block *block;
+       int ret = 0;
+       /* enable each block so that it's ready for data */
+       list_for_each_entry(block, block_list, module_list) {
+               if (block->ops && block->ops->enable && !block->users) {
+                       ret = block->ops->enable(block);
+                       if (ret < 0) {
+                               dev_err(dsp->dev,
+                                       "error: can't enable block %d:%d\n",
+                                       block->type, block->index);
+                               goto err;
+                       }
+               }
+       }
+       return ret;
+ err:
+       list_for_each_entry(block, block_list, module_list) {
+               if (block->ops && block->ops->disable)
+                       block->ops->disable(block);
+       }
+       return ret;
+ }
+ static struct dw_dma_platform_data dw_pdata = {
+       .is_private = 1,
+       .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+       .chan_priority = CHAN_PRIORITY_ASCENDING,
+ };
+ static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
+       int irq)
+ {
+       struct dw_dma_chip *chip;
+       int err;
+       chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return ERR_PTR(-ENOMEM);
+       chip->irq = irq;
+       chip->regs = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(chip->regs))
+               return ERR_CAST(chip->regs);
+       err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
+       if (err)
+               return ERR_PTR(err);
+       chip->dev = dev;
+       err = dw_dma_probe(chip, &dw_pdata);
+       if (err)
+               return ERR_PTR(err);
+       return chip;
+ }
+ static void dw_remove(struct dw_dma_chip *chip)
+ {
+       dw_dma_remove(chip);
+ }
+ static bool dma_chan_filter(struct dma_chan *chan, void *param)
+ {
+       struct sst_dsp *dsp = (struct sst_dsp *)param;
+       return chan->device->dev == dsp->dma_dev;
+ }
+ int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
+ {
+       struct sst_dma *dma = dsp->dma;
+       struct dma_slave_config slave;
+       dma_cap_mask_t mask;
+       int ret;
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+       dma_cap_set(DMA_MEMCPY, mask);
+       dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
+       if (dma->ch == NULL) {
+               dev_err(dsp->dev, "error: DMA request channel failed\n");
+               return -EIO;
+       }
+       memset(&slave, 0, sizeof(slave));
+       slave.direction = DMA_MEM_TO_DEV;
+       slave.src_addr_width =
+               slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;
+       ret = dmaengine_slave_config(dma->ch, &slave);
+       if (ret) {
+               dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
+                       ret);
+               dma_release_channel(dma->ch);
+               dma->ch = NULL;
+       }
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);
+ void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
+ {
+       struct sst_dma *dma = dsp->dma;
+       if (!dma->ch)
+               return;
+       dma_release_channel(dma->ch);
+       dma->ch = NULL;
+ }
+ EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
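Editor's note: every DMA copy has to be bracketed by a channel request and release, which is exactly what sst_fw_new() below does. A hedged sketch (not part of this commit) of the minimal get-channel / copy / put-channel sequence for pushing a DMA-able image into LPE memory; the destination of lpe_base with no offset is illustrative only:

/* Hedged sketch: copy an image already sitting in a DMA-able buffer into
 * DSP-visible memory. sst_dsp_dma_copyto() ORs in SST_HSW_MASK_DMA_ADDR_DSP
 * so the engine targets DSP address space. */
static int example_copy_image_to_dsp(struct sst_dsp *sst, dma_addr_t img_paddr,
				     size_t img_bytes)
{
	int ret;

	ret = sst_dsp_dma_get_channel(sst, 0);
	if (ret < 0)
		return ret;

	ret = sst_dsp_dma_copyto(sst, sst->addr.lpe_base, img_paddr, img_bytes);

	sst_dsp_dma_put_channel(sst);
	return ret;
}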
+ int sst_dma_new(struct sst_dsp *sst)
+ {
+       struct sst_pdata *sst_pdata = sst->pdata;
+       struct sst_dma *dma;
+       struct resource mem;
+       const char *dma_dev_name;
+       int ret = 0;
++      if (sst->pdata->resindex_dma_base == -1)
++              /* DMA is not used, return and squelch error messages */
++              return 0;
++
+       /* configure the correct platform data for whatever DMA engine
+       * is attached to the ADSP IP. */
+       switch (sst->pdata->dma_engine) {
+       case SST_DMA_TYPE_DW:
+               dma_dev_name = "dw_dmac";
+               break;
+       default:
+               dev_err(sst->dev, "error: invalid DMA engine %d\n",
+                       sst->pdata->dma_engine);
+               return -EINVAL;
+       }
+       dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
+       if (!dma)
+               return -ENOMEM;
+       dma->sst = sst;
+       memset(&mem, 0, sizeof(mem));
+       mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
+       mem.end   = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
+       mem.flags = IORESOURCE_MEM;
+       /* now register DMA engine device */
+       dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
+       if (IS_ERR(dma->chip)) {
+               dev_err(sst->dev, "error: DMA device register failed\n");
+               ret = PTR_ERR(dma->chip);
+               goto err_dma_dev;
+       }
+       sst->dma = dma;
+       sst->fw_use_dma = true;
+       return 0;
+ err_dma_dev:
+       devm_kfree(sst->dev, dma);
+       return ret;
+ }
+ EXPORT_SYMBOL(sst_dma_new);
+ void sst_dma_free(struct sst_dma *dma)
+ {
+       if (dma == NULL)
+               return;
+       if (dma->ch)
+               dma_release_channel(dma->ch);
+       if (dma->chip)
+               dw_remove(dma->chip);
+ }
+ EXPORT_SYMBOL(sst_dma_free);
+ /* create new generic firmware object */
+ struct sst_fw *sst_fw_new(struct sst_dsp *dsp, 
+       const struct firmware *fw, void *private)
+ {
+       struct sst_fw *sst_fw;
+       int err;
+       if (!dsp->ops->parse_fw)
+               return NULL;
+       sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
+       if (sst_fw == NULL)
+               return NULL;
+       sst_fw->dsp = dsp;
+       sst_fw->private = private;
+       sst_fw->size = fw->size;
+       /* allocate DMA buffer to store FW data */
+       sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
+                               &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
+       if (!sst_fw->dma_buf) {
+               dev_err(dsp->dev, "error: DMA alloc failed\n");
+               kfree(sst_fw);
+               return NULL;
+       }
+       /* copy FW data to DMA-able memory */
+       memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);
+       if (dsp->fw_use_dma) {
+               err = sst_dsp_dma_get_channel(dsp, 0);
+               if (err < 0)
+                       goto chan_err;
+       }
+       /* call core specific FW parser to load FW data into DSP */
+       err = dsp->ops->parse_fw(sst_fw);
+       if (err < 0) {
+               dev_err(dsp->dev, "error: parse fw failed %d\n", err);
+               goto parse_err;
+       }
+       if (dsp->fw_use_dma)
+               sst_dsp_dma_put_channel(dsp);
+       mutex_lock(&dsp->mutex);
+       list_add(&sst_fw->list, &dsp->fw_list);
+       mutex_unlock(&dsp->mutex);
+       return sst_fw;
+ parse_err:
+       if (dsp->fw_use_dma)
+               sst_dsp_dma_put_channel(dsp);
+ chan_err:
+       dma_free_coherent(dsp->dma_dev, sst_fw->size,
+                               sst_fw->dma_buf,
+                               sst_fw->dmable_fw_paddr);
+       sst_fw->dma_buf = NULL;
+       kfree(sst_fw);
+       return NULL;
+ }
+ EXPORT_SYMBOL_GPL(sst_fw_new);
+ int sst_fw_reload(struct sst_fw *sst_fw)
+ {
+       struct sst_dsp *dsp = sst_fw->dsp;
+       int ret;
+       dev_dbg(dsp->dev, "reloading firmware\n");
+       /* call core specific FW parser to load FW data into DSP */
+       ret = dsp->ops->parse_fw(sst_fw);
+       if (ret < 0)
+               dev_err(dsp->dev, "error: parse fw failed %d\n", ret);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(sst_fw_reload);
+ void sst_fw_unload(struct sst_fw *sst_fw)
+ {
+       struct sst_dsp *dsp = sst_fw->dsp;
+       struct sst_module *module, *mtmp;
+       struct sst_module_runtime *runtime, *rtmp;
+       dev_dbg(dsp->dev, "unloading firmware\n");
+       mutex_lock(&dsp->mutex);
+       /* check module by module */
+       list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
+               if (module->sst_fw == sst_fw) {
+                       /* remove runtime modules */
+                       list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {
+                               block_list_remove(dsp, &runtime->block_list);
+                               list_del(&runtime->list);
+                               kfree(runtime);
+                       }
+                       /* now remove the module */
+                       block_list_remove(dsp, &module->block_list);
+                       list_del(&module->list);
+                       kfree(module);
+               }
+       }
+       /* remove all scratch blocks */
+       block_list_remove(dsp, &dsp->scratch_block_list);
+       mutex_unlock(&dsp->mutex);
+ }
+ EXPORT_SYMBOL_GPL(sst_fw_unload);
+ /* free single firmware object */
+ void sst_fw_free(struct sst_fw *sst_fw)
+ {
+       struct sst_dsp *dsp = sst_fw->dsp;
+       mutex_lock(&dsp->mutex);
+       list_del(&sst_fw->list);
+       mutex_unlock(&dsp->mutex);
+       if (sst_fw->dma_buf)
+               dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
+                       sst_fw->dmable_fw_paddr);
+       kfree(sst_fw);
+ }
+ EXPORT_SYMBOL_GPL(sst_fw_free);
+ /* free all firmware objects */
+ void sst_fw_free_all(struct sst_dsp *dsp)
+ {
+       struct sst_fw *sst_fw, *t;
+       mutex_lock(&dsp->mutex);
+       list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
+               list_del(&sst_fw->list);
+               dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
+                       sst_fw->dmable_fw_paddr);
+               kfree(sst_fw);
+       }
+       mutex_unlock(&dsp->mutex);
+ }
+ EXPORT_SYMBOL_GPL(sst_fw_free_all);
+ /* create a new SST generic module from FW template */
+ struct sst_module *sst_module_new(struct sst_fw *sst_fw,
+       struct sst_module_template *template, void *private)
+ {
+       struct sst_dsp *dsp = sst_fw->dsp;
+       struct sst_module *sst_module;
+       sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
+       if (sst_module == NULL)
+               return NULL;
+       sst_module->id = template->id;
+       sst_module->dsp = dsp;
+       sst_module->sst_fw = sst_fw;
+       sst_module->scratch_size = template->scratch_size;
+       sst_module->persistent_size = template->persistent_size;
+       sst_module->entry = template->entry;
+       sst_module->state = SST_MODULE_STATE_UNLOADED;
+       INIT_LIST_HEAD(&sst_module->block_list);
+       INIT_LIST_HEAD(&sst_module->runtime_list);
+       mutex_lock(&dsp->mutex);
+       list_add(&sst_module->list, &dsp->module_list);
+       mutex_unlock(&dsp->mutex);
+       return sst_module;
+ }
+ EXPORT_SYMBOL_GPL(sst_module_new);
+ /* free firmware module and remove from available list */
+ void sst_module_free(struct sst_module *sst_module)
+ {
+       struct sst_dsp *dsp = sst_module->dsp;
+       mutex_lock(&dsp->mutex);
+       list_del(&sst_module->list);
+       mutex_unlock(&dsp->mutex);
+       kfree(sst_module);
+ }
+ EXPORT_SYMBOL_GPL(sst_module_free);
+ struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
+       int id, void *private)
+ {
+       struct sst_dsp *dsp = module->dsp;
+       struct sst_module_runtime *runtime;
+       runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
+       if (runtime == NULL)
+               return NULL;
+       runtime->id = id;
+       runtime->dsp = dsp;
+       runtime->module = module;
+       INIT_LIST_HEAD(&runtime->block_list);
+       mutex_lock(&dsp->mutex);
+       list_add(&runtime->list, &module->runtime_list);
+       mutex_unlock(&dsp->mutex);
+       return runtime;
+ }
+ EXPORT_SYMBOL_GPL(sst_module_runtime_new);
+ void sst_module_runtime_free(struct sst_module_runtime *runtime)
+ {
+       struct sst_dsp *dsp = runtime->dsp;
+       mutex_lock(&dsp->mutex);
+       list_del(&runtime->list);
+       mutex_unlock(&dsp->mutex);
+       kfree(runtime);
+ }
+ EXPORT_SYMBOL_GPL(sst_module_runtime_free);
+ static struct sst_mem_block *find_block(struct sst_dsp *dsp,
+       struct sst_block_allocator *ba)
+ {
+       struct sst_mem_block *block;
+       list_for_each_entry(block, &dsp->free_block_list, list) {
+               if (block->type == ba->type && block->offset == ba->offset)
+                       return block;
+       }
+       return NULL;
+ }
+ /* Block allocator must be on block boundary */
+ static int block_alloc_contiguous(struct sst_dsp *dsp,
+       struct sst_block_allocator *ba, struct list_head *block_list)
+ {
+       struct list_head tmp = LIST_HEAD_INIT(tmp);
+       struct sst_mem_block *block;
+       u32 block_start = SST_HSW_BLOCK_ANY;
+       int size = ba->size, offset = ba->offset;
+       while (ba->size > 0) {
+               block = find_block(dsp, ba);
+               if (!block) {
+                       list_splice(&tmp, &dsp->free_block_list);
+                       ba->size = size;
+                       ba->offset = offset;
+                       return -ENOMEM;
+               }
+               list_move_tail(&block->list, &tmp);
+               ba->offset += block->size;
+               ba->size -= block->size;
+       }
+       ba->size = size;
+       ba->offset = offset;
+       list_for_each_entry(block, &tmp, list) {
+               if (block->offset < block_start)
+                       block_start = block->offset;
+               list_add(&block->module_list, block_list);
+               dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+                       block->type, block->index, block->offset);
+       }
+       list_splice(&tmp, &dsp->used_block_list);
+       return 0;
+ }
+ /* allocate first free DSP blocks for data - callers hold locks */
+ static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+       struct list_head *block_list)
+ {
+       struct sst_mem_block *block, *tmp;
+       int ret = 0;
+       if (ba->size == 0)
+               return 0;
+       /* find first free whole blocks that can hold module */
+       list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
+               /* ignore blocks with wrong type */
+               if (block->type != ba->type)
+                       continue;
+               if (ba->size > block->size)
+                       continue;
+               ba->offset = block->offset;
+               block->bytes_used = ba->size % block->size;
+               list_add(&block->module_list, block_list);
+               list_move(&block->list, &dsp->used_block_list);
+               dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+                       block->type, block->index, block->offset);
+               return 0;
+       }
+       /* then find free multiple blocks that can hold module */
+       list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
+               /* ignore blocks with wrong type */
+               if (block->type != ba->type)
+                       continue;
+               /* do we span > 1 blocks */
+               if (ba->size > block->size) {
+                       /* align ba to block boundary */
+                       ba->offset = block->offset;
+                       ret = block_alloc_contiguous(dsp, ba, block_list);
+                       if (ret == 0)
+                               return ret;
+               }
+       }
+       /* not enough free block space */
+       return -ENOMEM;
+ }
+ int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+       struct list_head *block_list)
+ {
+       int ret;
+       dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
+               ba->size, ba->offset, ba->type);
+       mutex_lock(&dsp->mutex);
+       ret = block_alloc(dsp, ba, block_list);
+       if (ret < 0) {
+               dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
+               goto out;
+       }
+       /* prepare DSP blocks for module usage */
+       ret = block_list_prepare(dsp, block_list);
+       if (ret < 0)
+               dev_err(dsp->dev, "error: prepare failed\n");
+ out:
+       mutex_unlock(&dsp->mutex);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(sst_alloc_blocks);
+ int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
+ {
+       mutex_lock(&dsp->mutex);
+       block_list_remove(dsp, block_list);
+       mutex_unlock(&dsp->mutex);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(sst_free_blocks);
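Editor's note: a hedged usage sketch (not part of this commit) of the exported block allocator pair above — sst_alloc_blocks() finds and enables free blocks of the requested type, linking them onto the caller's list, and sst_free_blocks() disables them and returns them to the free list. The size and block type below are illustrative:

/* Hedged sketch: allocate a run of DRAM-type blocks, use them, release them.
 * The allocator fills in ba.offset and links the blocks onto 'blocks'. */
static int example_use_blocks(struct sst_dsp *dsp)
{
	LIST_HEAD(blocks);
	struct sst_block_allocator ba = {
		.type = SST_MEM_DRAM,   /* allocate from DRAM-type blocks */
		.size = 0x4000,         /* bytes requested */
	};
	int ret;

	ret = sst_alloc_blocks(dsp, &ba, &blocks);
	if (ret < 0)
		return ret;

	/* ... blocks are enabled and ready for data here ... */

	return sst_free_blocks(dsp, &blocks);
}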
+ /* allocate memory blocks for static module addresses - callers hold locks */
+ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+       struct list_head *block_list)
+ {
+       struct sst_mem_block *block, *tmp;
+       struct sst_block_allocator ba_tmp = *ba;
+       u32 end = ba->offset + ba->size, block_end;
+       int err;
+       /* only IRAM/DRAM blocks are managed */
+       if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
+               return 0;
+       /* are blocks already attached to this module */
+       list_for_each_entry_safe(block, tmp, block_list, module_list) {
+               /* ignore blocks with wrong type */
+               if (block->type != ba->type)
+                       continue;
+               block_end = block->offset + block->size;
+               /* find block that holds section */
+               if (ba->offset >= block->offset && end <= block_end)
+                       return 0;
+               /* does block span more than 1 section */
+               if (ba->offset >= block->offset && ba->offset < block_end) {
+                       /* align ba to block boundary */
+                       ba_tmp.size -= block_end - ba->offset;
+                       ba_tmp.offset = block_end;
+                       err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
+                       if (err < 0)
+                               return -ENOMEM;
+                       /* module already owns blocks */
+                       return 0;
+               }
+       }
+       /* otherwise search the free list for blocks that can hold the section */
+       list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
+               block_end = block->offset + block->size;
+               /* ignore blocks with wrong type */
+               if (block->type != ba->type)
+                       continue;
+               /* find block that holds section */
+               if (ba->offset >= block->offset && end <= block_end) {
+                       /* add block */
+                       list_move(&block->list, &dsp->used_block_list);
+                       list_add(&block->module_list, block_list);
+                       dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+                               block->type, block->index, block->offset);
+                       return 0;
+               }
+               /* does the section span beyond this block? */
+               if (ba->offset >= block->offset && ba->offset < block_end) {
+                       /* add block */
+                       list_move(&block->list, &dsp->used_block_list);
+                       list_add(&block->module_list, block_list);
+                       /* align ba to block boundary */
+                       ba_tmp.size -= block_end - ba->offset;
+                       ba_tmp.offset = block_end;
+                       err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
+                       if (err < 0)
+                               return -ENOMEM;
+                       return 0;
+               }
+       }
+       return -ENOMEM;
+ }
+ /* Load fixed module data into DSP memory blocks */
+ int sst_module_alloc_blocks(struct sst_module *module)
+ {
+       struct sst_dsp *dsp = module->dsp;
+       struct sst_fw *sst_fw = module->sst_fw;
+       struct sst_block_allocator ba;
+       int ret;
+       memset(&ba, 0, sizeof(ba));
+       ba.size = module->size;
+       ba.type = module->type;
+       ba.offset = module->offset;
+       dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
+               ba.size, ba.offset, ba.type);
+       mutex_lock(&dsp->mutex);
+       /* alloc blocks that include this section */
+       ret = block_alloc_fixed(dsp, &ba, &module->block_list);
+       if (ret < 0) {
+               dev_err(dsp->dev,
+                       "error: no free blocks for section at offset 0x%x size 0x%x\n",
+                       module->offset, module->size);
+               mutex_unlock(&dsp->mutex);
+               return -ENOMEM;
+       }
+       /* prepare DSP blocks for module copy */
+       ret = block_list_prepare(dsp, &module->block_list);
+       if (ret < 0) {
+               dev_err(dsp->dev, "error: fw module prepare failed\n");
+               goto err;
+       }
+       /* copy partial module data to blocks */
+       if (dsp->fw_use_dma) {
+               ret = sst_dsp_dma_copyto(dsp,
+                       dsp->addr.lpe_base + module->offset,
+                       sst_fw->dmable_fw_paddr + module->data_offset,
+                       module->size);
+               if (ret < 0) {
+                       dev_err(dsp->dev, "error: module copy failed\n");
+                       goto err;
+               }
+       } else {
+               sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
+                       module->size);
+       }
+       mutex_unlock(&dsp->mutex);
+       return ret;
+ err:
+       block_list_remove(dsp, &module->block_list);
+       mutex_unlock(&dsp->mutex);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);
+ /* Unload entire module from DSP memory */
+ int sst_module_free_blocks(struct sst_module *module)
+ {
+       struct sst_dsp *dsp = module->dsp;
+       mutex_lock(&dsp->mutex);
+       block_list_remove(dsp, &module->block_list);
+       mutex_unlock(&dsp->mutex);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(sst_module_free_blocks);
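+ /* allocate persistent DRAM blocks for a module runtime, optionally at a fixed offset */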
+ int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
+       int offset)
+ {
+       struct sst_dsp *dsp = runtime->dsp;
+       struct sst_module *module = runtime->module;
+       struct sst_block_allocator ba;
+       int ret;
+       if (module->persistent_size == 0)
+               return 0;
+       memset(&ba, 0, sizeof(ba));
+       ba.size = module->persistent_size;
+       ba.type = SST_MEM_DRAM;
+       mutex_lock(&dsp->mutex);
+       /* do we need to allocate at a fixed address? */
+       if (offset != 0) {
+               ba.offset = offset;
+               dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
+                       ba.size, ba.type, ba.offset);
+               /* alloc blocks that include this section */
+               ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);
+       } else {
+               dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
+                       ba.size, ba.type);
+               /* alloc blocks that include this section */
+               ret = block_alloc(dsp, &ba, &runtime->block_list);
+       }
+       if (ret < 0) {
+               dev_err(dsp->dev,
+                       "error: no free blocks for runtime module size 0x%x\n",
+                       module->persistent_size);
+               mutex_unlock(&dsp->mutex);
+               return -ENOMEM;
+       }
+       runtime->persistent_offset = ba.offset;
+       /* prepare DSP blocks for module copy */
+       ret = block_list_prepare(dsp, &runtime->block_list);
+       if (ret < 0) {
+               dev_err(dsp->dev, "error: runtime block prepare failed\n");
+               goto err;
+       }
+       mutex_unlock(&dsp->mutex);
+       return ret;
+ err:
+       block_list_remove(dsp, &runtime->block_list);
+       mutex_unlock(&dsp->mutex);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);
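+ /* free the persistent blocks owned by a module runtime */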
+ int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
+ {
+       struct sst_dsp *dsp = runtime->dsp;
+       mutex_lock(&dsp->mutex);
+       block_list_remove(dsp, &runtime->block_list);
+       mutex_unlock(&dsp->mutex);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
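+ /* save a runtime's persistent memory to a DMA buffer so it can be restored later */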
+ int sst_module_runtime_save(struct sst_module_runtime *runtime,
+       struct sst_module_runtime_context *context)
+ {
+       struct sst_dsp *dsp = runtime->dsp;
+       struct sst_module *module = runtime->module;
+       int ret = 0;
+       dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
+               runtime->id, runtime->persistent_offset,
+               module->persistent_size);
+       context->buffer = dma_alloc_coherent(dsp->dma_dev,
+               module->persistent_size,
+               &context->dma_buffer, GFP_DMA | GFP_KERNEL);
+       if (!context->buffer) {
+               dev_err(dsp->dev, "error: DMA context alloc failed\n");
+               return -ENOMEM;
+       }
+       mutex_lock(&dsp->mutex);
+       if (dsp->fw_use_dma) {
+               ret = sst_dsp_dma_get_channel(dsp, 0);
+               if (ret < 0)
+                       goto err;
+               ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
+                       dsp->addr.lpe_base + runtime->persistent_offset,
+                       module->persistent_size);
+               sst_dsp_dma_put_channel(dsp);
+               if (ret < 0) {
+                       dev_err(dsp->dev, "error: context copy failed\n");
+                       goto err;
+               }
+       } else {
+               sst_memcpy32(context->buffer, dsp->addr.lpe +
+                       runtime->persistent_offset,
+                       module->persistent_size);
+       }
+ err:
+       mutex_unlock(&dsp->mutex);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(sst_module_runtime_save);
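+ /* restore a runtime's persistent memory from the previously saved context buffer */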
+ int sst_module_runtime_restore(struct sst_module_runtime *runtime,
+       struct sst_module_runtime_context *context)
+ {
+       struct sst_dsp *dsp = runtime->dsp;
+       struct sst_module *module = runtime->module;
+       int ret = 0;
+       dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
+               runtime->id, runtime->persistent_offset,
+               module->persistent_size);
+       mutex_lock(&dsp->mutex);
+       if (!context->buffer) {
+               dev_info(dsp->dev, "no context buffer to restore\n");
+               goto err;
+       }
+       if (dsp->fw_use_dma) {
+               ret = sst_dsp_dma_get_channel(dsp, 0);
+               if (ret < 0)
+                       goto err;
+               ret = sst_dsp_dma_copyto(dsp,
+                       dsp->addr.lpe_base + runtime->persistent_offset,
+                       context->dma_buffer, module->persistent_size);
+               sst_dsp_dma_put_channel(dsp);
+               if (ret < 0) {
+                       dev_err(dsp->dev, "error: module copy failed\n");
+                       goto err;
+               }
+       } else {
+               sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
+                       context->buffer, module->persistent_size);
+       }
+       dma_free_coherent(dsp->dma_dev, module->persistent_size,
+                               context->buffer, context->dma_buffer);
+       context->buffer = NULL;
+ err:
+       mutex_unlock(&dsp->mutex);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(sst_module_runtime_restore);
+ /* register a DSP memory block for use with FW based modules */
+ struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
+       u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
+       void *private)
+ {
+       struct sst_mem_block *block;
+       block = kzalloc(sizeof(*block), GFP_KERNEL);
+       if (block == NULL)
+               return NULL;
+       block->offset = offset;
+       block->size = size;
+       block->index = index;
+       block->type = type;
+       block->dsp = dsp;
+       block->private = private;
+       block->ops = ops;
+       mutex_lock(&dsp->mutex);
+       list_add(&block->list, &dsp->free_block_list);
+       mutex_unlock(&dsp->mutex);
+       return block;
+ }
+ EXPORT_SYMBOL_GPL(sst_mem_block_register);
+ /* unregister all DSP memory blocks */
+ void sst_mem_block_unregister_all(struct sst_dsp *dsp)
+ {
+       struct sst_mem_block *block, *tmp;
+       mutex_lock(&dsp->mutex);
+       /* unregister used blocks */
+       list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
+               list_del(&block->list);
+               kfree(block);
+       }
+       /* unregister free blocks */
+       list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
+               list_del(&block->list);
+               kfree(block);
+       }
+       mutex_unlock(&dsp->mutex);
+ }
+ EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
+ /* allocate scratch buffer blocks */
+ int sst_block_alloc_scratch(struct sst_dsp *dsp)
+ {
+       struct sst_module *module;
+       struct sst_block_allocator ba;
+       int ret;
+       mutex_lock(&dsp->mutex);
+       /* calculate required scratch size */
+       dsp->scratch_size = 0;
+       list_for_each_entry(module, &dsp->module_list, list) {
+               dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
+                       module->id, module->scratch_size);
+               if (dsp->scratch_size < module->scratch_size)
+                       dsp->scratch_size = module->scratch_size;
+       }
+       dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
+               dsp->scratch_size);
+       if (dsp->scratch_size == 0) {
+               dev_info(dsp->dev, "no modules need scratch buffer\n");
+               mutex_unlock(&dsp->mutex);
+               return 0;
+       }
+       /* allocate blocks for module scratch buffers */
+       dev_dbg(dsp->dev, "allocating scratch blocks\n");
+       ba.size = dsp->scratch_size;
+       ba.type = SST_MEM_DRAM;
+       /* do we need to allocate at a fixed offset? */
+       if (dsp->scratch_offset != 0) {
+               ba.offset = dsp->scratch_offset;
+               dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
+                       ba.size, ba.type, ba.offset);
+               /* alloc blocks that include this section */
+               ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);
+       } else {
+               dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
+                       ba.size, ba.type);
+               ba.offset = 0;
+               ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
+       }
+       if (ret < 0) {
+               dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
+               mutex_unlock(&dsp->mutex);
+               return ret;
+       }
+       ret = block_list_prepare(dsp, &dsp->scratch_block_list);
+       if (ret < 0) {
+               dev_err(dsp->dev, "error: scratch block prepare failed\n");
+               mutex_unlock(&dsp->mutex);
+               return ret;
+       }
+       /* all modules share the same scratch offset */
+       dsp->scratch_offset = ba.offset;
+       mutex_unlock(&dsp->mutex);
+       return dsp->scratch_size;
+ }
+ EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
+ /* free all scratch blocks */
+ void sst_block_free_scratch(struct sst_dsp *dsp)
+ {
+       mutex_lock(&dsp->mutex);
+       block_list_remove(dsp, &dsp->scratch_block_list);
+       mutex_unlock(&dsp->mutex);
+ }
+ EXPORT_SYMBOL_GPL(sst_block_free_scratch);
+ /* get a module from its unique ID */
+ struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
+ {
+       struct sst_module *module;
+       mutex_lock(&dsp->mutex);
+       list_for_each_entry(module, &dsp->module_list, list) {
+               if (module->id == id) {
+                       mutex_unlock(&dsp->mutex);
+                       return module;
+               }
+       }
+       mutex_unlock(&dsp->mutex);
+       return NULL;
+ }
+ EXPORT_SYMBOL_GPL(sst_module_get_from_id);
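+ /* get a module runtime from its unique ID */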
+ struct sst_module_runtime *sst_module_runtime_get_from_id(
+       struct sst_module *module, u32 id)
+ {
+       struct sst_module_runtime *runtime;
+       struct sst_dsp *dsp = module->dsp;
+       mutex_lock(&dsp->mutex);
+       list_for_each_entry(runtime, &module->runtime_list, list) {
+               if (runtime->id == id) {
+                       mutex_unlock(&dsp->mutex);
+                       return runtime;
+               }
+       }
+       mutex_unlock(&dsp->mutex);
+       return NULL;
+ }
+ EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);
+ /* returns block address in DSP address space */
+ u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
+       enum sst_mem_type type)
+ {
+       switch (type) {
+       case SST_MEM_IRAM:
+               return offset - dsp->addr.iram_offset +
+                       dsp->addr.dsp_iram_offset;
+       case SST_MEM_DRAM:
+               return offset - dsp->addr.dram_offset +
+                       dsp->addr.dsp_dram_offset;
+       default:
+               return 0;
+       }
+ }
+ EXPORT_SYMBOL_GPL(sst_dsp_get_offset);