void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream);
void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream);
+ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
+ unsigned int dai_fmt);
+
/* Utility functions to get clock rates from various things */
int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params);
unsigned int mask, unsigned int value);
#ifdef CONFIG_SND_SOC_AC97_BUS
+ struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
unsigned int symmetric_channels:1;
unsigned int symmetric_samplebits:1;
++++ /* Mark this pcm with non atomic ops */
++++ bool nonatomic;
++++
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int no_pcm:1;
/*
* Card-specific routes and widgets.
++++ * Note: of_dapm_xxx entries are for Device Tree; otherwise use the driver's built-in tables.
*/
const struct snd_soc_dapm_widget *dapm_widgets;
int num_dapm_widgets;
const struct snd_soc_dapm_route *dapm_routes;
int num_dapm_routes;
++++ const struct snd_soc_dapm_widget *of_dapm_widgets;
++++ int num_of_dapm_widgets;
++++ const struct snd_soc_dapm_route *of_dapm_routes;
++++ int num_of_dapm_routes;
bool fully_routed;
struct work_struct deferred_resume_work;
return IRQ_HANDLED;
}
+ +++/*
+ +++ * When the bit clock is input, limit the maximum rate according to the
+ +++ * Serial Clock Ratio Considerations section from the SSC documentation:
+ +++ *
+ +++ * The Transmitter and the Receiver can be programmed to operate
+ +++ * with the clock signals provided on either the TK or RK pins.
+ +++ * This allows the SSC to support many slave-mode data transfers.
+ +++ * In this case, the maximum clock speed allowed on the RK pin is:
+ +++ * - Peripheral clock divided by 2 if Receiver Frame Synchro is input
+ +++ * - Peripheral clock divided by 3 if Receiver Frame Synchro is output
+ +++ * In addition, the maximum clock speed allowed on the TK pin is:
+ +++ * - Peripheral clock divided by 6 if Transmit Frame Synchro is input
+ +++ * - Peripheral clock divided by 2 if Transmit Frame Synchro is output
+ +++ *
+ +++ * When the bit clock is output, limit the rate according to the
+ +++ * SSC divider restrictions.
+ +++ */
+ +++static int atmel_ssc_hw_rule_rate(struct snd_pcm_hw_params *params,
+ +++ struct snd_pcm_hw_rule *rule)
+ +++{
+ +++ struct atmel_ssc_info *ssc_p = rule->private;
+ +++ struct ssc_device *ssc = ssc_p->ssc;
+ +++ struct snd_interval *i = hw_param_interval(params, rule->var);
+ +++ struct snd_interval t;
+ +++ struct snd_ratnum r = {
+ +++ .den_min = 1,
+ +++ .den_max = 4095,
+ +++ .den_step = 1,
+ +++ };
+ +++ unsigned int num = 0, den = 0;
+ +++ int frame_size;
+ +++ int mck_div = 2;
+ +++ int ret;
+ +++
+ +++ frame_size = snd_soc_params_to_frame_size(params);
+ +++ if (frame_size < 0)
+ +++ return frame_size;
+ +++
+ +++ switch (ssc_p->daifmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ +++ case SND_SOC_DAIFMT_CBM_CFS:
+ +++ if ((ssc_p->dir_mask & SSC_DIR_MASK_CAPTURE)
+ +++ && ssc->clk_from_rk_pin)
+ +++ /* Receiver Frame Synchro (i.e. capture)
+ +++ * is output (format is _CFS) and the RK pin
+ +++ * is used for input (format is _CBM_).
+ +++ */
+ +++ mck_div = 3;
+ +++ break;
+ +++
+ +++ case SND_SOC_DAIFMT_CBM_CFM:
+ +++ if ((ssc_p->dir_mask & SSC_DIR_MASK_PLAYBACK)
+ +++ && !ssc->clk_from_rk_pin)
+ +++ /* Transmit Frame Synchro (i.e. playback)
+ +++ * is input (format is _CFM) and the TK pin
+ +++ * is used for input (format _CBM_ but not
+ +++ * using the RK pin).
+ +++ */
+ +++ mck_div = 6;
+ +++ break;
+ +++ }
+ +++
+ +++ switch (ssc_p->daifmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ +++ case SND_SOC_DAIFMT_CBS_CFS:
+ +++ r.num = ssc_p->mck_rate / mck_div / frame_size;
+ +++
+ +++ ret = snd_interval_ratnum(i, 1, &r, &num, &den);
+ +++ if (ret >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
+ +++ params->rate_num = num;
+ +++ params->rate_den = den;
+ +++ }
+ +++ break;
+ +++
+ +++ case SND_SOC_DAIFMT_CBM_CFS:
+ +++ case SND_SOC_DAIFMT_CBM_CFM:
+ +++ t.min = 8000;
+ +++ t.max = ssc_p->mck_rate / mck_div / frame_size;
+ +++ t.openmin = t.openmax = 0;
+ +++ t.integer = 0;
+ +++ ret = snd_interval_refine(i, &t);
+ +++ break;
+ +++
+ +++ default:
+ +++ ret = -EINVAL;
+ +++ break;
+ +++ }
+ +++
+ +++ return ret;
+ +++}
/*-------------------------------------------------------------------------*\
* DAI functions
struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
struct atmel_pcm_dma_params *dma_params;
int dir, dir_mask;
+ +++ int ret;
pr_debug("atmel_ssc_startup: SSC_SR=0x%u\n",
ssc_readl(ssc_p->ssc->regs, SR));
/* Enable PMC peripheral clock for this SSC */
pr_debug("atmel_ssc_dai: Starting clock\n");
clk_enable(ssc_p->ssc->clk);
+ +++ ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
/* Reset the SSC to keep it at a clean status */
ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
dir_mask = SSC_DIR_MASK_CAPTURE;
}
+ +++ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ +++ SNDRV_PCM_HW_PARAM_RATE,
+ +++ atmel_ssc_hw_rule_rate,
+ +++ ssc_p,
+ +++ SNDRV_PCM_HW_PARAM_FRAME_BITS,
+ +++ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ +++ if (ret < 0) {
+ +++ dev_err(dai->dev, "Failed to specify rate rule: %d\n", ret);
+ +++ return ret;
+ +++ }
+ +++
dma_params = &ssc_dma_params[dai->id][dir];
dma_params->ssc = ssc_p->ssc;
dma_params->substream = substream;
rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
| SSC_BF(RCMR_STTDLY, 1)
| SSC_BF(RCMR_START, SSC_START_RISING_RF)
- | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+ | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, SSC_CKS_DIV);
tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
| SSC_BF(TCMR_STTDLY, 1)
| SSC_BF(TCMR_START, SSC_START_RISING_RF)
- | SSC_BF(TCMR_CKI, SSC_CKI_RISING)
+ | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
| SSC_BF(TCMR_CKS, SSC_CKS_DIV);
rcmr = SSC_BF(RCMR_PERIOD, 0)
| SSC_BF(RCMR_STTDLY, START_DELAY)
| SSC_BF(RCMR_START, SSC_START_RISING_RF)
- | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+ | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
SSC_CKS_PIN : SSC_CKS_CLOCK);
# define atmel_ssc_resume NULL
#endif /* CONFIG_PM */
- ---#define ATMEL_SSC_RATES (SNDRV_PCM_RATE_8000_96000)
- ---
#define ATMEL_SSC_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
.playback = {
.channels_min = 1,
.channels_max = 2,
- --- .rates = ATMEL_SSC_RATES,
+ +++ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ +++ .rate_min = 8000,
+ +++ .rate_max = 384000,
.formats = ATMEL_SSC_FORMATS,},
.capture = {
.channels_min = 1,
.channels_max = 2,
- --- .rates = ATMEL_SSC_RATES,
+ +++ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ +++ .rate_min = 8000,
+ +++ .rate_max = 384000,
.formats = ATMEL_SSC_FORMATS,},
.ops = &atmel_ssc_dai_ops,
};
spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
---- shim_regs->imrx = sst_shim_read64(shim, SST_IMRX),
++++ shim_regs->imrx = sst_shim_read64(shim, SST_IMRX);
++++ shim_regs->csr = sst_shim_read64(shim, SST_CSR);
++++
spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
}
*/
spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
sst_shim_write64(shim, SST_IMRX, shim_regs->imrx),
++++ sst_shim_write64(shim, SST_CSR, shim_regs->csr),
spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
}
* initially active. So change the state to active before
* enabling the pm
*/
++++
++++ if (!acpi_disabled)
++++ pm_runtime_set_active(ctx->dev);
++++
pm_runtime_enable(ctx->dev);
if (acpi_disabled)
synchronize_irq(ctx->irq_num);
flush_workqueue(ctx->post_msg_wq);
++++ ctx->ops->reset(ctx);
/* save the shim registers because PMC doesn't save state */
sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
return ret;
}
---- static int intel_sst_runtime_resume(struct device *dev)
++++ static int intel_sst_suspend(struct device *dev)
{
---- int ret = 0;
struct intel_sst_drv *ctx = dev_get_drvdata(dev);
++++ struct sst_fw_save *fw_save;
++++ int i, ret = 0;
---- if (ctx->sst_state == SST_RESET) {
---- ret = sst_load_fw(ctx);
---- if (ret) {
---- dev_err(dev, "FW download fail %d\n", ret);
---- sst_set_fw_state_locked(ctx, SST_RESET);
++++ /* check first if we are already in SW reset */
++++ if (ctx->sst_state == SST_RESET)
++++ return 0;
++++
++++ /*
++++ * check if any stream is active and running
++++ * they should already by suspend by soc_suspend
++++ */
++++ for (i = 1; i <= ctx->info.max_streams; i++) {
++++ struct stream_info *stream = &ctx->streams[i];
++++
++++ if (stream->status == STREAM_RUNNING) {
++++ dev_err(dev, "stream %d is running, cant susupend, abort\n", i);
++++ return -EBUSY;
}
}
++++ synchronize_irq(ctx->irq_num);
++++ flush_workqueue(ctx->post_msg_wq);
++++
++++ /* Move the SST state to Reset */
++++ sst_set_fw_state_locked(ctx, SST_RESET);
++++
++++ /* tell DSP we are suspending */
++++ if (ctx->ops->save_dsp_context(ctx))
++++ return -EBUSY;
++++
++++ /* save the memories */
++++ fw_save = kzalloc(sizeof(*fw_save), GFP_KERNEL);
++++ if (!fw_save)
++++ return -ENOMEM;
++++ fw_save->iram = kzalloc(ctx->iram_end - ctx->iram_base, GFP_KERNEL);
++++ if (!fw_save->iram) {
++++ ret = -ENOMEM;
++++ goto iram;
++++ }
++++ fw_save->dram = kzalloc(ctx->dram_end - ctx->dram_base, GFP_KERNEL);
++++ if (!fw_save->dram) {
++++ ret = -ENOMEM;
++++ goto dram;
++++ }
++++ fw_save->sram = kzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
++++ if (!fw_save->sram) {
++++ ret = -ENOMEM;
++++ goto sram;
++++ }
++++
++++ fw_save->ddr = kzalloc(ctx->ddr_end - ctx->ddr_base, GFP_KERNEL);
++++ if (!fw_save->ddr) {
++++ ret = -ENOMEM;
++++ goto ddr;
++++ }
++++
++++ memcpy32_fromio(fw_save->iram, ctx->iram, ctx->iram_end - ctx->iram_base);
++++ memcpy32_fromio(fw_save->dram, ctx->dram, ctx->dram_end - ctx->dram_base);
++++ memcpy32_fromio(fw_save->sram, ctx->mailbox, SST_MAILBOX_SIZE);
++++ memcpy32_fromio(fw_save->ddr, ctx->ddr, ctx->ddr_end - ctx->ddr_base);
++++
++++ ctx->fw_save = fw_save;
++++ ctx->ops->reset(ctx);
++++ return 0;
++++ ddr:
++++ kfree(fw_save->sram);
++++ sram:
++++ kfree(fw_save->dram);
++++ dram:
++++ kfree(fw_save->iram);
++++ iram:
++++ kfree(fw_save);
++++ return ret;
++++ }
++++
++++ static int intel_sst_resume(struct device *dev)
++++ {
++++ struct intel_sst_drv *ctx = dev_get_drvdata(dev);
++++ struct sst_fw_save *fw_save = ctx->fw_save;
++++ int ret = 0;
++++ struct sst_block *block;
++++
++++ if (!fw_save)
++++ return 0;
++++
++++ sst_set_fw_state_locked(ctx, SST_FW_LOADING);
++++
++++ /* we have to restore the memory saved */
++++ ctx->ops->reset(ctx);
++++
++++ ctx->fw_save = NULL;
++++
++++ memcpy32_toio(ctx->iram, fw_save->iram, ctx->iram_end - ctx->iram_base);
++++ memcpy32_toio(ctx->dram, fw_save->dram, ctx->dram_end - ctx->dram_base);
++++ memcpy32_toio(ctx->mailbox, fw_save->sram, SST_MAILBOX_SIZE);
++++ memcpy32_toio(ctx->ddr, fw_save->ddr, ctx->ddr_end - ctx->ddr_base);
++++
++++ kfree(fw_save->sram);
++++ kfree(fw_save->dram);
++++ kfree(fw_save->iram);
++++ kfree(fw_save->ddr);
++++ kfree(fw_save);
++++
++++ block = sst_create_block(ctx, 0, FW_DWNL_ID);
++++ if (block == NULL)
++++ return -ENOMEM;
++++
++++
++++ /* start and wait for ack */
++++ ctx->ops->start(ctx);
++++ ret = sst_wait_timeout(ctx, block);
++++ if (ret) {
++++ dev_err(ctx->dev, "fw download failed %d\n", ret);
++++ /* FW download failed due to timeout */
++++ ret = -EBUSY;
++++
++++ } else {
++++ sst_set_fw_state_locked(ctx, SST_FW_RUNNING);
++++ }
++++
++++ sst_free_block(ctx, block);
return ret;
}
const struct dev_pm_ops intel_sst_pm = {
++++ .suspend = intel_sst_suspend,
++++ .resume = intel_sst_resume,
.runtime_suspend = intel_sst_runtime_suspend,
---- .runtime_resume = intel_sst_runtime_resume,
};
EXPORT_SYMBOL_GPL(intel_sst_pm);