#include <linux/regulator/consumer.h>
#include "sdhci-pltfm.h"
+#include "cqhci.h"
#define CORE_MCI_VERSION 0x50
#define CORE_VERSION_MAJOR_SHIFT 28
#define msm_host_writel(msm_host, val, host, offset) \
msm_host->var_ops->msm_writel_relaxed(val, host, offset)
+/* CQHCI vendor specific registers */
+#define CQHCI_VENDOR_CFG1 0xA00
+#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)
+
struct sdhci_msm_offset {
u32 core_hc_mode;
u32 core_mci_data_cnt;
__sdhci_msm_set_clock(host, clock);
}
+/*****************************************************************************\
+ * *
+ * MSM Command Queue Engine (CQE) *
+ * *
+\*****************************************************************************/
+
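+/*
+ * Route interrupts to CQHCI while CQE owns the transfer; any intmask that
+ * CQE does not claim is returned so the standard SDHCI handler can run.
+ */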
+static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+ return 0;
+}
+
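+/*
+ * Halt/disable CQE and hand the controller back to the legacy SDHCI path,
+ * clearing any latched command complete status beforehand.
+ */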
+static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 ctrl;
+
+ /*
+ * When CQE is halted, the legacy SDHCI path operates only
+ * on 16-byte descriptors in 64-bit mode.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->desc_sz = 16;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * During CQE command transfers, the command complete bit gets latched.
+ * So s/w should clear the command complete interrupt status when CQE is
+ * either halted or disabled. Otherwise an unexpected SDHCI legacy
+ * interrupt gets triggered when CQE is halted/disabled.
+ */
+ ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
+ ctrl |= SDHCI_INT_RESPONSE;
+ sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ sdhci_cqe_disable(mmc, recovery);
+}
+
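+/* CQHCI callbacks: stock SDHCI enable, MSM-specific disable (see above) */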
+static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
+ .enable = sdhci_cqe_enable,
+ .disable = sdhci_msm_cqe_disable,
+};
+
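+/*
+ * Set up the SDHCI host with CQE support: size the ADMA table, initialise
+ * CQHCI on top of the host and register it with the MMC core.
+ */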
+static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
+ struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct cqhci_host *cq_host;
+ bool dma64;
+ u32 cqcfg;
+ int ret;
+
+ /*
+ * When CQE is halted, the SDHC operates only on 16-byte ADMA
+ * descriptors, so ensure the ADMA table is allocated for 16-byte
+ * descriptors.
+ */
+ if (host->caps & SDHCI_CAN_64BIT)
+ host->alloc_desc_sz = 16;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = cqhci_pltfm_init(pdev);
+ if (IS_ERR(cq_host)) {
+ ret = PTR_ERR(cq_host);
+ dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
+ goto cleanup;
+ }
+
+ msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+ cq_host->ops = &sdhci_msm_cqhci_ops;
+
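+ /* Let CQHCI know whether the host is operating in 64-bit DMA mode */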
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ goto cleanup;
+ }
+
+ /* Disable CQE reset due to the CQE enable signal */
+ cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
+ cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
+ cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
+
+ /*
+ * The SDHC expects 12-byte ADMA descriptors until CQE is enabled,
+ * so limit desc_sz to 12 so that the data commands sent during card
+ * initialization (before CQE gets enabled) execute without any issues.
+ */
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ host->desc_sz = 12;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ dev_info(&pdev->dev, "%s: CQE init: success\n",
+ mmc_hostname(host->mmc));
+ return ret;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
/*
* Platform specific register write functions. This is so that, if any
* register write needs to be followed up by platform specific actions,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.write_w = sdhci_msm_writew,
.write_b = sdhci_msm_writeb,
+ .irq = sdhci_msm_cqe_irq,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
u8 core_major;
const struct sdhci_msm_offset *msm_offset;
const struct sdhci_msm_variant_info *var_info;
+ struct device_node *node = pdev->dev.of_node;
host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
if (IS_ERR(host))
pm_runtime_use_autosuspend(&pdev->dev);
host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
- ret = sdhci_add_host(host);
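+ /* The "supports-cqe" DT property selects the CQE-aware registration path */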
+ if (of_property_read_bool(node, "supports-cqe"))
+ ret = sdhci_msm_cqe_add_host(host, pdev);
+ else
+ ret = sdhci_add_host(host);
if (ret)
goto pm_runtime_disable;
sdhci_msm_set_regulator_caps(msm_host);