]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/mmc/host/sdhci-msm.c
mmc: sdhci-msm: Factor out sdhci_msm_hs400
[mirror_ubuntu-artful-kernel.git] / drivers / mmc / host / sdhci-msm.c
CommitLineData
0eb0d9f4
GD
1/*
2 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
3 *
4 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/of_device.h>
0eb0d9f4 19#include <linux/delay.h>
415b5a75 20#include <linux/mmc/mmc.h>
67e6db11 21#include <linux/pm_runtime.h>
415b5a75 22#include <linux/slab.h>
cc392c58 23#include <linux/iopoll.h>
0eb0d9f4
GD
24
25#include "sdhci-pltfm.h"
26
3a3ad3e9
GD
27#define CORE_MCI_VERSION 0x50
28#define CORE_VERSION_MAJOR_SHIFT 28
29#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
30#define CORE_VERSION_MINOR_MASK 0xff
31
0eb0d9f4
GD
32#define CORE_HC_MODE 0x78
33#define HC_MODE_EN 0x1
34#define CORE_POWER 0x0
35#define CORE_SW_RST BIT(7)
ff06ce41 36#define FF_CLK_SW_RST_DIS BIT(13)
0eb0d9f4 37
ad81d387
GD
38#define CORE_PWRCTL_STATUS 0xdc
39#define CORE_PWRCTL_MASK 0xe0
40#define CORE_PWRCTL_CLEAR 0xe4
41#define CORE_PWRCTL_CTL 0xe8
42#define CORE_PWRCTL_BUS_OFF BIT(0)
43#define CORE_PWRCTL_BUS_ON BIT(1)
44#define CORE_PWRCTL_IO_LOW BIT(2)
45#define CORE_PWRCTL_IO_HIGH BIT(3)
46#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
47#define CORE_PWRCTL_IO_SUCCESS BIT(2)
48#define REQ_BUS_OFF BIT(0)
49#define REQ_BUS_ON BIT(1)
50#define REQ_IO_LOW BIT(2)
51#define REQ_IO_HIGH BIT(3)
52#define INT_MASK 0xf
415b5a75
GD
53#define MAX_PHASES 16
54#define CORE_DLL_LOCK BIT(7)
02e4293d 55#define CORE_DDR_DLL_LOCK BIT(11)
415b5a75
GD
56#define CORE_DLL_EN BIT(16)
57#define CORE_CDR_EN BIT(17)
58#define CORE_CK_OUT_EN BIT(18)
59#define CORE_CDR_EXT_EN BIT(19)
60#define CORE_DLL_PDN BIT(29)
61#define CORE_DLL_RST BIT(30)
62#define CORE_DLL_CONFIG 0x100
cc392c58 63#define CORE_CMD_DAT_TRACK_SEL BIT(0)
415b5a75
GD
64#define CORE_DLL_STATUS 0x108
65
83736352 66#define CORE_DLL_CONFIG_2 0x1b4
02e4293d 67#define CORE_DDR_CAL_EN BIT(0)
83736352
VG
68#define CORE_FLL_CYCLE_CNT BIT(18)
69#define CORE_DLL_CLOCK_DISABLE BIT(21)
70
415b5a75
GD
71#define CORE_VENDOR_SPEC 0x10c
72#define CORE_CLK_PWRSAVE BIT(1)
ff06ce41
VG
73#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
74#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
75#define CORE_HC_MCLK_SEL_MASK (3 << 8)
76#define CORE_HC_SELECT_IN_EN BIT(18)
77#define CORE_HC_SELECT_IN_HS400 (6 << 19)
78#define CORE_HC_SELECT_IN_MASK (7 << 19)
415b5a75 79
cc392c58
RH
80#define CORE_CSR_CDC_CTLR_CFG0 0x130
81#define CORE_SW_TRIG_FULL_CALIB BIT(16)
82#define CORE_HW_AUTOCAL_ENA BIT(17)
83
84#define CORE_CSR_CDC_CTLR_CFG1 0x134
85#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
86#define CORE_TIMER_ENA BIT(16)
87
88#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
89#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
90#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
91#define CORE_CDC_OFFSET_CFG 0x14C
92#define CORE_CSR_CDC_DELAY_CFG 0x150
93#define CORE_CDC_SLAVE_DDA_CFG 0x160
94#define CORE_CSR_CDC_STATUS0 0x164
95#define CORE_CALIBRATION_DONE BIT(0)
96
97#define CORE_CDC_ERROR_CODE_MASK 0x7000000
98
99#define CORE_CSR_CDC_GEN_CFG 0x178
100#define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
101#define CORE_CDC_SWITCH_RC_EN BIT(1)
102
103#define CORE_DDR_200_CFG 0x184
104#define CORE_CDC_T4_DLY_SEL BIT(0)
105#define CORE_START_CDC_TRAFFIC BIT(6)
02e4293d
RH
106#define CORE_VENDOR_SPEC3 0x1b0
107#define CORE_PWRSAVE_DLL BIT(3)
108
109#define CORE_DDR_CONFIG 0x1b8
110#define DDR_CONFIG_POR_VAL 0x80040853
cc392c58 111
3a3ad3e9
GD
112#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11c
113
abf270e5 114#define INVALID_TUNING_PHASE -1
80031bde 115#define SDHCI_MSM_MIN_CLOCK 400000
ff06ce41 116#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
80031bde 117
415b5a75
GD
118#define CDR_SELEXT_SHIFT 20
119#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
120#define CMUX_SHIFT_PHASE_SHIFT 24
121#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
122
67e6db11 123#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
/* Per-controller driver state, stored as sdhci_pltfm private data. */
struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	int pwr_irq;		/* power irq */
	struct clk *clk;	/* main SD/MMC bus clock */
	struct clk *pclk;	/* SDHC peripheral bus clock */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll*/
	unsigned long clk_rate;	/* last rate successfully set on @clk */
	struct mmc_host *mmc;
	bool use_14lpp_dll_reset;	/* 14lpp-specific DLL reset sequence needed */
	bool tuning_done;	/* set once execute_tuning() succeeded */
	bool calibration_done;	/* set once HS400 DLL calibration succeeded */
	u8 saved_tuning_phase;	/* phase chosen by the last tuning run */
	bool use_cdclp533;	/* use CDCLP533 circuit for HS400 calibration */
};
140
0fb8a3d4
RH
141static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
142 unsigned int clock)
143{
144 struct mmc_ios ios = host->mmc->ios;
145 /*
146 * The SDHC requires internal clock frequency to be double the
147 * actual clock that will be set for DDR mode. The controller
148 * uses the faster clock(100/400MHz) for some of its parts and
149 * send the actual required clock (50/200MHz) to the card.
150 */
151 if (ios.timing == MMC_TIMING_UHS_DDR50 ||
152 ios.timing == MMC_TIMING_MMC_DDR52 ||
153 ios.timing == MMC_TIMING_MMC_HS400)
154 clock *= 2;
155 return clock;
156}
/*
 * Program the core clock to the rate appropriate for the current bus
 * timing (DDR modes get double the card clock — see
 * msm_get_clock_rate_for_bus_mode()). On success the achieved rate is
 * cached in msm_host->clk_rate; on failure the cache is left untouched.
 */
static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
					    unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios curr_ios = host->mmc->ios;
	int rc;

	clock = msm_get_clock_rate_for_bus_mode(host, clock);
	rc = clk_set_rate(msm_host->clk, clock);
	if (rc) {
		pr_err("%s: Failed to set clock at rate %u at timing %d\n",
		       mmc_hostname(host->mmc), clock,
		       curr_ios.timing);
		return;
	}
	msm_host->clk_rate = clock;
	/* clk_get_rate() reports the rate actually achieved by the clock */
	pr_debug("%s: Setting clock at rate %lu at timing %d\n",
		 mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
		 curr_ios.timing);
}
0eb0d9f4 180/* Platform specific tuning */
415b5a75
GD
181static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
182{
183 u32 wait_cnt = 50;
184 u8 ck_out_en;
185 struct mmc_host *mmc = host->mmc;
186
187 /* Poll for CK_OUT_EN bit. max. poll time = 50us */
188 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
189 CORE_CK_OUT_EN);
190
191 while (ck_out_en != poll) {
192 if (--wait_cnt == 0) {
193 dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
194 mmc_hostname(mmc), poll);
195 return -ETIMEDOUT;
196 }
197 udelay(1);
198
199 ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
200 CORE_CK_OUT_EN);
201 }
202
203 return 0;
204}
/*
 * Select one of the 16 DLL clock output phases as the sampling point.
 *
 * The phase value (0..15) is converted to the hardware's grey-coded
 * encoding before being written to the CDR_SELEXT field. The register
 * sequence (disable CK_OUT_EN, poll it low, program phase, re-enable,
 * poll it high, then hand control back to the CDR) must be performed in
 * exactly this order under host->lock.
 *
 * Returns 0 on success, -EINVAL for an out-of-range phase, or
 * -ETIMEDOUT if CK_OUT_EN never reached the expected state.
 */
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	/* Maps linear phase 0..15 to the hardware's grey-code encoding */
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	/* Take manual control of the clock output (CDR off, ext enable on) */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	/* Re-enable the CDR and drop external phase control */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}
/*
 * Find out the greatest range of consecuitive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

/*
 * @phase_table holds the phases (ascending) that passed tuning;
 * @total_phases is how many entries it contains. Consecutive phases are
 * grouped into "rows" (windows); since phases wrap mod 16, a window
 * containing 15 and one containing 0 are merged into a single cyclic
 * window. Returns the phase at the 3/4 point of the widest window, or a
 * negative errno on invalid input.
 */
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	/* ranges[row][col]: each row is one run of consecutive phases */
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	/* Split the sorted phase list into runs of consecutive values */
	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window? */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exist between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in raw where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in raw where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
			    ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	/* Pick the widest window */
	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	/* Use the phase 3/4 of the way into the selected window */
	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}
375
376static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
0eb0d9f4 377{
415b5a75
GD
378 u32 mclk_freq = 0, config;
379
380 /* Program the MCLK value to MCLK_FREQ bit field */
381 if (host->clock <= 112000000)
382 mclk_freq = 0;
383 else if (host->clock <= 125000000)
384 mclk_freq = 1;
385 else if (host->clock <= 137000000)
386 mclk_freq = 2;
387 else if (host->clock <= 150000000)
388 mclk_freq = 3;
389 else if (host->clock <= 162000000)
390 mclk_freq = 4;
391 else if (host->clock <= 175000000)
392 mclk_freq = 5;
393 else if (host->clock <= 187000000)
394 mclk_freq = 6;
395 else if (host->clock <= 200000000)
396 mclk_freq = 7;
397
398 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
399 config &= ~CMUX_SHIFT_PHASE_MASK;
400 config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
401 writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
402}
403
/* Initialize the DLL (Programmable Delay Line) */
/*
 * Full DLL reset/initialization sequence, performed under host->lock:
 * disable clock power-save, (optionally, on 14lpp parts) gate the DLL
 * clock, assert RST and PDN, program MCLK_FREQ, deassert RST and PDN,
 * enable the DLL and its clock output, then wait for DLL_LOCK.
 *
 * Returns 0 on success, -ETIMEDOUT if the DLL never locks (~50us).
 */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags;
	u32 config;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);

	/* 14lpp parts require the DLL clock to be gated across the reset */
	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	/*
	 * Program the FLL cycle count from the TCXO rate; the multiplier
	 * (8 vs 4) depends on the FLL_CYCLE_CNT setting already in
	 * DLL_CONFIG_2.
	 */
	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
					clk_get_rate(msm_host->xo_clk));
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
					clk_get_rate(msm_host->xo_clk));

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Re-enable the DLL clock that was gated before the reset */
	if (msm_host->use_14lpp_dll_reset) {
		msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
		 CORE_DLL_LOCK)) {
		/* max. wait for 50us sec for LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
				mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}
505
/*
 * Route the free-running MCLK to the controller and return mode
 * selection to the standard Host Control2 register (used for every
 * timing except HS400).
 */
static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;

	/* PWRSAVE_DLL is only meaningful on non-CDCLP533 parts */
	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				CORE_VENDOR_SPEC3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				CORE_VENDOR_SPEC3);
	}

	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from Host Control2 register for all other
	 * modes.
	 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
	 * in VENDOR_SPEC_FUNC
	 */
	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);

	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}
543
/*
 * Switch the controller input to the divided clock (MCLK/2) and, once
 * tuning has completed, force HS400 mode through the vendor-specific
 * HC_SELECT_IN field instead of Host Control2.
 */
static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, dll_lock;
	int rc;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;

	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
	/*
	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
	 * register
	 */
	if (msm_host->tuning_done && !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
	}
	/* First clock setup on a non-CDCLP533 part: wait for the DLL lock */
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
		 * CORE_DLL_STATUS to be set. This should get set
		 * within 15 us at 200 MHz.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
						CORE_DLL_STATUS,
						dll_lock,
						(dll_lock &
						(CORE_DLL_LOCK |
						CORE_DDR_DLL_LOCK)), 10,
						1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}
590
591/*
592 * sdhci_msm_hc_select_mode :- In general all timing modes are
593 * controlled via UHS mode select in Host Control2 register.
594 * eMMC specific HS200/HS400 doesn't have their respective modes
595 * defined here, hence we use these values.
596 *
597 * HS200 - SDR104 (Since they both are equivalent in functionality)
598 * HS400 - This involves multiple configurations
599 * Initially SDR104 - when tuning is required as HS200
600 * Then when switching to DDR @ 400MHz (HS400) we use
601 * the vendor specific HC_SELECT_IN to control the mode.
602 *
603 * In addition to controlling the modes we also need to select the
604 * correct input clock for DLL depending on the mode.
605 *
606 * HS400 - divided clock (free running MCLK/2)
607 * All other modes - default (free running MCLK)
608 */
609void sdhci_msm_hc_select_mode(struct sdhci_host *host)
610{
611 struct mmc_ios ios = host->mmc->ios;
612
613 if (ios.timing == MMC_TIMING_MMC_HS400)
614 msm_hc_select_hs400(host);
615 else
616 msm_hc_select_default(host);
617}
618
/*
 * HS400 calibration using the CDCLP533 circuit: re-initialize the DLL,
 * restore the tuning phase saved during HS200 tuning, program the CDC
 * register recipe, trigger hardware auto-calibration and wait for
 * completion. Register values and ordering follow the hardware
 * programming sequence — do not reorder.
 *
 * Returns 0 on success, -ETIMEDOUT if calibration never completes, or
 * -EINVAL if the CDC reports an error code.
 */
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);

	/*
	 * Perform CDC Register Initialization Sequence
	 *
	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
	 * CORE_CSR_CDC_DELAY_CFG	0x3AC
	 * CORE_CDC_OFFSET_CFG		0x0
	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
	 */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x3AC, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	/* Pulse SW_TRIG_FULL_CALIB, then enable HW auto-cal + cal timer */
	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/* A non-zero error-code field means calibration failed */
	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
731
/*
 * HS400 calibration for parts using the SDC4 CM_DLL (instead of
 * CDCLP533): reprogram CORE_DDR_CONFIG with its power-on-reset value,
 * start DDR calibration and wait for DDR_DLL_LOCK, then enable the DLL
 * power-save mode.
 *
 * Returns 0 on success or -ETIMEDOUT if the DDR DLL never locks.
 */
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	u32 dll_status, config;
	int ret;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Currently the CORE_DDR_CONFIG register defaults to desired
	 * configuration on reset. Currently reprogramming the power on
	 * reset (POR) value in case it might have been modified by
	 * bootloaders. In the future, if this changes, then the desired
	 * values will need to be programmed appropriately.
	 */
	writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + CORE_DDR_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3);
	config |= CORE_PWRSAVE_DLL;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC3);

	/*
	 * Drain writebuffer to ensure above DLL calibration
	 * and PWRSAVE DLL is enabled.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
777
/*
 * Top-level HS400 DLL calibration: reset the tuning block, restore the
 * phase saved from HS200 tuning, then run the variant-specific
 * calibration (CDCLP533 or SDC4 CM_DLL).
 *
 * Returns 0 on success or a negative errno from any step.
 */
static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;
	u32 config;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	/* Dispatch to the calibration circuit present on this part */
	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
812
/*
 * Platform tuning entry point (.platform_execute_tuning). Sweeps all 16
 * DLL phases, sending the tuning command at each, then programs the
 * phase picked by msm_find_most_appropriate_phase(). The whole sweep is
 * retried up to 3 times if no phase passes.
 *
 * Returns 0 on success (or when tuning is not required for the current
 * timing/clock), -EIO if no working phase is found, or a negative errno
 * from the DLL helpers.
 */
static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int tuning_seq_cnt = 3;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
	    !(ios.timing == MMC_TIMING_MMC_HS400 ||
	      ios.timing == MMC_TIMING_MMC_HS200 ||
	      ios.timing == MMC_TIMING_UHS_SDR104))
		return 0;

retry:
	/* First of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

	phase = 0;
	do {
		/* Set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		/* Remember the phase for HS400 calibration later on */
		msm_host->saved_tuning_phase = phase;
		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				 mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			 mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
			mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}
886
db9bd163
RH
887/*
888 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
889 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
890 * fixed feedback clock is used.
891 */
892static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
893{
894 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
895 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
896 int ret;
897
898 if (host->clock > CORE_FREQ_100MHZ &&
899 msm_host->tuning_done && !msm_host->calibration_done) {
900 ret = sdhci_msm_hs400_dll_calibration(host);
901 if (!ret)
902 msm_host->calibration_done = true;
903 else
904 pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
905 mmc_hostname(host->mmc), ret);
906 }
907}
908
/*
 * .set_uhs_signaling host op: program the UHS mode-select field of Host
 * Control2 for the requested timing. HS200/HS400 are mapped onto SDR104
 * (no dedicated encodings). For clocks <= 100MHz the DLL is reset and
 * powered down so fixed feedback clocking is used and tuning can be
 * skipped. For HS400 the DLL calibration helper is invoked with
 * host->lock temporarily dropped (the helpers sleep/poll).
 */
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * DLL is not required for clock <= 100MHz
		 * Thus, make sure DLL it is disabled when not required
		 */
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	/* Calibration may sleep — drop the spinlock across it */
	spin_unlock_irq(&host->lock);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);

	spin_lock_irq(&host->lock);
}
983
ad81d387
GD
/*
 * sdhci_msm_voltage_switch - complete the PMIC power-control handshake.
 *
 * Reads the pending request bits from CORE_PWRCTL_STATUS, clears them via
 * CORE_PWRCTL_CLEAR, and reports success for bus power and I/O voltage
 * requests through CORE_PWRCTL_CTL.  The actual voltage switching is done
 * by the sdhci core, so only success is acknowledged here.
 */
static void sdhci_msm_voltage_switch(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 irq_status, irq_ack = 0;

	irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
	irq_status &= INT_MASK;

	/* Clear the handled request bits before acknowledging them. */
	writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);

	if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH))
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back if it succeeded or not to this register. The voltage
	 * switches are handled by the sdhci core, so just report success.
	 */
	writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
}
1007
1008static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
1009{
1010 struct sdhci_host *host = (struct sdhci_host *)data;
1011
1012 sdhci_msm_voltage_switch(host);
1013
1014 return IRQ_HANDLED;
1015}
1016
80031bde
RH
1017static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
1018{
1019 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1020 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1021
1022 return clk_round_rate(msm_host->clk, ULONG_MAX);
1023}
1024
/* Lowest card clock rate supported; reported to the sdhci core. */
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}
1029
edc609fd
RH
1030/**
1031 * __sdhci_msm_set_clock - sdhci_msm clock control.
1032 *
1033 * Description:
1034 * MSM controller does not use internal divider and
1035 * instead directly control the GCC clock as per
1036 * HW recommendation.
1037 **/
1038void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
1039{
1040 u16 clk;
1041 /*
1042 * Keep actual_clock as zero -
1043 * - since there is no divider used so no need of having actual_clock.
1044 * - MSM controller uses SDCLK for data timeout calculation. If
1045 * actual_clock is zero, host->clock is taken for calculation.
1046 */
1047 host->mmc->actual_clock = 0;
1048
1049 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1050
1051 if (clock == 0)
1052 return;
1053
1054 /*
1055 * MSM controller do not use clock divider.
1056 * Thus read SDHCI_CLOCK_CONTROL and only enable
1057 * clock with no divider value programmed.
1058 */
1059 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1060 sdhci_enable_clk(host, clk);
1061}
1062
1063/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
1064static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
1065{
1066 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1067 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
edc609fd
RH
1068
1069 if (!clock) {
1070 msm_host->clk_rate = clock;
1071 goto out;
1072 }
1073
1074 spin_unlock_irq(&host->lock);
ff06ce41 1075
b54aaa8a 1076 sdhci_msm_hc_select_mode(host);
edc609fd 1077
0fb8a3d4 1078 msm_set_clock_rate_for_bus_mode(host, clock);
edc609fd 1079
edc609fd
RH
1080 spin_lock_irq(&host->lock);
1081out:
1082 __sdhci_msm_set_clock(host, clock);
1083}
1084
0eb0d9f4
GD
/* Devicetree match table: this driver binds to "qcom,sdhci-msm-v4" nodes. */
static const struct of_device_id sdhci_msm_dt_match[] = {
	{ .compatible = "qcom,sdhci-msm-v4" },
	{},
};

MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
1091
/* MSM-specific hooks plugged into the generic SDHCI core. */
static const struct sdhci_ops sdhci_msm_ops = {
	.platform_execute_tuning = sdhci_msm_execute_tuning,
	.reset = sdhci_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.voltage_switch = sdhci_msm_voltage_switch,
};
1102
a50396a4
JZ
/* Platform data: SDHCI quirks that apply to this controller, plus the ops. */
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};
1111
0eb0d9f4
GD
/*
 * sdhci_msm_probe - probe and initialize the MSM SDHCI controller.
 *
 * Acquires the bus/iface/core/xo clocks, maps the vendor "core" register
 * space, soft-resets the core, switches it into SDHCI (HC) mode, patches
 * up capabilities on controller versions that do not advertise them,
 * wires up the PMIC power IRQ, and registers the host with runtime PM
 * (autosuspend) enabled.  Returns 0 on success or a negative errno;
 * the goto labels unwind resources in reverse acquisition order.
 */
static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct resource *core_memres;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	sdhci_get_of_property(pdev);

	/* No tuning phase saved yet. */
	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(msm_host->pclk)) {
		ret = PTR_ERR(msm_host->pclk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}

	ret = clk_prepare_enable(msm_host->pclk);
	if (ret)
		goto bus_clk_disable;

	/* Setup SDC MMC clock */
	msm_host->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(msm_host->clk)) {
		ret = PTR_ERR(msm_host->clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto pclk_disable;
	}

	/*
	 * xo clock is needed for FLL feature of cm_dll.
	 * In case if xo clock is not mentioned in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	/* Vote for maximum clock rate for maximum performance */
	ret = clk_set_rate(msm_host->clk, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	ret = clk_prepare_enable(msm_host->clk);
	if (ret)
		goto pclk_disable;

	/* Vendor "core" register space is the second MEM resource. */
	core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);

	if (IS_ERR(msm_host->core_mem)) {
		dev_err(&pdev->dev, "Failed to remap registers\n");
		ret = PTR_ERR(msm_host->core_mem);
		goto clk_disable;
	}

	/* Soft-reset the whole core before configuring it. */
	config = readl_relaxed(msm_host->core_mem + CORE_POWER);
	config |= CORE_SW_RST;
	writel_relaxed(config, msm_host->core_mem + CORE_POWER);

	/* SW reset can take up to 10HCLK + 15MCLK cycles. (min 40us) */
	usleep_range(1000, 5000);
	if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) {
		dev_err(&pdev->dev, "Stuck in reset\n");
		ret = -ETIMEDOUT;
		goto clk_disable;
	}

	/* Set HC_MODE_EN bit in HC_MODE register */
	writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));

	/* Disable software reset of the FF clock (FF_CLK_SW_RST_DIS). */
	config = readl_relaxed(msm_host->core_mem + CORE_HC_MODE);
	config |= FF_CLK_SW_RST_DIS;
	writel_relaxed(config, msm_host->core_mem + CORE_HC_MODE);

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	/* Feature selection below keys off the MCI core major/minor. */
	core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		     CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
			       CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	/* Setup IRQ for handling power/voltage tasks with PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
			msm_host->pwr_irq);
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	/*
	 * Enable runtime PM with autosuspend; the get_noresume/set_active
	 * pair keeps the device active until sdhci_add_host() has finished.
	 */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_disable_unprepare(msm_host->clk);
pclk_disable:
	clk_disable_unprepare(msm_host->pclk);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}
1300
1301static int sdhci_msm_remove(struct platform_device *pdev)
1302{
1303 struct sdhci_host *host = platform_get_drvdata(pdev);
1304 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
6f699531 1305 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
0eb0d9f4
GD
1306 int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
1307 0xffffffff);
1308
1309 sdhci_remove_host(host, dead);
67e6db11
PG
1310
1311 pm_runtime_get_sync(&pdev->dev);
1312 pm_runtime_disable(&pdev->dev);
1313 pm_runtime_put_noidle(&pdev->dev);
1314
0eb0d9f4
GD
1315 clk_disable_unprepare(msm_host->clk);
1316 clk_disable_unprepare(msm_host->pclk);
1317 if (!IS_ERR(msm_host->bus_clk))
1318 clk_disable_unprepare(msm_host->bus_clk);
6f699531 1319 sdhci_pltfm_free(pdev);
0eb0d9f4
GD
1320 return 0;
1321}
1322
67e6db11
PG
1323#ifdef CONFIG_PM
1324static int sdhci_msm_runtime_suspend(struct device *dev)
1325{
1326 struct sdhci_host *host = dev_get_drvdata(dev);
1327 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1328 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1329
1330 clk_disable_unprepare(msm_host->clk);
1331 clk_disable_unprepare(msm_host->pclk);
1332
1333 return 0;
1334}
1335
1336static int sdhci_msm_runtime_resume(struct device *dev)
1337{
1338 struct sdhci_host *host = dev_get_drvdata(dev);
1339 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1340 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1341 int ret;
1342
1343 ret = clk_prepare_enable(msm_host->clk);
1344 if (ret) {
1345 dev_err(dev, "clk_enable failed for core_clk: %d\n", ret);
1346 return ret;
1347 }
1348 ret = clk_prepare_enable(msm_host->pclk);
1349 if (ret) {
1350 dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret);
1351 clk_disable_unprepare(msm_host->clk);
1352 return ret;
1353 }
1354
1355 return 0;
1356}
1357#endif
1358
/* System sleep reuses the runtime-PM callbacks via force_suspend/resume. */
static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};
1366
0eb0d9f4
GD
/* Platform driver glue: probe/remove, DT match table and PM ops. */
static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = &sdhci_msm_pm_ops,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");