/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/iopoll.h>

#include "sdhci-pltfm.h"

#define CORE_MCI_VERSION		0x50
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK		0xff

#define CORE_MCI_GENERICS		0x70
#define SWITCHABLE_SIGNALING_VOLTAGE	BIT(29)

#define CORE_HC_MODE		0x78
#define HC_MODE_EN		0x1
#define CORE_POWER		0x0
#define CORE_SW_RST		BIT(7)
#define FF_CLK_SW_RST_DIS	BIT(13)

#define CORE_PWRCTL_STATUS	0xdc
#define CORE_PWRCTL_MASK	0xe0
#define CORE_PWRCTL_CLEAR	0xe4
#define CORE_PWRCTL_CTL		0xe8
#define CORE_PWRCTL_BUS_OFF	BIT(0)
#define CORE_PWRCTL_BUS_ON	BIT(1)
#define CORE_PWRCTL_IO_LOW	BIT(2)
#define CORE_PWRCTL_IO_HIGH	BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS	BIT(0)
#define CORE_PWRCTL_IO_SUCCESS	BIT(2)
#define REQ_BUS_OFF		BIT(0)
#define REQ_BUS_ON		BIT(1)
#define REQ_IO_LOW		BIT(2)
#define REQ_IO_HIGH		BIT(3)
#define INT_MASK		0xf
#define MAX_PHASES		16
#define CORE_DLL_LOCK		BIT(7)
#define CORE_DDR_DLL_LOCK	BIT(11)
#define CORE_DLL_EN		BIT(16)
#define CORE_CDR_EN		BIT(17)
#define CORE_CK_OUT_EN		BIT(18)
#define CORE_CDR_EXT_EN		BIT(19)
#define CORE_DLL_PDN		BIT(29)
#define CORE_DLL_RST		BIT(30)
#define CORE_DLL_CONFIG		0x100
#define CORE_CMD_DAT_TRACK_SEL	BIT(0)
#define CORE_DLL_STATUS		0x108

#define CORE_DLL_CONFIG_2	0x1b4
#define CORE_DDR_CAL_EN		BIT(0)
#define CORE_FLL_CYCLE_CNT	BIT(18)
#define CORE_DLL_CLOCK_DISABLE	BIT(21)

#define CORE_VENDOR_SPEC	0x10c
#define CORE_VENDOR_SPEC_POR_VAL	0xa1c
#define CORE_CLK_PWRSAVE	BIT(1)
#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
#define CORE_HC_SELECT_IN_EN	BIT(18)
#define CORE_HC_SELECT_IN_HS400	(6 << 19)
#define CORE_HC_SELECT_IN_MASK	(7 << 19)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB	BIT(16)
#define CORE_HW_AUTOCAL_ENA		BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		BIT(0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	BIT(0)
#define CORE_CDC_SWITCH_RC_EN		BIT(1)

#define CORE_DDR_200_CFG		0x184
#define CORE_CDC_T4_DLY_SEL		BIT(0)
#define CORE_CMDIN_RCLK_EN		BIT(1)
#define CORE_START_CDC_TRAFFIC		BIT(6)
#define CORE_VENDOR_SPEC3		0x1b0
#define CORE_PWRSAVE_DLL		BIT(3)

#define CORE_DDR_CONFIG			0x1b8
#define DDR_CONFIG_POR_VAL		0x80040853

#define CORE_VENDOR_SPEC_CAPABILITIES0	0x11c

#define INVALID_TUNING_PHASE	-1
#define SDHCI_MSM_MIN_CLOCK	400000
#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT	20
#define CDR_SELEXT_MASK		(0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT	24
#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS	50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS	5000

struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	int pwr_irq;		/* power irq */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll */
	struct clk_bulk_data bulk_clks[4];	/* core, iface, cal, sleep clocks */
	unsigned long clk_rate;
	struct mmc_host *mmc;
	bool use_14lpp_dll_reset;
	bool tuning_done;
	bool calibration_done;
	u8 saved_tuning_phase;
	bool use_cdclp533;
	bool use_cdr;
	u32 transfer_mode;
	u32 curr_pwr_state;
	u32 curr_io_level;
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
};

static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
						    unsigned int clock)
{
	struct mmc_ios ios = host->mmc->ios;
	/*
	 * The SDHC requires the internal clock frequency to be double the
	 * actual clock that will be set for DDR mode. The controller
	 * uses the faster clock (100/400 MHz) for some of its parts and
	 * sends the actual required clock (50/200 MHz) to the card.
	 */
	if (ios.timing == MMC_TIMING_UHS_DDR50 ||
	    ios.timing == MMC_TIMING_MMC_DDR52 ||
	    ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		clock *= 2;
	return clock;
}
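
/*
 * For example, in MMC_TIMING_MMC_DDR52 with a requested clock of 52 MHz,
 * the helper above returns 104 MHz: the GCC core clock runs at twice the
 * bus rate while the card still sees 52 MHz. Likewise an HS400 request
 * for 200 MHz programs the controller clock to 400 MHz.
 */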

static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
					    unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios curr_ios = host->mmc->ios;
	struct clk *core_clk = msm_host->bulk_clks[0].clk;
	int rc;

	clock = msm_get_clock_rate_for_bus_mode(host, clock);
	rc = clk_set_rate(core_clk, clock);
	if (rc) {
		pr_err("%s: Failed to set clock at rate %u at timing %d\n",
		       mmc_hostname(host->mmc), clock,
		       curr_ios.timing);
		return;
	}
	msm_host->clk_rate = clock;
	pr_debug("%s: Setting clock at rate %lu at timing %d\n",
		 mmc_hostname(host->mmc), clk_get_rate(core_clk),
		 curr_ios.timing);
}

/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
	u32 wait_cnt = 50;
	u8 ck_out_en;
	struct mmc_host *mmc = host->mmc;

	/* Poll for CK_OUT_EN bit. max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
		       CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), poll);
			return -ETIMEDOUT;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
			       CORE_CK_OUT_EN);
	}

	return 0;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}
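
/*
 * The table above maps a linear phase index to its Gray-coded register
 * value, so stepping to an adjacent phase flips only a single CDR_SELEXT
 * bit. For example, phase 2 is written as 0x3 and phase 8 as 0xc into
 * bits [23:20] (CDR_SELEXT) of CORE_DLL_CONFIG.
 */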

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if the next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in the first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if a cycle exists between the 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form a cycle then merge them into a single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in the row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in the row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there is more than one phase window, the total
			 * number of phases in both windows must be less
			 * than MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge the 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}
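
/*
 * Worked example: if tuning succeeds at phases {0, 1, 2, 13, 14, 15},
 * the loop above first records two windows, {0, 1, 2} and {13, 14, 15}.
 * Since phase 0 and phase 15 are both present, the windows wrap around
 * and are merged into {13, 14, 15, 0, 1, 2}. With curr_max = 6, the
 * index (6 * 3) / 4 - 1 = 3 is picked, so phase 0 is returned.
 */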

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0, config;

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CMUX_SHIFT_PHASE_MASK;
	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
}
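
/*
 * For example, a 150 MHz host clock selects MCLK_FREQ = 3 and a 200 MHz
 * clock selects MCLK_FREQ = 7; the value lands in bits [26:24]
 * (CMUX_SHIFT_PHASE) of CORE_DLL_CONFIG.
 */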

/* Initialize the DLL (Delay-Locked Loop) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags, xo_clk = 0;
	u32 config;

	if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
		xo_clk = clk_get_rate(msm_host->xo_clk);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that the clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);

	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
							  xo_clk);
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
							  xo_clk);

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
		/* wait 5 us before enabling the DLL clock */
		udelay(5);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	if (msm_host->use_14lpp_dll_reset) {
		msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
		 CORE_DLL_LOCK)) {
		/* max. wait of 50 us for the LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
				mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}
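
/*
 * FLL cycle count example for the 14lpp DLL reset path above: with a
 * typical 19.2 MHz TCXO and host->clock = 192 MHz, the 4-cycle branch
 * programs DIV_ROUND_CLOSEST_ULL(192000000 * 4, 19200000) = 40 into
 * bits [17:10] of CORE_DLL_CONFIG_2.
 */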

static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;

	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				       CORE_VENDOR_SPEC3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
			       CORE_VENDOR_SPEC3);
	}

	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from the Host Control2 register for all other
	 * modes. Write 0 to the HC_SELECT_IN and HC_SELECT_IN_EN fields
	 * in VENDOR_SPEC_FUNC.
	 */
	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);

	/*
	 * Make sure the above writes impacting the free running MCLK are
	 * completed before changing the clk_rate at the GCC.
	 */
	wmb();
}

static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;

	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
	/*
	 * Select HS400 mode using the HC_SELECT_IN from the VENDOR SPEC
	 * register.
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);
	}
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on the DLL_LOCK or DDR_DLL_LOCK bits in
		 * CORE_DLL_STATUS to be set. This should get set
		 * within 15 us at 200 MHz.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
						CORE_DLL_STATUS,
						dll_lock,
						(dll_lock &
						(CORE_DLL_LOCK |
						CORE_DDR_DLL_LOCK)), 10,
						1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure the above writes impacting the free running MCLK are
	 * completed before changing the clk_rate at the GCC.
	 */
	wmb();
}

/*
 * sdhci_msm_hc_select_mode :- In general, all timing modes are
 * controlled via the UHS mode select in the Host Control2 register.
 * The eMMC-specific HS200/HS400 timings don't have their respective
 * modes defined here, hence we use these values:
 *
 * HS200 - SDR104 (since they are equivalent in functionality)
 * HS400 - This involves multiple configurations:
 *	   Initially SDR104 - when tuning is required as HS200.
 *	   Then, when switching to DDR @ 400MHz (HS400), we use
 *	   the vendor specific HC_SELECT_IN to control the mode.
 *
 * In addition to controlling the modes we also need to select the
 * correct input clock for the DLL depending on the mode:
 *
 * HS400 - divided clock (free running MCLK/2)
 * All other modes - default (free running MCLK)
 */
static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;

	if (ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		msm_hc_select_hs400(host);
	else
		msm_hc_select_default(host);
}

static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, so just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in the delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);

	/* Perform the CDC Register Initialization Sequence */
	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */
	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
		& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config;
	int ret;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Currently the CORE_DDR_CONFIG register defaults to the desired
	 * configuration on reset. Reprogram the power on reset (POR)
	 * value here in case it was modified by bootloaders. In the
	 * future, if this changes, then the desired values will need to
	 * be programmed appropriately.
	 */
	writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + CORE_DDR_CONFIG);

	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3);
	config |= CORE_PWRSAVE_DLL;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC3);

	/*
	 * Drain the write buffer to ensure the above DLL calibration is
	 * done and PWRSAVE DLL is enabled.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 config;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, so just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	if (!mmc->ios.enhanced_strobe) {
		/* Set the selected phase in the delay line hw block */
		ret = msm_config_cm_dll_phase(host,
					      msm_host->saved_tuning_phase);
		if (ret)
			goto out;
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config |= CORE_CMD_DAT_TRACK_SEL;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	}

	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
{
	u32 config, oldconfig = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);

	config = oldconfig;
	if (enable) {
		config |= CORE_CDR_EN;
		config &= ~CORE_CDR_EXT_EN;
	} else {
		config &= ~CORE_CDR_EN;
		config |= CORE_CDR_EXT_EN;
	}

	if (config != oldconfig)
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
}
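
/*
 * With CDR enabled, the controller tracks the RX sampling point on its
 * own; with CDR disabled (CDR_EXT_EN set), the sampling point stays
 * frozen at the phase programmed via msm_config_cm_dll_phase(). The
 * command-write path further below therefore enables CDR for normal
 * data reads and disables it for tuning commands, where a fixed phase
 * is being evaluated.
 */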

static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int tuning_seq_cnt = 3;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/*
	 * Tuning is required for SDR104, HS200 and HS400 modes, and only
	 * if the clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
	    !(ios.timing == MMC_TIMING_MMC_HS400 ||
	      ios.timing == MMC_TIMING_MMC_HS200 ||
	      ios.timing == MMC_TIMING_UHS_SDR104)) {
		msm_host->use_cdr = false;
		sdhci_msm_set_cdr(host, false);
		return 0;
	}

	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
	msm_host->use_cdr = true;

	/*
	 * HS400 tuning in HS200 timing requires:
	 * - select MCLK/2 in VENDOR_SPEC
	 * - program MCLK to 400MHz (or nearest supported) in GCC
	 */
	if (host->flags & SDHCI_HS400_TUNING) {
		sdhci_msm_hc_select_mode(host);
		msm_set_clock_rate_for_bus_mode(host, ios.clock);
		host->flags &= ~SDHCI_HS400_TUNING;
	}

retry:
	/* First of all, reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

	phase = 0;
	do {
		/* Set the phase in the delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		msm_host->saved_tuning_phase = phase;
		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in the delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
			mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}

/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * a fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	if (host->clock > CORE_FREQ_100MHZ &&
	    (msm_host->tuning_done || ios->enhanced_strobe) &&
	    !msm_host->calibration_done) {
		ret = sdhci_msm_hs400_dll_calibration(host);
		if (!ret)
			msm_host->calibration_done = true;
		else
			pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
			       mmc_hostname(host->mmc), ret);
	}
}

static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When the clock frequency is less than 100MHz, the feedback clock
	 * must be provided and the DLL must not be used so that tuning can
	 * be skipped. To provide the feedback clock, the mode selection can
	 * be any value less than 3'b011 in bits [2:0] of the HOST CONTROL2
	 * register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * The DLL is not required for clock <= 100MHz.
		 * Thus, make sure the DLL is disabled when not required.
		 */
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}

static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}

/*
 * sdhci_msm_check_power_status should be called on register writes
 * that can toggle the sdhci IO bus ON/OFF or change the IO lines
 * HIGH/LOW. The state the register write will change the IO lines to
 * should be passed as the argument req_type. This API checks whether
 * the IO lines are already in the expected state and waits for the
 * power irq only if a power irq is expected to be triggered, based on
 * the current and expected IO line states.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	bool done = false;
	u32 val;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
		 mmc_hostname(host->mmc), __func__, req_type,
		 msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * The power interrupt will not be generated for signal voltage
	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
	 */
	val = readl(msm_host->core_mem + CORE_MCI_GENERICS);
	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
		return;
	}

	/*
	 * The IRQ for request type IO High/LOW will be generated when
	 * there is a state change in the 1.8V enable bit (bit 3) of the
	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0,
	 * which indicates 3.3V IO voltage. So, when the MMC core layer
	 * tries to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of the controller power up sequence. Hence,
	 * check for host->pwr to handle a case where an IO voltage high
	 * request is issued even before controller power up.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
			 mmc_hostname(host->mmc), req_type);
		return;
	}
	if ((req_type & msm_host->curr_pwr_state) ||
	    (req_type & msm_host->curr_io_level))
		done = true;
	/*
	 * This is needed here to handle cases where register writes will
	 * not change the current bus state or io level of the controller.
	 * In this case, no power irq will be triggered and we should
	 * not wait.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
					msm_host->pwr_irq_flag,
					msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}
	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
		 __func__, req_type);
}
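
/*
 * For example, a nonzero write to SDHCI_POWER_CONTROL maps to REQ_BUS_ON
 * in __sdhci_msm_check_write() below. If curr_pwr_state already contains
 * REQ_BUS_ON, the helper above returns immediately; otherwise it sleeps
 * on pwr_irq_wait until sdhci_msm_pwr_irq() fires or the 5 second
 * MSM_PWR_IRQ_TIMEOUT_MS expires.
 */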

static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
	       mmc_hostname(host->mmc),
	       readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
	       readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
	       readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
}

static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 irq_status, irq_ack = 0;
	int retry = 10;
	int pwr_state = 0, io_level = 0;

	irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
	irq_status &= INT_MASK;

	writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);

	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when the actual reset and the clear/read of the status
	 * register happen at the same time. Hence, retry at least 10 times
	 * to make sure the status register is cleared. Otherwise, a
	 * spurious power IRQ would result in system instability.
	 */
	while (irq_status & readl_relaxed(msm_host->core_mem +
					  CORE_PWRCTL_STATUS)) {
		if (retry == 0) {
			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
			       mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		writel_relaxed(irq_status,
			       msm_host->core_mem + CORE_PWRCTL_CLEAR);
		retry--;
		udelay(10);
	}

	/* Handle BUS ON/OFF */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back to this register whether it succeeded or not. The
	 * voltage switches are handled by the sdhci core, so just report
	 * success.
	 */
	writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		 mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		 irq_ack);
}
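
/*
 * The status and ack bits pair up one-to-one: a BUS_ON request
 * (CORE_PWRCTL_BUS_ON, bit 1) is acknowledged with
 * CORE_PWRCTL_BUS_SUCCESS (bit 0) in CORE_PWRCTL_CTL, and an IO_HIGH
 * request (bit 3) with CORE_PWRCTL_IO_SUCCESS (bit 2).
 */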

static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_msm_handle_pwr_irq(host, irq);
	msm_host->pwr_irq_flag = 1;
	sdhci_msm_complete_pwr_irq_wait(msm_host);

	return IRQ_HANDLED;
}

static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct clk *core_clk = msm_host->bulk_clks[0].clk;

	return clk_round_rate(core_clk, ULONG_MAX);
}

static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}

/**
 * __sdhci_msm_set_clock - sdhci_msm clock control.
 *
 * Description:
 * The MSM controller does not use the internal divider and
 * instead directly controls the GCC clock, as per the
 * HW recommendation.
 **/
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	/*
	 * Keep actual_clock as zero:
	 * - since no divider is used, there is no need for actual_clock;
	 * - the MSM controller uses SDCLK for the data timeout calculation;
	 *   if actual_clock is zero, host->clock is taken for the
	 *   calculation instead.
	 */
	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	/*
	 * The MSM controller does not use a clock divider.
	 * Thus, read SDHCI_CLOCK_CONTROL and only enable the
	 * clock, with no divider value programmed.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	sdhci_enable_clk(host, clk);
}
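
/*
 * In other words, the standard SDHCI divider field stays at 0 and the
 * bus rate is whatever the GCC core clock was programmed to by
 * msm_set_clock_rate_for_bus_mode(); the SDHCI layer only gates the
 * clock on and off.
 */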

/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!clock) {
		msm_host->clk_rate = clock;
		goto out;
	}

	sdhci_msm_hc_select_mode(host);

	msm_set_clock_rate_for_bus_mode(host, clock);
out:
	__sdhci_msm_set_clock(host, clock);
}

/*
 * Platform specific register write functions. This is so that, if any
 * register write needs to be followed up by platform specific actions,
 * they can be added here. These functions can go to sleep when writes
 * to certain registers are done.
 * These functions rely on sdhci_set_ios not using a spinlock.
 */
static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 req_type = 0;

	switch (reg) {
	case SDHCI_HOST_CONTROL2:
		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
			REQ_IO_HIGH;
		break;
	case SDHCI_SOFTWARE_RESET:
		if (host->pwr && (val & SDHCI_RESET_ALL))
			req_type = REQ_BUS_OFF;
		break;
	case SDHCI_POWER_CONTROL:
		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
		break;
	case SDHCI_TRANSFER_MODE:
		msm_host->transfer_mode = val;
		break;
	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
			sdhci_msm_set_cdr(host, true);
		else
			sdhci_msm_set_cdr(host, false);
		break;
	}

	if (req_type) {
		msm_host->pwr_irq_flag = 0;
		/*
		 * Since this register write may trigger a power irq, ensure
		 * all previous register writes are complete by this point.
		 */
		mb();
	}
	return req_type;
}

/* This function may sleep */
static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);
	writew_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

/* This function may sleep */
static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);

	writeb_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

static const struct of_device_id sdhci_msm_dt_match[] = {
	{ .compatible = "qcom,sdhci-msm-v4" },
	{},
};

MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
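
/*
 * An illustrative (not normative) devicetree node matching what probe
 * below expects: two reg entries (the SDHCI core, then the MSM SDCC
 * core, whose index 1 becomes core_mem), an interrupt named "pwr_irq",
 * and clocks named "iface" and "core", with "bus", "cal", "sleep" and
 * "xo" optional. See the qcom,sdhci-msm binding for the authoritative
 * description. A sketch:
 *
 *	sdhci@f9824900 {
 *		compatible = "qcom,sdhci-msm-v4";
 *		reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
 *		interrupt-names = "hc_irq", "pwr_irq";
 *		clock-names = "iface", "core", "xo";
 *	};
 */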

static const struct sdhci_ops sdhci_msm_ops = {
	.reset = sdhci_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.write_w = sdhci_msm_writew,
	.write_b = sdhci_msm_writeb,
};

static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};

static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct resource *core_memres;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	sdhci_get_of_property(pdev);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* Vote for maximum clock rate for maximum performance */
	ret = clk_set_rate(clk, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto bus_clk_disable;

	/*
	 * The xo clock is needed for the FLL feature of cm_dll.
	 * If the xo clock is not specified in the DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);

	if (IS_ERR(msm_host->core_mem)) {
		dev_err(&pdev->dev, "Failed to remap registers\n");
		ret = PTR_ERR(msm_host->core_mem);
		goto clk_disable;
	}

	/* Reset the vendor spec register to the power on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
		       host->ioaddr + CORE_VENDOR_SPEC);

	/* Set the HC_MODE_EN bit in the HC_MODE register */
	writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));

	config = readl_relaxed(msm_host->core_mem + CORE_HC_MODE);
	config |= FF_CLK_SW_RST_DIS;
	writel_relaxed(config, msm_host->core_mem + CORE_HC_MODE);

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		     CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controllers with major version 1, minor version 0x34 and
	 * later with HS400 mode support will use the CM DLL instead of the
	 * CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
			       CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	/*
	 * The power on reset state may trigger the power irq if the
	 * previous status of PWRCTL was either BUS_ON or IO_HIGH_V. So,
	 * before enabling the pwr irq interrupt in the GIC, any pending
	 * power irq interrupt should be acknowledged. Otherwise the power
	 * irq interrupt handler would fire prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that the above writes are propagated before interrupt
	 * enablement in the GIC.
	 */
	mb();

	/* Setup IRQ for handling power/voltage tasks with PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
			msm_host->pwr_irq);
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);
	/* Enable pwr irq interrupts */
	writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}

static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);

	sdhci_remove_host(host, dead);

	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
	sdhci_pltfm_free(pdev);
	return 0;
}

#ifdef CONFIG_PM
static int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return 0;
}

static int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				       msm_host->bulk_clks);
}
#endif

static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};

static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = &sdhci_msm_pm_ops,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");