/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include "phy-qcom-ufs-i.h"

#define MAX_PROP_NAME		32
#define VDDA_PHY_MIN_UV		1000000
#define VDDA_PHY_MAX_UV		1000000
#define VDDA_PLL_MIN_UV		1800000
#define VDDA_PLL_MAX_UV		1800000
#define VDDP_REF_CLK_MIN_UV	1200000
#define VDDP_REF_CLK_MAX_UV	1200000

static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
				    const char *, bool);
static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
				  const char *);
static int ufs_qcom_phy_base_init(struct platform_device *pdev,
				  struct ufs_qcom_phy *phy_common);

int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
			   struct ufs_qcom_phy_calibration *tbl_A,
			   int tbl_size_A,
			   struct ufs_qcom_phy_calibration *tbl_B,
			   int tbl_size_B, bool is_rate_B)
{
	int i;
	int ret = 0;

	if (!tbl_A) {
		dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < tbl_size_A; i++)
		writel_relaxed(tbl_A[i].cfg_value,
			       ufs_qcom_phy->mmio + tbl_A[i].reg_offset);

	/*
	 * In case we would like to work in rate B, we need to override
	 * the registers that were configured from the rate A table with
	 * the values from the rate B table.
	 */
	if (is_rate_B) {
		if (!tbl_B) {
			dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL",
				__func__);
			ret = -EINVAL;
			goto out;
		}

		for (i = 0; i < tbl_size_B; i++)
			writel_relaxed(tbl_B[i].cfg_value,
				       ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
	}

	/* flush buffered writes */
	mb();

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
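
/*
 * A minimal usage sketch, assuming the UFS_QCOM_PHY_CAL_ENTRY() helper
 * from phy-qcom-ufs-i.h; the register offsets and values below are made
 * up, and the real calibration tables live in the SoC-specific PHY
 * drivers:
 *
 *	static struct ufs_qcom_phy_calibration cal_rate_A[] = {
 *		UFS_QCOM_PHY_CAL_ENTRY(0x0c, 0x02),
 *		UFS_QCOM_PHY_CAL_ENTRY(0x10, 0x00),
 *	};
 *
 *	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
 *				     cal_rate_A, ARRAY_SIZE(cal_rate_A),
 *				     cal_rate_B, ARRAY_SIZE(cal_rate_B),
 *				     is_rate_B);
 */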

struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
			struct ufs_qcom_phy *common_cfg,
			const struct phy_ops *ufs_qcom_phy_gen_ops,
			struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
	int err;
	struct device *dev = &pdev->dev;
	struct phy *generic_phy = NULL;
	struct phy_provider *phy_provider;

	err = ufs_qcom_phy_base_init(pdev, common_cfg);
	if (err) {
		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
		goto out;
	}

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		err = PTR_ERR(phy_provider);
		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
		goto out;
	}

	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
	if (IS_ERR(generic_phy)) {
		err = PTR_ERR(generic_phy);
		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
		generic_phy = NULL;
		goto out;
	}

	common_cfg->phy_spec_ops = phy_spec_ops;
	common_cfg->dev = dev;

out:
	return generic_phy;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
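
/*
 * A minimal caller sketch ("foo" names are hypothetical; the real callers
 * are the SoC-specific UFS PHY drivers, which supply their own phy_ops and
 * ufs_qcom_phy_specific_ops):
 *
 *	static struct ufs_qcom_phy_specific_ops foo_phy_ops = {
 *		.calibrate_phy		= foo_phy_calibrate,
 *		.start_serdes		= foo_phy_start_serdes,
 *		.is_physical_coding_sublayer_ready = foo_phy_is_pcs_ready,
 *		.set_tx_lane_enable	= foo_phy_set_tx_lane_enable,
 *		.power_control		= foo_phy_power_control,
 *	};
 *
 *	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
 *				&foo_phy_gen_ops, &foo_phy_ops);
 */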

/*
 * This assumes the embedded phy structure inside generic_phy is of type
 * struct ufs_qcom_phy. In order to function properly it's crucial to keep
 * the embedded struct "struct ufs_qcom_phy common_cfg" as the first member
 * inside generic_phy.
 */
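/*
 * For illustration, a SoC-specific PHY driver is expected to lay out its
 * private structure like this ("foo" is a made-up name):
 *
 *	struct ufs_qcom_phy_qmp_foo {
 *		struct ufs_qcom_phy common_cfg;	// must stay first
 *		... SoC-specific members ...
 *	};
 */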
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
	return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
}
EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);

static
int ufs_qcom_phy_base_init(struct platform_device *pdev,
			   struct ufs_qcom_phy *phy_common)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err = 0;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
	phy_common->mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR((void const *)phy_common->mmio)) {
		err = PTR_ERR((void const *)phy_common->mmio);
		phy_common->mmio = NULL;
		dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
			__func__, err);
		return err;
	}

	/* "dev_ref_clk_ctrl_mem" is an optional resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "dev_ref_clk_ctrl_mem");
	phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio))
		phy_common->dev_ref_clk_ctrl_mmio = NULL;

	return 0;
}

static int __ufs_qcom_phy_clk_get(struct phy *phy,
			const char *name, struct clk **clk_out, bool err_print)
{
	struct clk *clk;
	int err = 0;
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		if (err_print)
			dev_err(dev, "failed to get %s err %d", name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static
int ufs_qcom_phy_clk_get(struct phy *phy,
			 const char *name, struct clk **clk_out)
{
	return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
}

int
ufs_qcom_phy_init_clks(struct phy *generic_phy,
		       struct ufs_qcom_phy *phy_common)
{
	int err;

	err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
				   &phy_common->tx_iface_clk);
	if (err)
		goto out;

	err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
				   &phy_common->rx_iface_clk);
	if (err)
		goto out;

	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
				   &phy_common->ref_clk_src);
	if (err)
		goto out;

	/*
	 * "ref_clk_parent" is optional hence don't abort init if it's not
	 * found.
	 */
	__ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
			       &phy_common->ref_clk_parent, false);

	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
				   &phy_common->ref_clk);

out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
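
/*
 * The clock lookups above map to device tree entries along these lines
 * (sketch only; the clock names are taken from the devm_clk_get() calls
 * above, and the bindings document is authoritative):
 *
 *	clock-names = "tx_iface_clk", "rx_iface_clk", "ref_clk_src",
 *		      "ref_clk_parent", "ref_clk";
 *	clocks = <...>;
 */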

int
ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
			      struct ufs_qcom_phy *phy_common)
{
	int err;

	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
				     "vdda-pll");
	if (err)
		goto out;

	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
				     "vdda-phy");
	if (err)
		goto out;

	/* vddp-ref-clk-* properties are optional */
	__ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
				 "vddp-ref-clk", true);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
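
/*
 * The vreg init helpers below pull per-supply settings from the device
 * tree. A sketch of the properties consumed for, e.g., "vdda-phy"
 * (property names follow the snprintf() patterns below; the supply
 * handle and the current value are illustrative):
 *
 *	vdda-phy-supply = <&foo_ldo>;
 *	vdda-phy-max-microamp = <18000>;
 *	vdda-phy-always-on;
 */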

static int __ufs_qcom_phy_init_vreg(struct phy *phy,
		struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
{
	int err = 0;
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;

	char prop_name[MAX_PROP_NAME];

	vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!vreg->name) {
		err = -ENOMEM;
		goto out;
	}

	vreg->reg = devm_regulator_get(dev, name);
	if (IS_ERR(vreg->reg)) {
		err = PTR_ERR(vreg->reg);
		vreg->reg = NULL;
		if (!optional)
			dev_err(dev, "failed to get %s, %d\n", name, err);
		goto out;
	}

	if (dev->of_node) {
		snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
		err = of_property_read_u32(dev->of_node,
					   prop_name, &vreg->max_uA);
		if (err && err != -EINVAL) {
			dev_err(dev, "%s: failed to read %s\n",
				__func__, prop_name);
			goto out;
		} else if (err == -EINVAL || !vreg->max_uA) {
			if (regulator_count_voltages(vreg->reg) > 0) {
				dev_err(dev, "%s: %s is mandatory\n",
					__func__, prop_name);
				err = -EINVAL;
				goto out;
			}
			err = 0;
		}
		snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
		vreg->is_always_on = of_property_read_bool(dev->of_node,
							   prop_name);
	}

	if (!strcmp(name, "vdda-pll")) {
		vreg->max_uV = VDDA_PLL_MAX_UV;
		vreg->min_uV = VDDA_PLL_MIN_UV;
	} else if (!strcmp(name, "vdda-phy")) {
		vreg->max_uV = VDDA_PHY_MAX_UV;
		vreg->min_uV = VDDA_PHY_MIN_UV;
	} else if (!strcmp(name, "vddp-ref-clk")) {
		vreg->max_uV = VDDP_REF_CLK_MAX_UV;
		vreg->min_uV = VDDP_REF_CLK_MIN_UV;
	}

out:
	return err;
}

static int ufs_qcom_phy_init_vreg(struct phy *phy,
			struct ufs_qcom_phy_vreg *vreg, const char *name)
{
	return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
}

static
int ufs_qcom_phy_cfg_vreg(struct phy *phy,
			  struct ufs_qcom_phy_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg = vreg->reg;
	const char *name = vreg->name;
	int min_uV;
	int uA_load;
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;

	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
				__func__, name, ret);
			goto out;
		}
		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(reg, uA_load);
		if (ret >= 0) {
			/*
			 * regulator_set_load() returns the new regulator
			 * mode upon success.
			 */
			ret = 0;
		} else {
			dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
				__func__, name, uA_load, ret);
			goto out;
		}
	}
out:
	return ret;
}

static
int ufs_qcom_phy_enable_vreg(struct phy *phy,
			     struct ufs_qcom_phy_vreg *vreg)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
	if (ret) {
		dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	ret = regulator_enable(vreg->reg);
	if (ret) {
		dev_err(dev, "%s: enable failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	vreg->enabled = true;
out:
	return ret;
}

int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
{
	int ret = 0;
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->is_ref_clk_enabled)
		goto out;

	/*
	 * The reference clock is propagated in a daisy-chained manner from
	 * the source to the PHY, so ungate it at each stage.
	 */
	ret = clk_prepare_enable(phy->ref_clk_src);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * "ref_clk_parent" is an optional clock, hence make sure that the
	 * clk reference is available before trying to enable the clock.
	 */
	if (phy->ref_clk_parent) {
		ret = clk_prepare_enable(phy->ref_clk_parent);
		if (ret) {
			dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
				__func__, ret);
			goto out_disable_src;
		}
	}

	ret = clk_prepare_enable(phy->ref_clk);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
			__func__, ret);
		goto out_disable_parent;
	}

	phy->is_ref_clk_enabled = true;
	goto out;

out_disable_parent:
	if (phy->ref_clk_parent)
		clk_disable_unprepare(phy->ref_clk_parent);
out_disable_src:
	clk_disable_unprepare(phy->ref_clk_src);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);

static
int ufs_qcom_phy_disable_vreg(struct phy *phy,
			      struct ufs_qcom_phy_vreg *vreg)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;
	int ret = 0;

	if (!vreg || !vreg->enabled || vreg->is_always_on)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufs_qcom_phy_cfg_vreg(phy, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
			__func__, vreg->name, ret);
	}
out:
	return ret;
}

void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->is_ref_clk_enabled) {
		clk_disable_unprepare(phy->ref_clk);
		/*
		 * "ref_clk_parent" is an optional clock, hence make sure
		 * that the clk reference is available before trying to
		 * disable the clock.
		 */
		if (phy->ref_clk_parent)
			clk_disable_unprepare(phy->ref_clk_parent);
		clk_disable_unprepare(phy->ref_clk_src);
		phy->is_ref_clk_enabled = false;
	}
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);

#define UFS_REF_CLK_EN	(1 << 5)

static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->dev_ref_clk_ctrl_mmio &&
	    (enable ^ phy->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= UFS_REF_CLK_EN;
		else
			temp &= ~UFS_REF_CLK_EN;

		/*
		 * If we are here to disable this clock immediately after
		 * entering into hibern8, we need to make sure that the
		 * device ref_clk is active for at least 1us after the
		 * hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();
		/*
		 * If we call hibern8 exit after this, we need to make sure
		 * that the device ref_clk is stable for at least 1us before
		 * the hibern8 exit command.
		 */
		if (enable)
			udelay(1);

		phy->is_dev_ref_clk_enabled = enable;
	}
}

void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
{
	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);

void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
{
	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
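
/*
 * A sketch of the expected sequencing from the UFS host controller driver
 * (illustrative; the actual call sites live in the host driver, and the
 * 1us settling requirements are enforced in ufs_qcom_phy_dev_ref_clk_ctrl()
 * above):
 *
 *	ufs_qcom_phy_enable_dev_ref_clk(generic_phy);
 *	// ... issue hibern8 exit, run traffic ...
 *	// ... issue hibern8 enter ...
 *	ufs_qcom_phy_disable_dev_ref_clk(generic_phy);
 */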

/* Turn ON M-PHY RMMI interface clocks */
int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (phy->is_iface_clk_enabled)
		goto out;

	ret = clk_prepare_enable(phy->tx_iface_clk);
	if (ret) {
		dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
			__func__, ret);
		goto out;
	}
	ret = clk_prepare_enable(phy->rx_iface_clk);
	if (ret) {
		clk_disable_unprepare(phy->tx_iface_clk);
		dev_err(phy->dev, "%s: rx_iface_clk enable failed %d. disabling also tx_iface_clk\n",
			__func__, ret);
		goto out;
	}
	phy->is_iface_clk_enabled = true;

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);

/* Turn OFF M-PHY RMMI interface clocks */
void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->is_iface_clk_enabled) {
		clk_disable_unprepare(phy->tx_iface_clk);
		clk_disable_unprepare(phy->rx_iface_clk);
		phy->is_iface_clk_enabled = false;
	}
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);

int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
		dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);

int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
		dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
							       tx_lanes);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);

void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
					  u8 major, u16 minor, u16 step)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	ufs_qcom_phy->host_ctrl_rev_major = major;
	ufs_qcom_phy->host_ctrl_rev_minor = minor;
	ufs_qcom_phy->host_ctrl_rev_step = step;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);

int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
		dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ret = ufs_qcom_phy->phy_spec_ops->
				calibrate_phy(ufs_qcom_phy, is_rate_B);
		if (ret)
			dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
				__func__, ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);

int ufs_qcom_phy_remove(struct phy *generic_phy,
			struct ufs_qcom_phy *ufs_qcom_phy)
{
	phy_power_off(generic_phy);

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);

int ufs_qcom_phy_exit(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (ufs_qcom_phy->is_powered_on)
		phy_power_off(generic_phy);

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);

int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
		dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
			__func__);
		return -ENOTSUPP;
	}

	return ufs_qcom_phy->phy_spec_ops->
			is_physical_coding_sublayer_ready(ufs_qcom_phy);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);

int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
	struct device *dev = phy_common->dev;
	int err;

	err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
	if (err) {
		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
			__func__, err);
		goto out;
	}

	phy_common->phy_spec_ops->power_control(phy_common, true);

	/* vdda_pll also enables ref clock LDOs so enable it first */
	err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
	if (err) {
		dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
			__func__, err);
		goto out_disable_phy;
	}

	err = ufs_qcom_phy_enable_ref_clk(generic_phy);
	if (err) {
		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
			__func__, err);
		goto out_disable_pll;
	}

	/* enable device PHY ref_clk pad rail */
	if (phy_common->vddp_ref_clk.reg) {
		err = ufs_qcom_phy_enable_vreg(generic_phy,
					       &phy_common->vddp_ref_clk);
		if (err) {
			dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
				__func__, err);
			goto out_disable_ref_clk;
		}
	}

	phy_common->is_powered_on = true;
	goto out;

out_disable_ref_clk:
	ufs_qcom_phy_disable_ref_clk(generic_phy);
out_disable_pll:
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
out_disable_phy:
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);

int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);

	phy_common->phy_spec_ops->power_control(phy_common, false);

	if (phy_common->vddp_ref_clk.reg)
		ufs_qcom_phy_disable_vreg(generic_phy,
					  &phy_common->vddp_ref_clk);
	ufs_qcom_phy_disable_ref_clk(generic_phy);

	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
	phy_common->is_powered_on = false;

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);