drivers/phy/phy-qcom-ufs.c
1/*
2 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include "phy-qcom-ufs-i.h"
16
17#define MAX_PROP_NAME 32
18#define VDDA_PHY_MIN_UV 1000000
19#define VDDA_PHY_MAX_UV 1000000
20#define VDDA_PLL_MIN_UV 1800000
21#define VDDA_PLL_MAX_UV 1800000
22#define VDDP_REF_CLK_MIN_UV 1200000
23#define VDDP_REF_CLK_MAX_UV 1200000
24
25static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
26 const char *, bool);
27static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
28 const char *);
29static int ufs_qcom_phy_base_init(struct platform_device *pdev,
30 struct ufs_qcom_phy *phy_common);
31
32int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
33 struct ufs_qcom_phy_calibration *tbl_A,
34 int tbl_size_A,
35 struct ufs_qcom_phy_calibration *tbl_B,
36 int tbl_size_B, bool is_rate_B)
37{
38 int i;
39 int ret = 0;
40
41 if (!tbl_A) {
42 dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__);
43 ret = -EINVAL;
44 goto out;
45 }
46
47 for (i = 0; i < tbl_size_A; i++)
48 writel_relaxed(tbl_A[i].cfg_value,
49 ufs_qcom_phy->mmio + tbl_A[i].reg_offset);
50
51 /*
52 * In case we want to work in rate B, we need to override the
53 * registers that were configured in the rate A table with the
54 * corresponding registers from the rate B table (an illustrative
55 * table sketch follows this function).
56 */
57 if (is_rate_B) {
58 if (!tbl_B) {
59 dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL",
60 __func__);
61 ret = -EINVAL;
62 goto out;
63 }
64
65 for (i = 0; i < tbl_size_B; i++)
66 writel_relaxed(tbl_B[i].cfg_value,
67 ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
68 }
69
70 /* flush buffered writes */
71 mb();
72
73out:
74 return ret;
75}
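/*
 * Illustrative sketch only: a generation-specific PHY driver is assumed
 * to build rate A/B tables of struct ufs_qcom_phy_calibration entries
 * and hand them to ufs_qcom_phy_calibrate(), with is_rate_B requesting
 * the override pass. The UFS_QCOM_PHY_CAL_ENTRY() helper and the register
 * name below are assumptions for illustration, not verified identifiers:
 *
 *	static struct ufs_qcom_phy_calibration cal_table_rate_A[] = {
 *		UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CFG, 0x01),
 *	};
 *	static struct ufs_qcom_phy_calibration cal_table_rate_B[] = {
 *		UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CFG, 0x02),
 *	};
 *
 *	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
 *				     cal_table_rate_A,
 *				     ARRAY_SIZE(cal_table_rate_A),
 *				     cal_table_rate_B,
 *				     ARRAY_SIZE(cal_table_rate_B),
 *				     is_rate_B);
 */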
76
77struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
78 struct ufs_qcom_phy *common_cfg,
79 struct phy_ops *ufs_qcom_phy_gen_ops,
80 struct ufs_qcom_phy_specific_ops *phy_spec_ops)
81{
82 int err;
83 struct device *dev = &pdev->dev;
84 struct phy *generic_phy = NULL;
85 struct phy_provider *phy_provider;
86
87 err = ufs_qcom_phy_base_init(pdev, common_cfg);
88 if (err) {
89 dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
90 goto out;
91 }
92
93 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
94 if (IS_ERR(phy_provider)) {
95 err = PTR_ERR(phy_provider);
96 dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
97 goto out;
98 }
99
100 generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
101 if (IS_ERR(generic_phy)) {
102 err = PTR_ERR(generic_phy);
103 dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
104 generic_phy = NULL;
105 goto out;
106 }
107
108 common_cfg->phy_spec_ops = phy_spec_ops;
109 common_cfg->dev = dev;
110
111out:
112 return generic_phy;
113}
114
115/*
116 * This assumes the phy structure embedded inside generic_phy is of type
117 * struct ufs_qcom_phy. In order to function properly it's crucial to
118 * keep "struct ufs_qcom_phy common_cfg" as the first member of the
119 * device-specific phy structure (illustrative sketch below).
120 */
121struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
122{
123 return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
124}
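/*
 * Illustrative sketch only (the wrapper struct name and extra field are
 * hypothetical): a generation-specific driver embeds the common structure
 * as its very first member so that the cast above stays valid:
 *
 *	struct ufs_qcom_phy_qmp_xx {
 *		struct ufs_qcom_phy common_cfg;	// must remain the first member
 *		void __iomem *extra_mmio;	// device-specific fields follow
 *	};
 *
 * The device-specific probe is then expected to pass &phy->common_cfg to
 * ufs_qcom_phy_generic_probe() and install it as the phy's drvdata with
 * phy_set_drvdata().
 */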
125
126static
127int ufs_qcom_phy_base_init(struct platform_device *pdev,
128 struct ufs_qcom_phy *phy_common)
129{
130 struct device *dev = &pdev->dev;
131 struct resource *res;
132 int err = 0;
133
134 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
135 if (!res) {
136 dev_err(dev, "%s: phy_mem resource not found\n", __func__);
137 err = -ENOMEM;
138 goto out;
139 }
140
141 phy_common->mmio = devm_ioremap_resource(dev, res);
142 if (IS_ERR((void const *)phy_common->mmio)) {
143 err = PTR_ERR((void const *)phy_common->mmio);
144 phy_common->mmio = NULL;
145 dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
146 __func__, err);
147 goto out;
148 }
149
150 /* "dev_ref_clk_ctrl_mem" is an optional resource */
151 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
152 "dev_ref_clk_ctrl_mem");
153 if (!res) {
154 dev_dbg(dev, "%s: dev_ref_clk_ctrl_mem resource not found\n",
155 __func__);
156 goto out;
157 }
158
159 phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
160 if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio)) {
161 err = PTR_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio);
162 phy_common->dev_ref_clk_ctrl_mmio = NULL;
163 dev_err(dev, "%s: ioremap for dev_ref_clk_ctrl_mem resource failed %d\n",
164 __func__, err);
165 }
166
167out:
168 return err;
169}
170
171static int __ufs_qcom_phy_clk_get(struct phy *phy,
172 const char *name, struct clk **clk_out, bool err_print)
173{
174 struct clk *clk;
175 int err = 0;
176 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
177 struct device *dev = ufs_qcom_phy->dev;
178
179 clk = devm_clk_get(dev, name);
180 if (IS_ERR(clk)) {
181 err = PTR_ERR(clk);
182 if (err_print)
183 dev_err(dev, "failed to get %s err %d", name, err);
184 } else {
185 *clk_out = clk;
186 }
187
188 return err;
189}
190
191static
192int ufs_qcom_phy_clk_get(struct phy *phy,
193 const char *name, struct clk **clk_out)
194{
195 return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
196}
197
198int
199ufs_qcom_phy_init_clks(struct phy *generic_phy,
200 struct ufs_qcom_phy *phy_common)
201{
202 int err;
203
204 err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
205 &phy_common->tx_iface_clk);
206 if (err)
207 goto out;
208
209 err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
210 &phy_common->rx_iface_clk);
211 if (err)
212 goto out;
213
214 err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
215 &phy_common->ref_clk_src);
216 if (err)
217 goto out;
218
219 /*
220 * "ref_clk_parent" is optional, hence don't abort init if it's not
221 * found.
222 */
223 __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
224 &phy_common->ref_clk_parent, false);
225
226 err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
227 &phy_common->ref_clk);
228
229out:
230 return err;
231}
232
233int
234ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
235 struct ufs_qcom_phy *phy_common)
236{
237 int err;
238
239 err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
240 "vdda-pll");
241 if (err)
242 goto out;
243
244 err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
245 "vdda-phy");
246
247 if (err)
248 goto out;
249
250 /* vddp-ref-clk-* properties are optional */
251 __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
252 "vddp-ref-clk", true);
253out:
254 return err;
255}
256
257static int __ufs_qcom_phy_init_vreg(struct phy *phy,
258 struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
259{
260 int err = 0;
261 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
262 struct device *dev = ufs_qcom_phy->dev;
263
264 char prop_name[MAX_PROP_NAME];
265
266 vreg->name = kstrdup(name, GFP_KERNEL);
267 if (!vreg->name) {
268 err = -ENOMEM;
269 goto out;
270 }
271
272 vreg->reg = devm_regulator_get(dev, name);
273 if (IS_ERR(vreg->reg)) {
274 err = PTR_ERR(vreg->reg);
275 vreg->reg = NULL;
276 if (!optional)
277 dev_err(dev, "failed to get %s, %d\n", name, err);
278 goto out;
279 }
280
281 if (dev->of_node) {
282 snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
283 err = of_property_read_u32(dev->of_node,
284 prop_name, &vreg->max_uA);
285 if (err && err != -EINVAL) {
286 dev_err(dev, "%s: failed to read %s\n",
287 __func__, prop_name);
288 goto out;
289 } else if (err == -EINVAL || !vreg->max_uA) {
290 if (regulator_count_voltages(vreg->reg) > 0) {
291 dev_err(dev, "%s: %s is mandatory\n",
292 __func__, prop_name);
293 goto out;
294 }
295 err = 0;
296 }
297 snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
298 if (of_get_property(dev->of_node, prop_name, NULL))
299 vreg->is_always_on = true;
300 else
301 vreg->is_always_on = false;
302 }
303
304 if (!strcmp(name, "vdda-pll")) {
305 vreg->max_uV = VDDA_PLL_MAX_UV;
306 vreg->min_uV = VDDA_PLL_MIN_UV;
307 } else if (!strcmp(name, "vdda-phy")) {
308 vreg->max_uV = VDDA_PHY_MAX_UV;
309 vreg->min_uV = VDDA_PHY_MIN_UV;
310 } else if (!strcmp(name, "vddp-ref-clk")) {
311 vreg->max_uV = VDDP_REF_CLK_MAX_UV;
312 vreg->min_uV = VDDP_REF_CLK_MIN_UV;
313 }
314
315out:
316 if (err)
317 kfree(vreg->name);
318 return err;
319}
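/*
 * Illustrative device-tree snippet only (node name, supply phandle and
 * current value are hypothetical): the per-supply properties parsed above
 * would be described like this for the "vdda-phy" regulator:
 *
 *	ufsphy: ufsphy@fc597000 {
 *		...
 *		vdda-phy-supply = <&pma_vreg>;
 *		vdda-phy-max-microamp = <18380>;
 *		vdda-phy-always-on;
 *	};
 */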
320
321static int ufs_qcom_phy_init_vreg(struct phy *phy,
322 struct ufs_qcom_phy_vreg *vreg, const char *name)
323{
324 return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
325}
326
327static
328int ufs_qcom_phy_cfg_vreg(struct phy *phy,
329 struct ufs_qcom_phy_vreg *vreg, bool on)
330{
331 int ret = 0;
332 struct regulator *reg = vreg->reg;
333 const char *name = vreg->name;
334 int min_uV;
335 int uA_load;
336 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
337 struct device *dev = ufs_qcom_phy->dev;
338
339 BUG_ON(!vreg);
340
341 if (regulator_count_voltages(reg) > 0) {
342 min_uV = on ? vreg->min_uV : 0;
343 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
344 if (ret) {
345 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
346 __func__, name, ret);
347 goto out;
348 }
349 uA_load = on ? vreg->max_uA : 0;
350 ret = regulator_set_optimum_mode(reg, uA_load);
351 if (ret >= 0) {
352 /*
353 * regulator_set_optimum_mode() returns new regulator
354 * mode upon success.
355 */
356 ret = 0;
357 } else {
358 dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
359 __func__, name, uA_load, ret);
360 goto out;
361 }
362 }
363out:
364 return ret;
365}
366
367static
368int ufs_qcom_phy_enable_vreg(struct phy *phy,
369 struct ufs_qcom_phy_vreg *vreg)
370{
371 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
372 struct device *dev = ufs_qcom_phy->dev;
373 int ret = 0;
374
375 if (!vreg || vreg->enabled)
376 goto out;
377
378 ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
379 if (ret) {
380 dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
381 __func__, ret);
382 goto out;
383 }
384
385 ret = regulator_enable(vreg->reg);
386 if (ret) {
387 dev_err(dev, "%s: enable failed, err=%d\n",
388 __func__, ret);
389 goto out;
390 }
391
392 vreg->enabled = true;
393out:
394 return ret;
395}
396
397int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
398{
399 int ret = 0;
400 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
401
402 if (phy->is_ref_clk_enabled)
403 goto out;
404
405 /*
406 * The reference clock is propagated in a daisy-chained manner from
407 * the source to the phy, so ungate the clocks at each stage.
408 */
409 ret = clk_prepare_enable(phy->ref_clk_src);
410 if (ret) {
411 dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
412 __func__, ret);
413 goto out;
414 }
415
416 /*
417 * "ref_clk_parent" is an optional clock, hence make sure that the clk
418 * reference is available before trying to enable the clock.
419 */
420 if (phy->ref_clk_parent) {
421 ret = clk_prepare_enable(phy->ref_clk_parent);
422 if (ret) {
423 dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
424 __func__, ret);
425 goto out_disable_src;
426 }
427 }
428
429 ret = clk_prepare_enable(phy->ref_clk);
430 if (ret) {
431 dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
432 __func__, ret);
433 goto out_disable_parent;
434 }
435
436 phy->is_ref_clk_enabled = true;
437 goto out;
438
439out_disable_parent:
440 if (phy->ref_clk_parent)
441 clk_disable_unprepare(phy->ref_clk_parent);
442out_disable_src:
443 clk_disable_unprepare(phy->ref_clk_src);
444out:
445 return ret;
446}
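/*
 * For reference, the reference-clock topology assumed by
 * ufs_qcom_phy_enable_ref_clk() above and ufs_qcom_phy_disable_ref_clk()
 * below is:
 *
 *	ref_clk_src --> ref_clk_parent (optional) --> ref_clk --> PHY
 */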
447
448static
449int ufs_qcom_phy_disable_vreg(struct phy *phy,
450 struct ufs_qcom_phy_vreg *vreg)
451{
452 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
453 struct device *dev = ufs_qcom_phy->dev;
454 int ret = 0;
455
456 if (!vreg || !vreg->enabled || vreg->is_always_on)
457 goto out;
458
459 ret = regulator_disable(vreg->reg);
460
461 if (!ret) {
462 /* ignore errors on applying disable config */
463 ufs_qcom_phy_cfg_vreg(phy, vreg, false);
464 vreg->enabled = false;
465 } else {
466 dev_err(dev, "%s: %s disable failed, err=%d\n",
467 __func__, vreg->name, ret);
468 }
469out:
470 return ret;
471}
472
473void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
474{
475 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
476
477 if (phy->is_ref_clk_enabled) {
478 clk_disable_unprepare(phy->ref_clk);
479 /*
480 * "ref_clk_parent" is an optional clock, hence make sure that the
481 * clk reference is available before trying to disable the clock.
482 */
483 if (phy->ref_clk_parent)
484 clk_disable_unprepare(phy->ref_clk_parent);
485 clk_disable_unprepare(phy->ref_clk_src);
486 phy->is_ref_clk_enabled = false;
487 }
488}
489
490#define UFS_REF_CLK_EN (1 << 5)
491
492static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
493{
494 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
495
496 if (phy->dev_ref_clk_ctrl_mmio &&
497 (enable ^ phy->is_dev_ref_clk_enabled)) {
498 u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);
499
500 if (enable)
501 temp |= UFS_REF_CLK_EN;
502 else
503 temp &= ~UFS_REF_CLK_EN;
504
505 /*
506 * If we are here to disable this clock immediately after
507 * entering hibern8, we need to make sure that the device
508 * ref_clk stays active for at least 1 us after the hibern8 enter.
509 */
510 if (!enable)
511 udelay(1);
512
513 writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
514 /* ensure that ref_clk is enabled/disabled before we return */
515 wmb();
516 /*
517 * If we call hibern8 exit after this, we need to make sure that
518 * device ref_clk is stable for at least 1 us before the hibern8
519 * exit command.
520 */
521 if (enable)
522 udelay(1);
523
524 phy->is_dev_ref_clk_enabled = enable;
525 }
526}
527
528void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
529{
530 ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
531}
532
533void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
534{
535 ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
536}
537
538/* Turn ON M-PHY RMMI interface clocks */
539int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
540{
541 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
542 int ret = 0;
543
544 if (phy->is_iface_clk_enabled)
545 goto out;
546
547 ret = clk_prepare_enable(phy->tx_iface_clk);
548 if (ret) {
549 dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
550 __func__, ret);
551 goto out;
552 }
553 ret = clk_prepare_enable(phy->rx_iface_clk);
554 if (ret) {
555 clk_disable_unprepare(phy->tx_iface_clk);
556 dev_err(phy->dev, "%s: rx_iface_clk enable failed %d, disabling tx_iface_clk as well\n",
557 __func__, ret);
558 goto out;
559 }
560 phy->is_iface_clk_enabled = true;
561
562out:
563 return ret;
564}
565
566/* Turn OFF M-PHY RMMI interface clocks */
567void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
568{
569 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
570
571 if (phy->is_iface_clk_enabled) {
572 clk_disable_unprepare(phy->tx_iface_clk);
573 clk_disable_unprepare(phy->rx_iface_clk);
574 phy->is_iface_clk_enabled = false;
575 }
576}
577
578int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
579{
580 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
581 int ret = 0;
582
583 if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
584 dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
585 __func__);
586 ret = -ENOTSUPP;
587 } else {
588 ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
589 }
590
591 return ret;
592}
593
594int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
595{
596 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
597 int ret = 0;
598
599 if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
600 dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
601 __func__);
602 ret = -ENOTSUPP;
603 } else {
604 ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
605 tx_lanes);
606 }
607
608 return ret;
609}
610
611void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
612 u8 major, u16 minor, u16 step)
613{
614 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
615
616 ufs_qcom_phy->host_ctrl_rev_major = major;
617 ufs_qcom_phy->host_ctrl_rev_minor = minor;
618 ufs_qcom_phy->host_ctrl_rev_step = step;
619}
620
621int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
622{
623 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
624 int ret = 0;
625
626 if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
627 dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
628 __func__);
629 ret = -ENOTSUPP;
630 } else {
631 ret = ufs_qcom_phy->phy_spec_ops->
632 calibrate_phy(ufs_qcom_phy, is_rate_B);
633 if (ret)
634 dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
635 __func__, ret);
636 }
637
638 return ret;
639}
640
641int ufs_qcom_phy_remove(struct phy *generic_phy,
642 struct ufs_qcom_phy *ufs_qcom_phy)
643{
644 phy_power_off(generic_phy);
645
646 kfree(ufs_qcom_phy->vdda_pll.name);
647 kfree(ufs_qcom_phy->vdda_phy.name);
648
649 return 0;
650}
651
652int ufs_qcom_phy_exit(struct phy *generic_phy)
653{
654 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
655
656 if (ufs_qcom_phy->is_powered_on)
657 phy_power_off(generic_phy);
658
659 return 0;
660}
661
662int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
663{
664 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
665
666 if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
667 dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
668 __func__);
669 return -ENOTSUPP;
670 }
671
672 return ufs_qcom_phy->phy_spec_ops->
673 is_physical_coding_sublayer_ready(ufs_qcom_phy);
674}
675
676int ufs_qcom_phy_power_on(struct phy *generic_phy)
677{
678 struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
679 struct device *dev = phy_common->dev;
680 int err;
681
682 err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
683 if (err) {
684 dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
685 __func__, err);
686 goto out;
687 }
688
689 phy_common->phy_spec_ops->power_control(phy_common, true);
690
691 /* vdda_pll also enables ref clock LDOs so enable it first */
692 err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
693 if (err) {
694 dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
695 __func__, err);
696 goto out_disable_phy;
697 }
698
699 err = ufs_qcom_phy_enable_ref_clk(generic_phy);
700 if (err) {
701 dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
702 __func__, err);
703 goto out_disable_pll;
704 }
705
706 /* enable device PHY ref_clk pad rail */
707 if (phy_common->vddp_ref_clk.reg) {
708 err = ufs_qcom_phy_enable_vreg(generic_phy,
709 &phy_common->vddp_ref_clk);
710 if (err) {
711 dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
712 __func__, err);
713 goto out_disable_ref_clk;
714 }
715 }
716
717 phy_common->is_powered_on = true;
718 goto out;
719
720out_disable_ref_clk:
721 ufs_qcom_phy_disable_ref_clk(generic_phy);
722out_disable_pll:
723 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
724out_disable_phy:
725 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
726out:
727 return err;
728}
729
730int ufs_qcom_phy_power_off(struct phy *generic_phy)
731{
732 struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
733
734 phy_common->phy_spec_ops->power_control(phy_common, false);
735
736 if (phy_common->vddp_ref_clk.reg)
737 ufs_qcom_phy_disable_vreg(generic_phy,
738 &phy_common->vddp_ref_clk);
739 ufs_qcom_phy_disable_ref_clk(generic_phy);
740
741 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
742 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
743 phy_common->is_powered_on = false;
744
745 return 0;
746}