Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
phy-qcom-ufs.c at v4.10-rc5 (708 lines, 18 kB)
/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include "phy-qcom-ufs-i.h"

#define MAX_PROP_NAME           32
#define VDDA_PHY_MIN_UV         1000000
#define VDDA_PHY_MAX_UV         1000000
#define VDDA_PLL_MIN_UV         1800000
#define VDDA_PLL_MAX_UV         1800000
#define VDDP_REF_CLK_MIN_UV     1200000
#define VDDP_REF_CLK_MAX_UV     1200000

int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
                           struct ufs_qcom_phy_calibration *tbl_A,
                           int tbl_size_A,
                           struct ufs_qcom_phy_calibration *tbl_B,
                           int tbl_size_B, bool is_rate_B)
{
        int i;
        int ret = 0;

        if (!tbl_A) {
                dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__);
                ret = -EINVAL;
                goto out;
        }

        for (i = 0; i < tbl_size_A; i++)
                writel_relaxed(tbl_A[i].cfg_value,
                               ufs_qcom_phy->mmio + tbl_A[i].reg_offset);

        /*
         * If we want to work in rate B, we need to override the registers
         * that were configured via the rate A table with the values from
         * the rate B table.
         */
        if (is_rate_B) {
                if (!tbl_B) {
                        dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL",
                                __func__);
                        ret = -EINVAL;
                        goto out;
                }

                for (i = 0; i < tbl_size_B; i++)
                        writel_relaxed(tbl_B[i].cfg_value,
                                       ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
        }

        /* flush buffered writes */
        mb();

out:
        return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
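/*
 * Usage sketch (illustrative, not part of this file): a SoC-specific PHY
 * driver supplies the calibration tables and calls the function above. The
 * register offsets and values below are hypothetical placeholders; real
 * tables are built with the UFS_QCOM_PHY_CAL_ENTRY() helper from
 * phy-qcom-ufs-i.h using values from the hardware programming guide.
 *
 *      static struct ufs_qcom_phy_calibration tbl_rate_A[] = {
 *              UFS_QCOM_PHY_CAL_ENTRY(0x0dc, 0x01),    // hypothetical
 *              UFS_QCOM_PHY_CAL_ENTRY(0x0e0, 0x02),    // hypothetical
 *      };
 *
 *      err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
 *                                   tbl_rate_A, ARRAY_SIZE(tbl_rate_A),
 *                                   tbl_rate_B, ARRAY_SIZE(tbl_rate_B),
 *                                   is_rate_B);
 */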
/*
 * This assumes the embedded phy structure inside generic_phy is of type
 * struct ufs_qcom_phy. In order to function properly it's crucial
 * to keep the embedded struct "struct ufs_qcom_phy common_cfg"
 * as the first member inside generic_phy.
 */
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
        return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
}
EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);

static
int ufs_qcom_phy_base_init(struct platform_device *pdev,
                           struct ufs_qcom_phy *phy_common)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        int err = 0;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
        phy_common->mmio = devm_ioremap_resource(dev, res);
        if (IS_ERR((void const *)phy_common->mmio)) {
                err = PTR_ERR((void const *)phy_common->mmio);
                phy_common->mmio = NULL;
                dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
                        __func__, err);
                return err;
        }

        /* "dev_ref_clk_ctrl_mem" is an optional resource */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "dev_ref_clk_ctrl_mem");
        phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
        if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio))
                phy_common->dev_ref_clk_ctrl_mmio = NULL;

        return 0;
}

struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
                                       struct ufs_qcom_phy *common_cfg,
                                       const struct phy_ops *ufs_qcom_phy_gen_ops,
                                       struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
        int err;
        struct device *dev = &pdev->dev;
        struct phy *generic_phy = NULL;
        struct phy_provider *phy_provider;

        err = ufs_qcom_phy_base_init(pdev, common_cfg);
        if (err) {
                dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
                goto out;
        }

        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
        if (IS_ERR(phy_provider)) {
                err = PTR_ERR(phy_provider);
                dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
                goto out;
        }

        generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
        if (IS_ERR(generic_phy)) {
                err = PTR_ERR(generic_phy);
                dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
                generic_phy = NULL;
                goto out;
        }

        common_cfg->phy_spec_ops = phy_spec_ops;
        common_cfg->dev = dev;

out:
        return generic_phy;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
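/*
 * The helpers below acquire the PHY clocks by name. A minimal device tree
 * sketch of the corresponding consumer entries (node name, unit address and
 * clock phandles are hypothetical; only the clock-names strings are taken
 * from the lookups below):
 *
 *      ufsphy: phy@fc597000 {
 *              ...
 *              clock-names = "ref_clk_src", "ref_clk_parent", "ref_clk",
 *                            "tx_iface_clk", "rx_iface_clk";
 *      };
 *
 * "ref_clk_parent" is optional, and the tx/rx interface clocks are skipped
 * entirely on "qcom,msm8996-ufs-phy-qmp-14nm".
 */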
static int __ufs_qcom_phy_clk_get(struct device *dev,
                                  const char *name, struct clk **clk_out,
                                  bool err_print)
{
        struct clk *clk;
        int err = 0;

        clk = devm_clk_get(dev, name);
        if (IS_ERR(clk)) {
                err = PTR_ERR(clk);
                if (err_print)
                        dev_err(dev, "failed to get %s err %d", name, err);
        } else {
                *clk_out = clk;
        }

        return err;
}

static int ufs_qcom_phy_clk_get(struct device *dev,
                                const char *name, struct clk **clk_out)
{
        return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
}

int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
{
        int err;

        if (of_device_is_compatible(phy_common->dev->of_node,
                                    "qcom,msm8996-ufs-phy-qmp-14nm"))
                goto skip_txrx_clk;

        err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
                                   &phy_common->tx_iface_clk);
        if (err)
                goto out;

        err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
                                   &phy_common->rx_iface_clk);
        if (err)
                goto out;

        err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
                                   &phy_common->ref_clk_src);
        if (err)
                goto out;

skip_txrx_clk:
        /*
         * "ref_clk_parent" is optional, hence don't abort init if it's
         * not found.
         */
        __ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
                               &phy_common->ref_clk_parent, false);

        err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
                                   &phy_common->ref_clk);

out:
        return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);

static int __ufs_qcom_phy_init_vreg(struct device *dev,
                struct ufs_qcom_phy_vreg *vreg, const char *name,
                bool optional)
{
        int err = 0;
        char prop_name[MAX_PROP_NAME];

        vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
        if (!vreg->name) {
                err = -ENOMEM;
                goto out;
        }

        vreg->reg = devm_regulator_get(dev, name);
        if (IS_ERR(vreg->reg)) {
                err = PTR_ERR(vreg->reg);
                vreg->reg = NULL;
                if (!optional)
                        dev_err(dev, "failed to get %s, %d\n", name, err);
                goto out;
        }

        if (dev->of_node) {
                snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
                err = of_property_read_u32(dev->of_node,
                                           prop_name, &vreg->max_uA);
                if (err && err != -EINVAL) {
                        dev_err(dev, "%s: failed to read %s\n",
                                __func__, prop_name);
                        goto out;
                } else if (err == -EINVAL || !vreg->max_uA) {
                        if (regulator_count_voltages(vreg->reg) > 0) {
                                dev_err(dev, "%s: %s is mandatory\n",
                                        __func__, prop_name);
                                err = -EINVAL;
                                goto out;
                        }
                        err = 0;
                }
                snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
                vreg->is_always_on = of_property_read_bool(dev->of_node,
                                                           prop_name);
        }

        if (!strcmp(name, "vdda-pll")) {
                vreg->max_uV = VDDA_PLL_MAX_UV;
                vreg->min_uV = VDDA_PLL_MIN_UV;
        } else if (!strcmp(name, "vdda-phy")) {
                vreg->max_uV = VDDA_PHY_MAX_UV;
                vreg->min_uV = VDDA_PHY_MIN_UV;
        } else if (!strcmp(name, "vddp-ref-clk")) {
                vreg->max_uV = VDDP_REF_CLK_MAX_UV;
                vreg->min_uV = VDDP_REF_CLK_MIN_UV;
        }

out:
        if (err)
                /* vreg->name was allocated with devm_kstrdup() */
                devm_kfree(dev, vreg->name);
        return err;
}

static int ufs_qcom_phy_init_vreg(struct device *dev,
                struct ufs_qcom_phy_vreg *vreg, const char *name)
{
        return __ufs_qcom_phy_init_vreg(dev, vreg, name, false);
}

int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
{
        int err;

        err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
                                     "vdda-pll");
        if (err)
                goto out;

        err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
                                     "vdda-phy");
        if (err)
                goto out;

        /* vddp-ref-clk-* properties are optional */
        __ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
                                 "vddp-ref-clk", true);
out:
        return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
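/*
 * Supply setup above maps each regulator name to standard and optional
 * device tree properties. For "vdda-phy", for example, the lookups resolve
 * to entries like these (phandle and value are hypothetical; the property
 * names follow directly from the code above):
 *
 *      vdda-phy-supply = <&pm_l28>;            // hypothetical phandle
 *      vdda-phy-max-microamp = <18380>;        // hypothetical value
 *
 * The min/max microvolt levels are not read from DT; they are fixed by the
 * VDDA_PHY, VDDA_PLL and VDDP_REF_CLK constants at the top of this file.
 */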
static int ufs_qcom_phy_cfg_vreg(struct device *dev,
                                 struct ufs_qcom_phy_vreg *vreg, bool on)
{
        int ret = 0;
        struct regulator *reg = vreg->reg;
        const char *name = vreg->name;
        int min_uV;
        int uA_load;

        if (regulator_count_voltages(reg) > 0) {
                min_uV = on ? vreg->min_uV : 0;
                ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
                if (ret) {
                        dev_err(dev, "%s: %s set voltage failed, err=%d\n",
                                __func__, name, ret);
                        goto out;
                }
                uA_load = on ? vreg->max_uA : 0;
                ret = regulator_set_load(reg, uA_load);
                if (ret >= 0) {
                        /*
                         * regulator_set_load() returns the new regulator
                         * mode upon success.
                         */
                        ret = 0;
                } else {
                        dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
                                __func__, name, uA_load, ret);
                        goto out;
                }
        }
out:
        return ret;
}

static int ufs_qcom_phy_enable_vreg(struct device *dev,
                                    struct ufs_qcom_phy_vreg *vreg)
{
        int ret = 0;

        if (!vreg || vreg->enabled)
                goto out;

        ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
        if (ret) {
                dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
                        __func__, ret);
                goto out;
        }

        ret = regulator_enable(vreg->reg);
        if (ret) {
                dev_err(dev, "%s: enable failed, err=%d\n",
                        __func__, ret);
                goto out;
        }

        vreg->enabled = true;
out:
        return ret;
}

static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
{
        int ret = 0;

        if (phy->is_ref_clk_enabled)
                goto out;

        /*
         * The reference clock is propagated in a daisy-chained manner from
         * the source to the phy, so ungate it at each stage.
         */
        ret = clk_prepare_enable(phy->ref_clk_src);
        if (ret) {
                dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
                        __func__, ret);
                goto out;
        }

        /*
         * "ref_clk_parent" is an optional clock, hence make sure that the
         * clk reference is available before trying to enable it.
         */
        if (phy->ref_clk_parent) {
                ret = clk_prepare_enable(phy->ref_clk_parent);
                if (ret) {
                        dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
                                __func__, ret);
                        goto out_disable_src;
                }
        }

        ret = clk_prepare_enable(phy->ref_clk);
        if (ret) {
                dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
                        __func__, ret);
                goto out_disable_parent;
        }

        phy->is_ref_clk_enabled = true;
        goto out;

out_disable_parent:
        if (phy->ref_clk_parent)
                clk_disable_unprepare(phy->ref_clk_parent);
out_disable_src:
        clk_disable_unprepare(phy->ref_clk_src);
out:
        return ret;
}

static int ufs_qcom_phy_disable_vreg(struct device *dev,
                                     struct ufs_qcom_phy_vreg *vreg)
{
        int ret = 0;

        if (!vreg || !vreg->enabled || vreg->is_always_on)
                goto out;

        ret = regulator_disable(vreg->reg);
        if (!ret) {
                /* ignore errors on applying disable config */
                ufs_qcom_phy_cfg_vreg(dev, vreg, false);
                vreg->enabled = false;
        } else {
                dev_err(dev, "%s: %s disable failed, err=%d\n",
                        __func__, vreg->name, ret);
        }
out:
        return ret;
}
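/*
 * Note: the teardown below mirrors ufs_qcom_phy_enable_ref_clk() in reverse
 * order: the leaf "ref_clk" is gated first, then the optional
 * "ref_clk_parent", and finally "ref_clk_src" at the root of the daisy
 * chain.
 */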
static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
{
        if (phy->is_ref_clk_enabled) {
                clk_disable_unprepare(phy->ref_clk);
                /*
                 * "ref_clk_parent" is an optional clock, hence make sure
                 * that the clk reference is available before trying to
                 * disable it.
                 */
                if (phy->ref_clk_parent)
                        clk_disable_unprepare(phy->ref_clk_parent);
                clk_disable_unprepare(phy->ref_clk_src);
                phy->is_ref_clk_enabled = false;
        }
}

#define UFS_REF_CLK_EN  (1 << 5)

static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
{
        struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

        if (phy->dev_ref_clk_ctrl_mmio &&
            (enable ^ phy->is_dev_ref_clk_enabled)) {
                u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);

                if (enable)
                        temp |= UFS_REF_CLK_EN;
                else
                        temp &= ~UFS_REF_CLK_EN;

                /*
                 * If we are here to disable this clock immediately after
                 * entering into hibern8, we need to make sure that the
                 * device ref_clk is active for at least 1us after the
                 * hibern8 enter.
                 */
                if (!enable)
                        udelay(1);

                writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
                /* ensure that ref_clk is enabled/disabled before we return */
                wmb();

                /*
                 * If we call hibern8 exit after this, we need to make sure
                 * that the device ref_clk is stable for at least 1us before
                 * the hibern8 exit command.
                 */
                if (enable)
                        udelay(1);

                phy->is_dev_ref_clk_enabled = enable;
        }
}

void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
{
        ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);

void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
{
        ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);

/* Turn ON M-PHY RMMI interface clocks */
static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
        int ret = 0;

        if (phy->is_iface_clk_enabled)
                goto out;

        ret = clk_prepare_enable(phy->tx_iface_clk);
        if (ret) {
                dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
                        __func__, ret);
                goto out;
        }
        ret = clk_prepare_enable(phy->rx_iface_clk);
        if (ret) {
                clk_disable_unprepare(phy->tx_iface_clk);
                dev_err(phy->dev, "%s: rx_iface_clk enable failed %d, also disabling tx_iface_clk\n",
                        __func__, ret);
                goto out;
        }
        phy->is_iface_clk_enabled = true;

out:
        return ret;
}

/* Turn OFF M-PHY RMMI interface clocks */
void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
{
        if (phy->is_iface_clk_enabled) {
                clk_disable_unprepare(phy->tx_iface_clk);
                clk_disable_unprepare(phy->rx_iface_clk);
                phy->is_iface_clk_enabled = false;
        }
}

int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
        int ret = 0;

        if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
                dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
                        __func__);
                ret = -ENOTSUPP;
        } else {
                ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);

int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
        int ret = 0;

        if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
                dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
                        __func__);
                ret = -ENOTSUPP;
        } else {
                ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
                                                               tx_lanes);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);

void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
                                          u8 major, u16 minor, u16 step)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

        ufs_qcom_phy->host_ctrl_rev_major = major;
        ufs_qcom_phy->host_ctrl_rev_minor = minor;
        ufs_qcom_phy->host_ctrl_rev_step = step;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);

int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
        int ret = 0;

        if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
                dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
                        __func__);
                ret = -ENOTSUPP;
        } else {
                ret = ufs_qcom_phy->phy_spec_ops->
                        calibrate_phy(ufs_qcom_phy, is_rate_B);
                if (ret)
                        dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
                                __func__, ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);

int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

        if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
                dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
                        __func__);
                return -ENOTSUPP;
        }

        return ufs_qcom_phy->phy_spec_ops->
                is_physical_coding_sublayer_ready(ufs_qcom_phy);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
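/*
 * Power-up sequence implemented below: vdda-phy first, then the
 * SoC-specific power_control() hook, vdda-pll (which also feeds the ref
 * clock LDOs), the M-PHY RMMI interface clocks, the daisy-chained ref
 * clocks, and finally the optional vddp-ref-clk pad rail. Each failure
 * unwinds the preceding steps in reverse order.
 */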
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
        struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
        struct device *dev = phy_common->dev;
        int err;

        if (phy_common->is_powered_on)
                return 0;

        err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
        if (err) {
                dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
                        __func__, err);
                goto out;
        }

        phy_common->phy_spec_ops->power_control(phy_common, true);

        /* vdda_pll also enables ref clock LDOs so enable it first */
        err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
        if (err) {
                dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
                        __func__, err);
                goto out_disable_phy;
        }

        err = ufs_qcom_phy_enable_iface_clk(phy_common);
        if (err) {
                dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
                        __func__, err);
                goto out_disable_pll;
        }

        err = ufs_qcom_phy_enable_ref_clk(phy_common);
        if (err) {
                dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
                        __func__, err);
                goto out_disable_iface_clk;
        }

        /* enable device PHY ref_clk pad rail */
        if (phy_common->vddp_ref_clk.reg) {
                err = ufs_qcom_phy_enable_vreg(dev,
                                               &phy_common->vddp_ref_clk);
                if (err) {
                        dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
                                __func__, err);
                        goto out_disable_ref_clk;
                }
        }

        phy_common->is_powered_on = true;
        goto out;

out_disable_ref_clk:
        ufs_qcom_phy_disable_ref_clk(phy_common);
out_disable_iface_clk:
        ufs_qcom_phy_disable_iface_clk(phy_common);
out_disable_pll:
        ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
out_disable_phy:
        ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
out:
        return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);

int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
        struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);

        if (!phy_common->is_powered_on)
                return 0;

        phy_common->phy_spec_ops->power_control(phy_common, false);

        if (phy_common->vddp_ref_clk.reg)
                ufs_qcom_phy_disable_vreg(phy_common->dev,
                                          &phy_common->vddp_ref_clk);
        ufs_qcom_phy_disable_ref_clk(phy_common);
        ufs_qcom_phy_disable_iface_clk(phy_common);

        ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
        ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
        phy_common->is_powered_on = false;

        return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
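/*
 * Typical call order from a host controller driver (a sketch based only on
 * the exports above; the exact sequence is up to the consumer, e.g. the
 * ufs-qcom host driver):
 *
 *      err = ufs_qcom_phy_power_on(generic_phy);
 *      err = ufs_qcom_phy_calibrate_phy(generic_phy, is_rate_B);
 *      err = ufs_qcom_phy_start_serdes(generic_phy);
 *      err = ufs_qcom_phy_is_pcs_ready(generic_phy);   // check PCS readiness
 *      ...
 *      err = ufs_qcom_phy_power_off(generic_phy);
 */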