Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drivers/phy/phy-qcom-ufs.c at v4.11-rc3 (691 lines, 17 kB)
/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include "phy-qcom-ufs-i.h"

#define MAX_PROP_NAME		32
#define VDDA_PHY_MIN_UV		1000000
#define VDDA_PHY_MAX_UV		1000000
#define VDDA_PLL_MIN_UV		1800000
#define VDDA_PLL_MAX_UV		1800000
#define VDDP_REF_CLK_MIN_UV	1200000
#define VDDP_REF_CLK_MAX_UV	1200000

int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
			   struct ufs_qcom_phy_calibration *tbl_A,
			   int tbl_size_A,
			   struct ufs_qcom_phy_calibration *tbl_B,
			   int tbl_size_B, bool is_rate_B)
{
	int i;
	int ret = 0;

	if (!tbl_A) {
		dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < tbl_size_A; i++)
		writel_relaxed(tbl_A[i].cfg_value,
			       ufs_qcom_phy->mmio + tbl_A[i].reg_offset);

	/*
	 * If we want to work in rate B, we need to override the registers
	 * that were configured from the rate A table with the values from
	 * the rate B table.
	 */
	if (is_rate_B) {
		if (!tbl_B) {
			dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL",
				__func__);
			ret = -EINVAL;
			goto out;
		}

		for (i = 0; i < tbl_size_B; i++)
			writel_relaxed(tbl_B[i].cfg_value,
				       ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
	}

	/* flush buffered writes */
	mb();

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
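/*
 * A minimal usage sketch (not part of the original file): a device-specific
 * driver hands ufs_qcom_phy_calibrate() tables of (reg_offset, cfg_value)
 * pairs, typically built with the UFS_QCOM_PHY_CAL_ENTRY() helper from
 * phy-qcom-ufs-i.h. The offsets and values below are hypothetical:
 *
 *	static struct ufs_qcom_phy_calibration tbl_rate_A[] = {
 *		UFS_QCOM_PHY_CAL_ENTRY(0x00c, 0x08),
 *		UFS_QCOM_PHY_CAL_ENTRY(0x034, 0x14),
 *	};
 *
 *	err = ufs_qcom_phy_calibrate(phy, tbl_rate_A, ARRAY_SIZE(tbl_rate_A),
 *				     NULL, 0, false);
 */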
/*
 * This assumes the embedded phy structure inside generic_phy is of type
 * struct ufs_qcom_phy. In order to function properly it's crucial
 * to keep the embedded struct "struct ufs_qcom_phy common_cfg"
 * as the first member inside generic_phy.
 */
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
	return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
}
EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);

static
int ufs_qcom_phy_base_init(struct platform_device *pdev,
			   struct ufs_qcom_phy *phy_common)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err = 0;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
	phy_common->mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR((void const *)phy_common->mmio)) {
		err = PTR_ERR((void const *)phy_common->mmio);
		phy_common->mmio = NULL;
		dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
			__func__, err);
		return err;
	}

	/* "dev_ref_clk_ctrl_mem" is an optional resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "dev_ref_clk_ctrl_mem");
	phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio))
		phy_common->dev_ref_clk_ctrl_mmio = NULL;

	return 0;
}

struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
				struct ufs_qcom_phy *common_cfg,
				const struct phy_ops *ufs_qcom_phy_gen_ops,
				struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
	int err;
	struct device *dev = &pdev->dev;
	struct phy *generic_phy = NULL;
	struct phy_provider *phy_provider;

	err = ufs_qcom_phy_base_init(pdev, common_cfg);
	if (err) {
		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
		goto out;
	}

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		err = PTR_ERR(phy_provider);
		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
		goto out;
	}

	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
	if (IS_ERR(generic_phy)) {
		err = PTR_ERR(generic_phy);
		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
		generic_phy = NULL;
		goto out;
	}

	common_cfg->phy_spec_ops = phy_spec_ops;
	common_cfg->dev = dev;

out:
	return generic_phy;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);

static int __ufs_qcom_phy_clk_get(struct device *dev,
			const char *name, struct clk **clk_out, bool err_print)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		if (err_print)
			dev_err(dev, "failed to get %s err %d", name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static int ufs_qcom_phy_clk_get(struct device *dev,
			const char *name, struct clk **clk_out)
{
	return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
}

int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
{
	int err;

	if (of_device_is_compatible(phy_common->dev->of_node,
				"qcom,msm8996-ufs-phy-qmp-14nm"))
		goto skip_txrx_clk;

	err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
				   &phy_common->tx_iface_clk);
	if (err)
		goto out;

	err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
				   &phy_common->rx_iface_clk);
	if (err)
		goto out;

skip_txrx_clk:
	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
				   &phy_common->ref_clk_src);
	if (err)
		goto out;

	/*
	 * "ref_clk_parent" is optional, hence don't abort init if it's not
	 * found.
	 */
	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
			       &phy_common->ref_clk_parent, false);

	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
				   &phy_common->ref_clk);

out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);

static int ufs_qcom_phy_init_vreg(struct device *dev,
				  struct ufs_qcom_phy_vreg *vreg,
				  const char *name)
{
	int err = 0;

	char prop_name[MAX_PROP_NAME];

	vreg->name = name;
	vreg->reg = devm_regulator_get(dev, name);
	if (IS_ERR(vreg->reg)) {
		err = PTR_ERR(vreg->reg);
		dev_err(dev, "failed to get %s, %d\n", name, err);
		goto out;
	}

	if (dev->of_node) {
		snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
		err = of_property_read_u32(dev->of_node,
					   prop_name, &vreg->max_uA);
		if (err && err != -EINVAL) {
			dev_err(dev, "%s: failed to read %s\n",
				__func__, prop_name);
			goto out;
		} else if (err == -EINVAL || !vreg->max_uA) {
			if (regulator_count_voltages(vreg->reg) > 0) {
				dev_err(dev, "%s: %s is mandatory\n",
					__func__, prop_name);
				err = -EINVAL;
				goto out;
			}
			err = 0;
		}
	}

	if (!strcmp(name, "vdda-pll")) {
		vreg->max_uV = VDDA_PLL_MAX_UV;
		vreg->min_uV = VDDA_PLL_MIN_UV;
	} else if (!strcmp(name, "vdda-phy")) {
		vreg->max_uV = VDDA_PHY_MAX_UV;
		vreg->min_uV = VDDA_PHY_MIN_UV;
	} else if (!strcmp(name, "vddp-ref-clk")) {
		vreg->max_uV = VDDP_REF_CLK_MAX_UV;
		vreg->min_uV = VDDP_REF_CLK_MIN_UV;
	}

out:
	return err;
}

int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
{
	int err;

	err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
				     "vdda-pll");
	if (err)
		goto out;

	err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
				     "vdda-phy");
	if (err)
		goto out;

	err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
				     "vddp-ref-clk");

out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
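/*
 * For reference, the device tree node these helpers parse looks roughly
 * like the sketch below, assembled from the resource, clock and regulator
 * names requested above. This is a hedged sketch: the unit address, clock
 * phandles and current values are illustrative, and the msm8996 compatible
 * shown here skips the tx/rx iface clocks, as ufs_qcom_phy_init_clks() does:
 *
 *	ufsphy: phy@627000 {
 *		compatible = "qcom,msm8996-ufs-phy-qmp-14nm";
 *		reg = <0x627000 0xda8>;
 *		reg-names = "phy_mem";
 *		clock-names = "ref_clk_src", "ref_clk_parent", "ref_clk";
 *		clocks = <&rpmcc 0>, <&gcc 1>, <&gcc 2>;
 *		vdda-phy-supply = <&pm8994_l28>;
 *		vdda-pll-supply = <&pm8994_l12>;
 *		vdda-phy-max-microamp = <18380>;
 *		vdda-pll-max-microamp = <9440>;
 *	};
 */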
static int ufs_qcom_phy_cfg_vreg(struct device *dev,
				 struct ufs_qcom_phy_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg = vreg->reg;
	const char *name = vreg->name;
	int min_uV;
	int uA_load;

	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
				__func__, name, ret);
			goto out;
		}
		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(reg, uA_load);
		if (ret >= 0) {
			/*
			 * regulator_set_load() returns the new regulator
			 * mode upon success.
			 */
			ret = 0;
		} else {
			dev_err(dev, "%s: %s set optimum mode (uA_load=%d) failed, err=%d\n",
				__func__, name, uA_load, ret);
			goto out;
		}
	}
out:
	return ret;
}

static int ufs_qcom_phy_enable_vreg(struct device *dev,
				    struct ufs_qcom_phy_vreg *vreg)
{
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
	if (ret) {
		dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	ret = regulator_enable(vreg->reg);
	if (ret) {
		dev_err(dev, "%s: enable failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	vreg->enabled = true;
out:
	return ret;
}

static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
{
	int ret = 0;

	if (phy->is_ref_clk_enabled)
		goto out;

	/*
	 * The reference clock is propagated in a daisy-chained manner from
	 * the source to the PHY, so ungate it at each stage.
	 */
	ret = clk_prepare_enable(phy->ref_clk_src);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * "ref_clk_parent" is an optional clock, so make sure the clk
	 * reference is available before trying to enable it.
	 */
	if (phy->ref_clk_parent) {
		ret = clk_prepare_enable(phy->ref_clk_parent);
		if (ret) {
			dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
				__func__, ret);
			goto out_disable_src;
		}
	}

	ret = clk_prepare_enable(phy->ref_clk);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
			__func__, ret);
		goto out_disable_parent;
	}

	phy->is_ref_clk_enabled = true;
	goto out;

out_disable_parent:
	if (phy->ref_clk_parent)
		clk_disable_unprepare(phy->ref_clk_parent);
out_disable_src:
	clk_disable_unprepare(phy->ref_clk_src);
out:
	return ret;
}

static int ufs_qcom_phy_disable_vreg(struct device *dev,
				     struct ufs_qcom_phy_vreg *vreg)
{
	int ret = 0;

	if (!vreg || !vreg->enabled)
		goto out;

	ret = regulator_disable(vreg->reg);

	if (!ret) {
		/* ignore errors on applying disable config */
		ufs_qcom_phy_cfg_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
			__func__, vreg->name, ret);
	}
out:
	return ret;
}

static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
{
	if (phy->is_ref_clk_enabled) {
		clk_disable_unprepare(phy->ref_clk);
		/*
		 * "ref_clk_parent" is an optional clock, so make sure the clk
		 * reference is available before trying to disable it.
		 */
		if (phy->ref_clk_parent)
			clk_disable_unprepare(phy->ref_clk_parent);
		clk_disable_unprepare(phy->ref_clk_src);
		phy->is_ref_clk_enabled = false;
	}
}

#define UFS_REF_CLK_EN	(1 << 5)

static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->dev_ref_clk_ctrl_mmio &&
	    (enable ^ phy->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= UFS_REF_CLK_EN;
		else
			temp &= ~UFS_REF_CLK_EN;

		/*
		 * If we are here to disable this clock immediately after
		 * entering into hibern8, we need to make sure that the device
		 * ref_clk is active for at least 1 us after the hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();
		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * the device ref_clk is stable for at least 1 us before the
		 * hibern8 exit command.
		 */
		if (enable)
			udelay(1);

		phy->is_dev_ref_clk_enabled = enable;
	}
}

void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
{
	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);

void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
{
	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);

/* Turn ON M-PHY RMMI interface clocks */
static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
	int ret = 0;

	if (phy->is_iface_clk_enabled)
		goto out;

	ret = clk_prepare_enable(phy->tx_iface_clk);
	if (ret) {
		dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
			__func__, ret);
		goto out;
	}
	ret = clk_prepare_enable(phy->rx_iface_clk);
	if (ret) {
		clk_disable_unprepare(phy->tx_iface_clk);
		dev_err(phy->dev, "%s: rx_iface_clk enable failed %d, also disabling tx_iface_clk\n",
			__func__, ret);
		goto out;
	}
	phy->is_iface_clk_enabled = true;

out:
	return ret;
}

/* Turn OFF M-PHY RMMI interface clocks */
void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
{
	if (phy->is_iface_clk_enabled) {
		clk_disable_unprepare(phy->tx_iface_clk);
		clk_disable_unprepare(phy->rx_iface_clk);
		phy->is_iface_clk_enabled = false;
	}
}

int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
		dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);

int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
		dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
							       tx_lanes);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);

void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
					  u8 major, u16 minor, u16 step)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	ufs_qcom_phy->host_ctrl_rev_major = major;
	ufs_qcom_phy->host_ctrl_rev_minor = minor;
	ufs_qcom_phy->host_ctrl_rev_step = step;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);

int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
		dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ret = ufs_qcom_phy->phy_spec_ops->
				calibrate_phy(ufs_qcom_phy, is_rate_B);
		if (ret)
			dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
				__func__, ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);

int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
		dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
			__func__);
		return -ENOTSUPP;
	}

	return ufs_qcom_phy->phy_spec_ops->
			is_physical_coding_sublayer_ready(ufs_qcom_phy);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
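/*
 * A hedged sketch of the call order: during link bring-up the UFS host
 * controller driver (drivers/scsi/ufs/ufs-qcom.c) drives the wrappers
 * above roughly as follows (error handling omitted):
 *
 *	err = ufs_qcom_phy_calibrate_phy(generic_phy, is_rate_B);
 *	err = ufs_qcom_phy_start_serdes(generic_phy);
 *	err = ufs_qcom_phy_is_pcs_ready(generic_phy);
 */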
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
	struct device *dev = phy_common->dev;
	int err;

	if (phy_common->is_powered_on)
		return 0;

	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
	if (err) {
		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
			__func__, err);
		goto out;
	}

	phy_common->phy_spec_ops->power_control(phy_common, true);

	/* vdda_pll also enables ref clock LDOs so enable it first */
	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
	if (err) {
		dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
			__func__, err);
		goto out_disable_phy;
	}

	err = ufs_qcom_phy_enable_iface_clk(phy_common);
	if (err) {
		dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
			__func__, err);
		goto out_disable_pll;
	}

	err = ufs_qcom_phy_enable_ref_clk(phy_common);
	if (err) {
		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
			__func__, err);
		goto out_disable_iface_clk;
	}

	/* enable device PHY ref_clk pad rail */
	if (phy_common->vddp_ref_clk.reg) {
		err = ufs_qcom_phy_enable_vreg(dev,
					       &phy_common->vddp_ref_clk);
		if (err) {
			dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
				__func__, err);
			goto out_disable_ref_clk;
		}
	}

	phy_common->is_powered_on = true;
	goto out;

out_disable_ref_clk:
	ufs_qcom_phy_disable_ref_clk(phy_common);
out_disable_iface_clk:
	ufs_qcom_phy_disable_iface_clk(phy_common);
out_disable_pll:
	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
out_disable_phy:
	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);

int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);

	if (!phy_common->is_powered_on)
		return 0;

	phy_common->phy_spec_ops->power_control(phy_common, false);

	if (phy_common->vddp_ref_clk.reg)
		ufs_qcom_phy_disable_vreg(phy_common->dev,
					  &phy_common->vddp_ref_clk);
	ufs_qcom_phy_disable_ref_clk(phy_common);
	ufs_qcom_phy_disable_iface_clk(phy_common);

	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
	phy_common->is_powered_on = false;

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
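For orientation, below is a minimal sketch of how a device-specific UFS PHY driver is expected to consume the helpers exported above. It mirrors the probe flow of phy-qcom-ufs-qmp-14nm.c / phy-qcom-ufs-qmp-20nm.c in the same directory, but every my_* identifier is hypothetical and the two ops tables are left as empty stubs.

#include "phy-qcom-ufs-i.h"

/* Hypothetical consumer of the common helpers; not from the kernel tree. */
struct my_ufs_phy {
	/* must stay the first member: get_ufs_qcom_phy() casts the drvdata */
	struct ufs_qcom_phy common_cfg;
	/* device-specific state would follow */
};

/* real drivers fill these with power_on/power_off and the specific ops */
static const struct phy_ops my_phy_gen_ops;
static struct ufs_qcom_phy_specific_ops my_phy_spec_ops;

static int my_ufs_phy_probe(struct platform_device *pdev)
{
	struct my_ufs_phy *phy;
	struct phy *generic_phy;
	int err;

	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	/* maps "phy_mem", registers the PHY provider and creates the PHY */
	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
						 &my_phy_gen_ops,
						 &my_phy_spec_ops);
	if (!generic_phy)
		return -EIO;

	/* get_ufs_qcom_phy() recovers &phy->common_cfg from this drvdata */
	phy_set_drvdata(generic_phy, phy);

	err = ufs_qcom_phy_init_clks(&phy->common_cfg);
	if (err)
		return err;

	return ufs_qcom_phy_init_vregulators(&phy->common_cfg);
}

The generic phy_ops a real driver installs would then typically route phy_power_on()/phy_power_off() to ufs_qcom_phy_power_on()/ufs_qcom_phy_power_off() above.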