Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: ufs: core: Enable power management for wlun

During runtime-suspend of ufs host, the SCSI devices are already suspended
and so are the queues associated with them. However, the ufs host sends SSU
(START_STOP_UNIT) to the wlun during runtime-suspend.

During the process, blk_queue_enter() checks whether the queue is in the suspended
state. If it is, it waits for the queue to resume, and never comes out of
it. Commit 52abca64fd94 ("scsi: block: Do not accept any requests while
suspended") adds the check to see if the queue is in suspended state in
blk_queue_enter().

Call trace:
__switch_to+0x174/0x2c4
__schedule+0x478/0x764
schedule+0x9c/0xe0
blk_queue_enter+0x158/0x228
blk_mq_alloc_request+0x40/0xa4
blk_get_request+0x2c/0x70
__scsi_execute+0x60/0x1c4
ufshcd_set_dev_pwr_mode+0x124/0x1e4
ufshcd_suspend+0x208/0x83c
ufshcd_runtime_suspend+0x40/0x154
ufshcd_pltfrm_runtime_suspend+0x14/0x20
pm_generic_runtime_suspend+0x28/0x3c
__rpm_callback+0x80/0x2a4
rpm_suspend+0x308/0x614
rpm_idle+0x158/0x228
pm_runtime_work+0x84/0xac
process_one_work+0x1f0/0x470
worker_thread+0x26c/0x4c8
kthread+0x13c/0x320
ret_from_fork+0x10/0x18

Fix this by registering a SCSI driver for the ufs device wlun and registering it
for block runtime-pm. Also make the device wlun a supplier for all other LUNs. This
way the wlun device suspends after all the consumers and resumes after HBA
resumes. This also registers a new SCSI driver for the rpmb wlun. This new
driver is mostly used to clear the rpmb Unit Attention Condition (UAC).

[mkp: resolve merge conflict with 5.13-rc1 and fix doc warning]

Fixes smatch warnings:
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>

Link: https://lore.kernel.org/r/4662c462e79e3e7f541f54f88f8993f421026d83.1619223249.git.asutoshd@codeaurora.org
Reviewed-by: Adrian Hunter <adrian.hunter@intel.com>
Co-developed-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Asutosh Das <asutoshd@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Asutosh Das and committed by
Martin K. Petersen
b294ff3e ed26297d

+573 -255
+2
drivers/scsi/ufs/cdns-pltfrm.c
··· 323 323 .runtime_suspend = ufshcd_pltfrm_runtime_suspend, 324 324 .runtime_resume = ufshcd_pltfrm_runtime_resume, 325 325 .runtime_idle = ufshcd_pltfrm_runtime_idle, 326 + .prepare = ufshcd_suspend_prepare, 327 + .complete = ufshcd_resume_complete, 326 328 }; 327 329 328 330 static struct platform_driver cdns_ufs_pltfrm_driver = {
+2
drivers/scsi/ufs/tc-dwc-g210-pci.c
··· 148 148 .runtime_suspend = tc_dwc_g210_pci_runtime_suspend, 149 149 .runtime_resume = tc_dwc_g210_pci_runtime_resume, 150 150 .runtime_idle = tc_dwc_g210_pci_runtime_idle, 151 + .prepare = ufshcd_suspend_prepare, 152 + .complete = ufshcd_resume_complete, 151 153 }; 152 154 153 155 static const struct pci_device_id tc_dwc_g210_pci_tbl[] = {
+3 -3
drivers/scsi/ufs/ufs-debugfs.c
··· 13 13 ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL); 14 14 } 15 15 16 - void __exit ufs_debugfs_exit(void) 16 + void ufs_debugfs_exit(void) 17 17 { 18 18 debugfs_remove_recursive(ufs_debugfs_root); 19 19 } ··· 60 60 up(&hba->host_sem); 61 61 return -EBUSY; 62 62 } 63 - pm_runtime_get_sync(hba->dev); 63 + ufshcd_rpm_get_sync(hba); 64 64 return 0; 65 65 } 66 66 67 67 static void ufs_debugfs_put_user_access(struct ufs_hba *hba) 68 68 __releases(&hba->host_sem) 69 69 { 70 - pm_runtime_put_sync(hba->dev); 70 + ufshcd_rpm_put_sync(hba); 71 71 up(&hba->host_sem); 72 72 } 73 73
+1 -1
drivers/scsi/ufs/ufs-debugfs.h
··· 9 9 10 10 #ifdef CONFIG_DEBUG_FS 11 11 void __init ufs_debugfs_init(void); 12 - void __exit ufs_debugfs_exit(void); 12 + void ufs_debugfs_exit(void); 13 13 void ufs_debugfs_hba_init(struct ufs_hba *hba); 14 14 void ufs_debugfs_hba_exit(struct ufs_hba *hba); 15 15 void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status);
+2
drivers/scsi/ufs/ufs-exynos.c
··· 1267 1267 .runtime_suspend = ufshcd_pltfrm_runtime_suspend, 1268 1268 .runtime_resume = ufshcd_pltfrm_runtime_resume, 1269 1269 .runtime_idle = ufshcd_pltfrm_runtime_idle, 1270 + .prepare = ufshcd_suspend_prepare, 1271 + .complete = ufshcd_resume_complete, 1270 1272 }; 1271 1273 1272 1274 static struct platform_driver exynos_ufs_pltform = {
+2
drivers/scsi/ufs/ufs-hisi.c
··· 574 574 .runtime_suspend = ufshcd_pltfrm_runtime_suspend, 575 575 .runtime_resume = ufshcd_pltfrm_runtime_resume, 576 576 .runtime_idle = ufshcd_pltfrm_runtime_idle, 577 + .prepare = ufshcd_suspend_prepare, 578 + .complete = ufshcd_resume_complete, 577 579 }; 578 580 579 581 static struct platform_driver ufs_hisi_pltform = {
+6 -6
drivers/scsi/ufs/ufs-mediatek.c
··· 810 810 /* enable unipro clock gating feature */ 811 811 ufs_mtk_cfg_unipro_cg(hba, true); 812 812 813 - /* configure auto-hibern8 timer to 10ms */ 814 - if (ufshcd_is_auto_hibern8_supported(hba)) { 815 - ufshcd_auto_hibern8_update(hba, 816 - FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | 817 - FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3)); 818 - } 813 + /* will be configured during probe hba */ 814 + if (ufshcd_is_auto_hibern8_supported(hba)) 815 + hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | 816 + FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3); 819 817 820 818 ufs_mtk_setup_clk_gating(hba); 821 819 ··· 1095 1097 .runtime_suspend = ufshcd_pltfrm_runtime_suspend, 1096 1098 .runtime_resume = ufshcd_pltfrm_runtime_resume, 1097 1099 .runtime_idle = ufshcd_pltfrm_runtime_idle, 1100 + .prepare = ufshcd_suspend_prepare, 1101 + .complete = ufshcd_resume_complete, 1098 1102 }; 1099 1103 1100 1104 static struct platform_driver ufs_mtk_pltform = {
+2
drivers/scsi/ufs/ufs-qcom.c
··· 1551 1551 .runtime_suspend = ufshcd_pltfrm_runtime_suspend, 1552 1552 .runtime_resume = ufshcd_pltfrm_runtime_resume, 1553 1553 .runtime_idle = ufshcd_pltfrm_runtime_idle, 1554 + .prepare = ufshcd_suspend_prepare, 1555 + .complete = ufshcd_resume_complete, 1554 1556 }; 1555 1557 1556 1558 static struct platform_driver ufs_qcom_pltform = {
+3 -3
drivers/scsi/ufs/ufs_bsg.c
··· 97 97 98 98 bsg_reply->reply_payload_rcv_len = 0; 99 99 100 - pm_runtime_get_sync(hba->dev); 100 + ufshcd_rpm_get_sync(hba); 101 101 102 102 msgcode = bsg_request->msgcode; 103 103 switch (msgcode) { ··· 106 106 ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff, 107 107 &desc_len, desc_op); 108 108 if (ret) { 109 - pm_runtime_put_sync(hba->dev); 109 + ufshcd_rpm_put_sync(hba); 110 110 goto out; 111 111 } 112 112 ··· 138 138 break; 139 139 } 140 140 141 - pm_runtime_put_sync(hba->dev); 141 + ufshcd_rpm_put_sync(hba); 142 142 143 143 if (!desc_buff) 144 144 goto out;
+5 -31
drivers/scsi/ufs/ufshcd-pci.c
··· 410 410 return ufshcd_system_resume(dev_get_drvdata(dev)); 411 411 } 412 412 413 - /** 414 - * ufshcd_pci_poweroff - suspend-to-disk poweroff function 415 - * @dev: pointer to PCI device handle 416 - * 417 - * Returns 0 if successful 418 - * Returns non-zero otherwise 419 - */ 420 - static int ufshcd_pci_poweroff(struct device *dev) 421 - { 422 - struct ufs_hba *hba = dev_get_drvdata(dev); 423 - int spm_lvl = hba->spm_lvl; 424 - int ret; 425 - 426 - /* 427 - * For poweroff we need to set the UFS device to PowerDown mode. 428 - * Force spm_lvl to ensure that. 429 - */ 430 - hba->spm_lvl = 5; 431 - ret = ufshcd_system_suspend(hba); 432 - hba->spm_lvl = spm_lvl; 433 - return ret; 434 - } 435 - 436 413 #endif /* !CONFIG_PM_SLEEP */ 437 414 438 415 #ifdef CONFIG_PM ··· 510 533 } 511 534 512 535 static const struct dev_pm_ops ufshcd_pci_pm_ops = { 513 - #ifdef CONFIG_PM_SLEEP 514 - .suspend = ufshcd_pci_suspend, 515 - .resume = ufshcd_pci_resume, 516 - .freeze = ufshcd_pci_suspend, 517 - .thaw = ufshcd_pci_resume, 518 - .poweroff = ufshcd_pci_poweroff, 519 - .restore = ufshcd_pci_resume, 520 - #endif 521 536 SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend, 522 537 ufshcd_pci_runtime_resume, 523 538 ufshcd_pci_runtime_idle) 539 + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, ufshcd_pci_resume) 540 + #ifdef CONFIG_PM_SLEEP 541 + .prepare = ufshcd_suspend_prepare, 542 + .complete = ufshcd_resume_complete, 543 + #endif 524 544 }; 525 545 526 546 static const struct pci_device_id ufshcd_pci_tbl[] = {
+492 -211
drivers/scsi/ufs/ufshcd.c
··· 16 16 #include <linux/bitfield.h> 17 17 #include <linux/blk-pm.h> 18 18 #include <linux/blkdev.h> 19 + #include <scsi/scsi_driver.h> 19 20 #include "ufshcd.h" 20 21 #include "ufs_quirks.h" 21 22 #include "unipro.h" ··· 77 76 78 77 /* Polling time to wait for fDeviceInit */ 79 78 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */ 79 + 80 + #define wlun_dev_to_hba(dv) shost_priv(to_scsi_device(dv)->host) 80 81 81 82 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ 82 83 ({ \ ··· 1554 1551 if (value == hba->clk_scaling.is_enabled) 1555 1552 goto out; 1556 1553 1557 - pm_runtime_get_sync(hba->dev); 1554 + ufshcd_rpm_get_sync(hba); 1558 1555 ufshcd_hold(hba, false); 1559 1556 1560 1557 hba->clk_scaling.is_enabled = value; ··· 1570 1567 } 1571 1568 1572 1569 ufshcd_release(hba); 1573 - pm_runtime_put_sync(hba->dev); 1570 + ufshcd_rpm_put_sync(hba); 1574 1571 out: 1575 1572 up(&hba->host_sem); 1576 1573 return err ? err : count; ··· 2566 2563 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id) 2567 2564 { 2568 2565 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE; 2566 + } 2567 + 2568 + static inline bool is_rpmb_wlun(struct scsi_device *sdev) 2569 + { 2570 + return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN); 2571 + } 2572 + 2573 + static inline bool is_device_wlun(struct scsi_device *sdev) 2574 + { 2575 + return sdev->lun == 2576 + ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN); 2569 2577 } 2570 2578 2571 2579 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) ··· 4112 4098 } 4113 4099 spin_unlock_irqrestore(hba->host->host_lock, flags); 4114 4100 4115 - if (update && !pm_runtime_suspended(hba->dev)) { 4116 - pm_runtime_get_sync(hba->dev); 4101 + if (update && 4102 + !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) { 4103 + ufshcd_rpm_get_sync(hba); 4117 4104 ufshcd_hold(hba, false); 4118 4105 ufshcd_auto_hibern8_enable(hba); 4119 4106 ufshcd_release(hba); 4120 - 
pm_runtime_put(hba->dev); 4107 + ufshcd_rpm_put_sync(hba); 4121 4108 } 4122 4109 } 4123 4110 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update); ··· 4816 4801 } 4817 4802 4818 4803 /** 4804 + * ufshcd_setup_links - associate link b/w device wlun and other luns 4805 + * @sdev: pointer to SCSI device 4806 + * @hba: pointer to ufs hba 4807 + */ 4808 + static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev) 4809 + { 4810 + struct device_link *link; 4811 + 4812 + /* 4813 + * Device wlun is the supplier & rest of the luns are consumers. 4814 + * This ensures that device wlun suspends after all other luns. 4815 + */ 4816 + if (hba->sdev_ufs_device) { 4817 + link = device_link_add(&sdev->sdev_gendev, 4818 + &hba->sdev_ufs_device->sdev_gendev, 4819 + DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); 4820 + if (!link) { 4821 + dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n", 4822 + dev_name(&hba->sdev_ufs_device->sdev_gendev)); 4823 + return; 4824 + } 4825 + hba->luns_avail--; 4826 + /* Ignore REPORT_LUN wlun probing */ 4827 + if (hba->luns_avail == 1) { 4828 + ufshcd_rpm_put(hba); 4829 + return; 4830 + } 4831 + } else { 4832 + /* 4833 + * Device wlun is probed. The assumption is that WLUNs are 4834 + * scanned before other LUNs. 4835 + */ 4836 + hba->luns_avail--; 4837 + } 4838 + } 4839 + 4840 + /** 4819 4841 * ufshcd_slave_alloc - handle initial SCSI device configurations 4820 4842 * @sdev: pointer to SCSI device 4821 4843 * ··· 4882 4830 ufshcd_set_queue_depth(sdev); 4883 4831 4884 4832 ufshcd_get_lu_power_on_wp_status(hba, sdev); 4833 + 4834 + ufshcd_setup_links(hba, sdev); 4885 4835 4886 4836 return 0; 4887 4837 } ··· 4916 4862 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); 4917 4863 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE) 4918 4864 blk_queue_update_dma_alignment(q, PAGE_SIZE - 1); 4919 - 4920 - if (ufshcd_is_rpm_autosuspend_allowed(hba)) 4865 + /* 4866 + * Block runtime-pm until all consumers are added. 
4867 + * Refer ufshcd_setup_links(). 4868 + */ 4869 + if (is_device_wlun(sdev)) 4870 + pm_runtime_get_noresume(&sdev->sdev_gendev); 4871 + else if (ufshcd_is_rpm_autosuspend_allowed(hba)) 4921 4872 sdev->rpm_autosuspend = 1; 4922 4873 4923 4874 ufshcd_crypto_setup_rq_keyslot_manager(hba, q); ··· 5038 4979 */ 5039 4980 if (!hba->pm_op_in_progress && 5040 4981 !ufshcd_eh_in_progress(hba) && 5041 - ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) && 5042 - schedule_work(&hba->eeh_work)) { 5043 - /* 5044 - * Prevent suspend once eeh_work is scheduled 5045 - * to avoid deadlock between ufshcd_suspend 5046 - * and exception event handler. 5047 - */ 5048 - pm_runtime_get_noresume(hba->dev); 5049 - } 4982 + ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) 4983 + /* Flushed in suspend */ 4984 + schedule_work(&hba->eeh_work); 5050 4985 break; 5051 4986 case UPIU_TRANSACTION_REJECT_UPIU: 5052 4987 /* TODO: handle Reject UPIU Response */ ··· 5639 5586 * after a certain delay to recheck the threshold by next runtime 5640 5587 * suspend. 5641 5588 */ 5642 - pm_runtime_get_sync(hba->dev); 5643 - pm_runtime_put_sync(hba->dev); 5589 + ufshcd_rpm_get_sync(hba); 5590 + ufshcd_rpm_put_sync(hba); 5644 5591 } 5645 5592 5646 5593 /** ··· 5657 5604 u32 status = 0; 5658 5605 hba = container_of(work, struct ufs_hba, eeh_work); 5659 5606 5660 - pm_runtime_get_sync(hba->dev); 5661 5607 ufshcd_scsi_block_requests(hba); 5662 5608 err = ufshcd_get_ee_status(hba, &status); 5663 5609 if (err) { ··· 5673 5621 ufs_debugfs_exception_event(hba, status); 5674 5622 out: 5675 5623 ufshcd_scsi_unblock_requests(hba); 5676 - /* 5677 - * pm_runtime_get_noresume is called while scheduling 5678 - * eeh_work to avoid suspend racing with exception work. 5679 - * Hence decrement usage counter using pm_runtime_put_noidle 5680 - * to allow suspend on completion of exception event handler. 
5681 - */ 5682 - pm_runtime_put_noidle(hba->dev); 5683 - pm_runtime_put(hba->dev); 5684 5624 return; 5685 5625 } 5686 5626 ··· 5797 5753 5798 5754 static void ufshcd_err_handling_prepare(struct ufs_hba *hba) 5799 5755 { 5800 - pm_runtime_get_sync(hba->dev); 5801 - if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) { 5756 + ufshcd_rpm_get_sync(hba); 5757 + if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) || 5758 + hba->is_sys_suspended) { 5802 5759 enum ufs_pm_op pm_op; 5803 5760 5804 5761 /* 5805 - * Don't assume anything of pm_runtime_get_sync(), if 5762 + * Don't assume anything of resume, if 5806 5763 * resume fails, irq and clocks can be OFF, and powers 5807 5764 * can be OFF or in LPM. 5808 5765 */ ··· 5839 5794 if (ufshcd_is_clkscaling_supported(hba)) 5840 5795 ufshcd_clk_scaling_suspend(hba, false); 5841 5796 ufshcd_clear_ua_wluns(hba); 5842 - pm_runtime_put(hba->dev); 5797 + ufshcd_rpm_put(hba); 5843 5798 } 5844 5799 5845 5800 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) 5846 5801 { 5847 5802 return (!hba->is_powered || hba->shutting_down || 5803 + !hba->sdev_ufs_device || 5848 5804 hba->ufshcd_state == UFSHCD_STATE_ERROR || 5849 5805 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || 5850 5806 ufshcd_is_link_broken(hba)))); ··· 5861 5815 5862 5816 hba->is_sys_suspended = false; 5863 5817 /* 5864 - * Set RPM status of hba device to RPM_ACTIVE, 5818 + * Set RPM status of wlun device to RPM_ACTIVE, 5865 5819 * this also clears its runtime error. 
5866 5820 */ 5867 - ret = pm_runtime_set_active(hba->dev); 5821 + ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev); 5822 + 5823 + /* hba device might have a runtime error otherwise */ 5824 + if (ret) 5825 + ret = pm_runtime_set_active(hba->dev); 5868 5826 /* 5869 - * If hba device had runtime error, we also need to resume those 5870 - * scsi devices under hba in case any of them has failed to be 5871 - * resumed due to hba runtime resume failure. This is to unblock 5827 + * If wlun device had runtime error, we also need to resume those 5828 + * consumer scsi devices in case any of them has failed to be 5829 + * resumed due to supplier runtime resume failure. This is to unblock 5872 5830 * blk_queue_enter in case there are bios waiting inside it. 5873 5831 */ 5874 5832 if (!ret) { ··· 7296 7246 hba->sdev_ufs_device = NULL; 7297 7247 goto out; 7298 7248 } 7299 - ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device); 7300 7249 scsi_device_put(hba->sdev_ufs_device); 7301 7250 7302 7251 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, ··· 7458 7409 __func__, err); 7459 7410 goto out; 7460 7411 } 7412 + 7413 + hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] + 7414 + desc_buf[DEVICE_DESC_PARAM_NUM_WLU]; 7461 7415 7462 7416 ufs_fixup_device_setup(hba); 7463 7417 ··· 7939 7887 ufshcd_set_ufs_dev_active(hba); 7940 7888 ufshcd_force_reset_auto_bkops(hba); 7941 7889 hba->wlun_dev_clr_ua = true; 7890 + hba->wlun_rpmb_clr_ua = true; 7942 7891 7943 7892 /* Gear up to HS gear if supported */ 7944 7893 if (hba->max_pwr_info.is_valid) { ··· 8525 8472 * handling context. 
8526 8473 */ 8527 8474 hba->host->eh_noresume = 1; 8528 - ufshcd_clear_ua_wluns(hba); 8475 + if (hba->wlun_dev_clr_ua) 8476 + ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN); 8529 8477 8530 8478 cmd[4] = pwr_mode << 4; 8531 8479 ··· 8701 8647 ufshcd_setup_hba_vreg(hba, true); 8702 8648 } 8703 8649 8704 - /** 8705 - * ufshcd_suspend - helper function for suspend operations 8706 - * @hba: per adapter instance 8707 - * @pm_op: desired low power operation type 8708 - * 8709 - * This function will try to put the UFS device and link into low power 8710 - * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl" 8711 - * (System PM level). 8712 - * 8713 - * If this function is called during shutdown, it will make sure that 8714 - * both UFS device and UFS link is powered off. 8715 - * 8716 - * NOTE: UFS device & link must be active before we enter in this function. 8717 - * 8718 - * Returns 0 for success and non-zero for failure 8719 - */ 8720 - static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) 8650 + static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) 8721 8651 { 8722 8652 int ret = 0; 8723 8653 int check_for_bkops; ··· 8709 8671 enum ufs_dev_pwr_mode req_dev_pwr_mode; 8710 8672 enum uic_link_state req_link_state; 8711 8673 8712 - hba->pm_op_in_progress = 1; 8674 + hba->pm_op_in_progress = true; 8713 8675 if (!ufshcd_is_shutdown_pm(pm_op)) { 8714 8676 pm_lvl = ufshcd_is_runtime_pm(pm_op) ? 
8715 8677 hba->rpm_lvl : hba->spm_lvl; ··· 8732 8694 8733 8695 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && 8734 8696 req_link_state == UIC_LINK_ACTIVE_STATE) { 8735 - goto disable_clks; 8697 + goto vops_suspend; 8736 8698 } 8737 8699 8738 8700 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && 8739 8701 (req_link_state == hba->uic_link_state)) 8740 - goto enable_gating; 8702 + goto enable_scaling; 8741 8703 8742 8704 /* UFS device & link must be active before we enter in this function */ 8743 8705 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { 8744 8706 ret = -EINVAL; 8745 - goto enable_gating; 8707 + goto enable_scaling; 8746 8708 } 8747 8709 8748 8710 if (ufshcd_is_runtime_pm(pm_op)) { ··· 8754 8716 */ 8755 8717 ret = ufshcd_urgent_bkops(hba); 8756 8718 if (ret) 8757 - goto enable_gating; 8719 + goto enable_scaling; 8758 8720 } else { 8759 8721 /* make sure that auto bkops is disabled */ 8760 8722 ufshcd_disable_auto_bkops(hba); ··· 8782 8744 if (!hba->dev_info.b_rpm_dev_flush_capable) { 8783 8745 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); 8784 8746 if (ret) 8785 - goto enable_gating; 8747 + goto enable_scaling; 8786 8748 } 8787 8749 } 8788 8750 ··· 8795 8757 if (ret) 8796 8758 goto set_dev_active; 8797 8759 8798 - disable_clks: 8760 + vops_suspend: 8799 8761 /* 8800 8762 * Call vendor specific suspend callback. As these callbacks may access 8801 8763 * vendor specific host controller register space call them before the ··· 8804 8766 ret = ufshcd_vops_suspend(hba, pm_op); 8805 8767 if (ret) 8806 8768 goto set_link_active; 8807 - /* 8808 - * Disable the host irq as host controller as there won't be any 8809 - * host controller transaction expected till resume. 
8810 - */ 8811 - ufshcd_disable_irq(hba); 8812 - 8813 - ufshcd_setup_clocks(hba, false); 8814 - 8815 - if (ufshcd_is_clkgating_allowed(hba)) { 8816 - hba->clk_gating.state = CLKS_OFF; 8817 - trace_ufshcd_clk_gating(dev_name(hba->dev), 8818 - hba->clk_gating.state); 8819 - } 8820 - 8821 - ufshcd_vreg_set_lpm(hba); 8822 - 8823 - /* Put the host controller in low power mode if possible */ 8824 - ufshcd_hba_vreg_set_lpm(hba); 8825 8769 goto out; 8826 8770 8827 8771 set_link_active: 8828 - ufshcd_vreg_set_hpm(hba); 8829 8772 /* 8830 8773 * Device hardware reset is required to exit DeepSleep. Also, for 8831 8774 * DeepSleep, the link is off so host reset and restore will be done ··· 8828 8809 } 8829 8810 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) 8830 8811 ufshcd_disable_auto_bkops(hba); 8831 - enable_gating: 8812 + enable_scaling: 8832 8813 if (ufshcd_is_clkscaling_supported(hba)) 8833 8814 ufshcd_clk_scaling_suspend(hba, false); 8834 8815 8835 - hba->clk_gating.is_suspended = false; 8836 8816 hba->dev_info.b_rpm_dev_flush_capable = false; 8837 - ufshcd_clear_ua_wluns(hba); 8838 - ufshcd_release(hba); 8839 8817 out: 8840 8818 if (hba->dev_info.b_rpm_dev_flush_capable) { 8841 8819 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, 8842 8820 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS)); 8843 8821 } 8844 8822 8845 - hba->pm_op_in_progress = 0; 8846 - 8847 - if (ret) 8848 - ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret); 8823 + if (ret) { 8824 + ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret); 8825 + hba->clk_gating.is_suspended = false; 8826 + ufshcd_release(hba); 8827 + } 8828 + hba->pm_op_in_progress = false; 8849 8829 return ret; 8850 8830 } 8851 8831 8852 - /** 8853 - * ufshcd_resume - helper function for resume operations 8854 - * @hba: per adapter instance 8855 - * @pm_op: runtime PM or system PM 8856 - * 8857 - * This function basically brings the UFS device, UniPro link and controller 8858 - * to active state. 
8859 - * 8860 - * Returns 0 for success and non-zero for failure 8861 - */ 8862 - static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) 8832 + static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) 8863 8833 { 8864 8834 int ret; 8865 - enum uic_link_state old_link_state; 8835 + enum uic_link_state old_link_state = hba->uic_link_state; 8866 8836 8867 - hba->pm_op_in_progress = 1; 8868 - old_link_state = hba->uic_link_state; 8869 - 8870 - ufshcd_hba_vreg_set_hpm(hba); 8871 - ret = ufshcd_vreg_set_hpm(hba); 8872 - if (ret) 8873 - goto out; 8874 - 8875 - /* Make sure clocks are enabled before accessing controller */ 8876 - ret = ufshcd_setup_clocks(hba, true); 8877 - if (ret) 8878 - goto disable_vreg; 8879 - 8880 - /* enable the host irq as host controller would be active soon */ 8881 - ufshcd_enable_irq(hba); 8837 + hba->pm_op_in_progress = true; 8882 8838 8883 8839 /* 8884 8840 * Call vendor specific resume callback. As these callbacks may access ··· 8862 8868 */ 8863 8869 ret = ufshcd_vops_resume(hba, pm_op); 8864 8870 if (ret) 8865 - goto disable_irq_and_vops_clks; 8871 + goto out; 8866 8872 8867 8873 /* For DeepSleep, the only supported option is to have the link off */ 8868 8874 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba)); ··· 8910 8916 if (hba->ee_usr_mask) 8911 8917 ufshcd_write_ee_control(hba); 8912 8918 8913 - hba->clk_gating.is_suspended = false; 8914 - 8915 8919 if (ufshcd_is_clkscaling_supported(hba)) 8916 8920 ufshcd_clk_scaling_suspend(hba, false); 8917 - 8918 - /* Enable Auto-Hibernate if configured */ 8919 - ufshcd_auto_hibern8_enable(hba); 8920 8921 8921 8922 if (hba->dev_info.b_rpm_dev_flush_capable) { 8922 8923 hba->dev_info.b_rpm_dev_flush_capable = false; 8923 8924 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); 8924 8925 } 8925 8926 8926 - ufshcd_clear_ua_wluns(hba); 8927 - 8928 - /* Schedule clock gating in case of no access to UFS device yet */ 8929 - ufshcd_release(hba); 8930 - 
8927 + /* Enable Auto-Hibernate if configured */ 8928 + ufshcd_auto_hibern8_enable(hba); 8931 8929 goto out; 8932 8930 8933 8931 set_old_link_state: 8934 8932 ufshcd_link_state_transition(hba, old_link_state, 0); 8935 8933 vendor_suspend: 8936 8934 ufshcd_vops_suspend(hba, pm_op); 8937 - disable_irq_and_vops_clks: 8935 + out: 8936 + if (ret) 8937 + ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret); 8938 + hba->clk_gating.is_suspended = false; 8939 + ufshcd_release(hba); 8940 + hba->pm_op_in_progress = false; 8941 + return ret; 8942 + } 8943 + 8944 + static int ufshcd_wl_runtime_suspend(struct device *dev) 8945 + { 8946 + struct scsi_device *sdev = to_scsi_device(dev); 8947 + struct ufs_hba *hba; 8948 + int ret; 8949 + ktime_t start = ktime_get(); 8950 + 8951 + hba = shost_priv(sdev->host); 8952 + 8953 + ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM); 8954 + if (ret) 8955 + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 8956 + 8957 + trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret, 8958 + ktime_to_us(ktime_sub(ktime_get(), start)), 8959 + hba->curr_dev_pwr_mode, hba->uic_link_state); 8960 + 8961 + return ret; 8962 + } 8963 + 8964 + static int ufshcd_wl_runtime_resume(struct device *dev) 8965 + { 8966 + struct scsi_device *sdev = to_scsi_device(dev); 8967 + struct ufs_hba *hba; 8968 + int ret = 0; 8969 + ktime_t start = ktime_get(); 8970 + 8971 + hba = shost_priv(sdev->host); 8972 + 8973 + ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM); 8974 + if (ret) 8975 + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 8976 + 8977 + trace_ufshcd_wl_runtime_resume(dev_name(dev), ret, 8978 + ktime_to_us(ktime_sub(ktime_get(), start)), 8979 + hba->curr_dev_pwr_mode, hba->uic_link_state); 8980 + 8981 + return ret; 8982 + } 8983 + 8984 + #ifdef CONFIG_PM_SLEEP 8985 + static int ufshcd_wl_suspend(struct device *dev) 8986 + { 8987 + struct scsi_device *sdev = to_scsi_device(dev); 8988 + struct ufs_hba *hba; 8989 + int ret = 0; 8990 + ktime_t 
start = ktime_get(); 8991 + 8992 + hba = shost_priv(sdev->host); 8993 + down(&hba->host_sem); 8994 + 8995 + if (pm_runtime_suspended(dev)) 8996 + goto out; 8997 + 8998 + ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM); 8999 + if (ret) { 9000 + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9001 + up(&hba->host_sem); 9002 + } 9003 + 9004 + out: 9005 + if (!ret) 9006 + hba->is_sys_suspended = true; 9007 + trace_ufshcd_wl_suspend(dev_name(dev), ret, 9008 + ktime_to_us(ktime_sub(ktime_get(), start)), 9009 + hba->curr_dev_pwr_mode, hba->uic_link_state); 9010 + 9011 + return ret; 9012 + } 9013 + 9014 + static int ufshcd_wl_resume(struct device *dev) 9015 + { 9016 + struct scsi_device *sdev = to_scsi_device(dev); 9017 + struct ufs_hba *hba; 9018 + int ret = 0; 9019 + ktime_t start = ktime_get(); 9020 + 9021 + hba = shost_priv(sdev->host); 9022 + 9023 + if (pm_runtime_suspended(dev)) 9024 + goto out; 9025 + 9026 + ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM); 9027 + if (ret) 9028 + dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret); 9029 + out: 9030 + trace_ufshcd_wl_resume(dev_name(dev), ret, 9031 + ktime_to_us(ktime_sub(ktime_get(), start)), 9032 + hba->curr_dev_pwr_mode, hba->uic_link_state); 9033 + if (!ret) 9034 + hba->is_sys_suspended = false; 9035 + up(&hba->host_sem); 9036 + return ret; 9037 + } 9038 + #endif 9039 + 9040 + static void ufshcd_wl_shutdown(struct device *dev) 9041 + { 9042 + struct scsi_device *sdev = to_scsi_device(dev); 9043 + struct ufs_hba *hba; 9044 + 9045 + hba = shost_priv(sdev->host); 9046 + 9047 + down(&hba->host_sem); 9048 + hba->shutting_down = true; 9049 + up(&hba->host_sem); 9050 + 9051 + /* Turn on everything while shutting down */ 9052 + ufshcd_rpm_get_sync(hba); 9053 + scsi_device_quiesce(sdev); 9054 + shost_for_each_device(sdev, hba->host) { 9055 + if (sdev == hba->sdev_ufs_device) 9056 + continue; 9057 + scsi_device_quiesce(sdev); 9058 + } 9059 + __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); 9060 + } 9061 + 9062 
+ /** 9063 + * ufshcd_suspend - helper function for suspend operations 9064 + * @hba: per adapter instance 9065 + * 9066 + * This function will put disable irqs, turn off clocks 9067 + * and set vreg and hba-vreg in lpm mode. 9068 + * Also check the description of __ufshcd_wl_suspend(). 9069 + */ 9070 + static int ufshcd_suspend(struct ufs_hba *hba) 9071 + { 9072 + int ret; 9073 + 9074 + if (!hba->is_powered) 9075 + return 0; 9076 + /* 9077 + * Disable the host irq as host controller as there won't be any 9078 + * host controller transaction expected till resume. 9079 + */ 8938 9080 ufshcd_disable_irq(hba); 8939 - ufshcd_setup_clocks(hba, false); 9081 + ret = ufshcd_setup_clocks(hba, false); 9082 + if (ret) { 9083 + ufshcd_enable_irq(hba); 9084 + return ret; 9085 + } 8940 9086 if (ufshcd_is_clkgating_allowed(hba)) { 8941 9087 hba->clk_gating.state = CLKS_OFF; 8942 9088 trace_ufshcd_clk_gating(dev_name(hba->dev), 8943 9089 hba->clk_gating.state); 8944 9090 } 9091 + 9092 + ufshcd_vreg_set_lpm(hba); 9093 + /* Put the host controller in low power mode if possible */ 9094 + ufshcd_hba_vreg_set_lpm(hba); 9095 + return ret; 9096 + } 9097 + 9098 + /** 9099 + * ufshcd_resume - helper function for resume operations 9100 + * @hba: per adapter instance 9101 + * 9102 + * This function basically turns on the regulators, clocks and 9103 + * irqs of the hba. 9104 + * Also check the description of __ufshcd_wl_resume(). 
9105 + * 9106 + * Returns 0 for success and non-zero for failure 9107 + */ 9108 + static int ufshcd_resume(struct ufs_hba *hba) 9109 + { 9110 + int ret; 9111 + 9112 + if (!hba->is_powered) 9113 + return 0; 9114 + 9115 + ufshcd_hba_vreg_set_hpm(hba); 9116 + ret = ufshcd_vreg_set_hpm(hba); 9117 + if (ret) 9118 + goto out; 9119 + 9120 + /* Make sure clocks are enabled before accessing controller */ 9121 + ret = ufshcd_setup_clocks(hba, true); 9122 + if (ret) 9123 + goto disable_vreg; 9124 + 9125 + /* enable the host irq as host controller would be active soon */ 9126 + ufshcd_enable_irq(hba); 9127 + goto out; 9128 + 8945 9129 disable_vreg: 8946 9130 ufshcd_vreg_set_lpm(hba); 8947 9131 out: 8948 - hba->pm_op_in_progress = 0; 8949 9132 if (ret) 8950 9133 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret); 8951 9134 return ret; ··· 9133 8962 * @hba: per adapter instance 9134 8963 * 9135 8964 * Check the description of ufshcd_suspend() function for more details. 8965 + * Also check the description of __ufshcd_wl_suspend(). 9136 8966 * 9137 8967 * Returns 0 for success and non-zero for failure 9138 8968 */ ··· 9142 8970 int ret = 0; 9143 8971 ktime_t start = ktime_get(); 9144 8972 9145 - down(&hba->host_sem); 9146 - 9147 - if (!hba->is_powered) 9148 - return 0; 9149 - 9150 - cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work); 9151 - 9152 - if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == 9153 - hba->curr_dev_pwr_mode) && 9154 - (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) == 9155 - hba->uic_link_state) && 9156 - pm_runtime_suspended(hba->dev) && 9157 - !hba->dev_info.b_rpm_dev_flush_capable) 8973 + if (pm_runtime_suspended(hba->dev)) 9158 8974 goto out; 9159 8975 9160 - if (pm_runtime_suspended(hba->dev)) { 9161 - /* 9162 - * UFS device and/or UFS link low power states during runtime 9163 - * suspend seems to be different than what is expected during 9164 - * system suspend. 
Hence runtime resume the devic & link and 9165 - * let the system suspend low power states to take effect. 9166 - * TODO: If resume takes longer time, we might have optimize 9167 - * it in future by not resuming everything if possible. 9168 - */ 9169 - ret = ufshcd_runtime_resume(hba); 9170 - if (ret) 9171 - goto out; 9172 - } 9173 - 9174 - ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); 8976 + ret = ufshcd_suspend(hba); 9175 8977 out: 9176 8978 trace_ufshcd_system_suspend(dev_name(hba->dev), ret, 9177 8979 ktime_to_us(ktime_sub(ktime_get(), start)), 9178 8980 hba->curr_dev_pwr_mode, hba->uic_link_state); 9179 - if (!ret) 9180 - hba->is_sys_suspended = true; 9181 - else 9182 - up(&hba->host_sem); 9183 8981 return ret; 9184 8982 } 9185 8983 EXPORT_SYMBOL(ufshcd_system_suspend); ··· 9166 9024 int ret = 0; 9167 9025 ktime_t start = ktime_get(); 9168 9026 9169 - if (!hba->is_powered || pm_runtime_suspended(hba->dev)) 9170 - /* 9171 - * Let the runtime resume take care of resuming 9172 - * if runtime suspended. 9173 - */ 9027 + if (pm_runtime_suspended(hba->dev)) 9174 9028 goto out; 9175 - else 9176 - ret = ufshcd_resume(hba, UFS_SYSTEM_PM); 9029 + 9030 + ret = ufshcd_resume(hba); 9031 + 9177 9032 out: 9178 9033 trace_ufshcd_system_resume(dev_name(hba->dev), ret, 9179 9034 ktime_to_us(ktime_sub(ktime_get(), start)), 9180 9035 hba->curr_dev_pwr_mode, hba->uic_link_state); 9181 - if (!ret) 9182 - hba->is_sys_suspended = false; 9183 - up(&hba->host_sem); 9036 + 9184 9037 return ret; 9185 9038 } 9186 9039 EXPORT_SYMBOL(ufshcd_system_resume); ··· 9185 9048 * @hba: per adapter instance 9186 9049 * 9187 9050 * Check the description of ufshcd_suspend() function for more details. 9051 + * Also check the description of __ufshcd_wl_suspend(). 
9188 9052 * 9189 9053 * Returns 0 for success and non-zero for failure 9190 9054 */ 9191 9055 int ufshcd_runtime_suspend(struct ufs_hba *hba) 9192 9056 { 9193 - int ret = 0; 9057 + int ret; 9194 9058 ktime_t start = ktime_get(); 9195 9059 9196 - if (!hba->is_powered) 9197 - goto out; 9198 - else 9199 - ret = ufshcd_suspend(hba, UFS_RUNTIME_PM); 9200 - out: 9060 + ret = ufshcd_suspend(hba); 9061 + 9201 9062 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, 9202 9063 ktime_to_us(ktime_sub(ktime_get(), start)), 9203 9064 hba->curr_dev_pwr_mode, hba->uic_link_state); ··· 9207 9072 * ufshcd_runtime_resume - runtime resume routine 9208 9073 * @hba: per adapter instance 9209 9074 * 9210 - * This function basically brings the UFS device, UniPro link and controller 9075 + * This function basically brings controller 9211 9076 * to active state. Following operations are done in this function: 9212 9077 * 9213 9078 * 1. Turn on all the controller related clocks 9214 - * 2. Bring the UniPro link out of Hibernate state 9215 - * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device 9216 - * to active state. 9217 - * 4. If auto-bkops is enabled on the device, disable it. 9218 - * 9219 - * So following would be the possible power state after this function return 9220 - * successfully: 9221 - * S1: UFS device in Active state with VCC rail ON 9222 - * UniPro link in Active state 9223 - * All the UFS/UniPro controller clocks are ON 9224 - * 9225 - * Returns 0 for success and non-zero for failure 9079 + * 2. 
Turn ON VCC rail 9226 9080 */ 9227 9081 int ufshcd_runtime_resume(struct ufs_hba *hba) 9228 9082 { 9229 - int ret = 0; 9083 + int ret; 9230 9084 ktime_t start = ktime_get(); 9231 9085 9232 - if (!hba->is_powered) 9233 - goto out; 9234 - else 9235 - ret = ufshcd_resume(hba, UFS_RUNTIME_PM); 9236 - out: 9086 + ret = ufshcd_resume(hba); 9087 + 9237 9088 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, 9238 9089 ktime_to_us(ktime_sub(ktime_get(), start)), 9239 9090 hba->curr_dev_pwr_mode, hba->uic_link_state); ··· 9237 9116 * ufshcd_shutdown - shutdown routine 9238 9117 * @hba: per adapter instance 9239 9118 * 9240 - * This function would power off both UFS device and UFS link. 9119 + * This function would turn off both UFS device and UFS hba 9120 + * regulators. It would also disable clocks. 9241 9121 * 9242 9122 * Returns 0 always to allow force shutdown even in case of errors. 9243 9123 */ 9244 9124 int ufshcd_shutdown(struct ufs_hba *hba) 9245 9125 { 9246 - int ret = 0; 9247 - 9248 - down(&hba->host_sem); 9249 - hba->shutting_down = true; 9250 - up(&hba->host_sem); 9251 - 9252 - if (!hba->is_powered) 9253 - goto out; 9254 - 9255 9126 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) 9256 9127 goto out; 9257 9128 9258 9129 pm_runtime_get_sync(hba->dev); 9259 9130 9260 - ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); 9131 + ufshcd_suspend(hba); 9261 9132 out: 9262 - if (ret) 9263 - dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); 9264 9133 hba->is_powered = false; 9265 9134 /* allow force shutdown even in case of errors */ 9266 9135 return 0; ··· 9264 9153 */ 9265 9154 void ufshcd_remove(struct ufs_hba *hba) 9266 9155 { 9156 + if (hba->sdev_ufs_device) 9157 + ufshcd_rpm_get_sync(hba); 9267 9158 ufs_bsg_remove(hba); 9268 9159 ufs_sysfs_remove_nodes(hba->dev); 9269 9160 blk_cleanup_queue(hba->tmf_queue); ··· 9569 9456 } 9570 9457 EXPORT_SYMBOL_GPL(ufshcd_init); 9571 9458 9459 + void ufshcd_resume_complete(struct device *dev) 9460 + { 9461 + 
struct ufs_hba *hba = dev_get_drvdata(dev); 9462 + 9463 + if (hba->complete_put) { 9464 + ufshcd_rpm_put(hba); 9465 + hba->complete_put = false; 9466 + } 9467 + if (hba->rpmb_complete_put) { 9468 + ufshcd_rpmb_rpm_put(hba); 9469 + hba->rpmb_complete_put = false; 9470 + } 9471 + } 9472 + EXPORT_SYMBOL_GPL(ufshcd_resume_complete); 9473 + 9474 + int ufshcd_suspend_prepare(struct device *dev) 9475 + { 9476 + struct ufs_hba *hba = dev_get_drvdata(dev); 9477 + int ret; 9478 + 9479 + /* 9480 + * SCSI assumes that runtime-pm and system-pm for scsi drivers 9481 + * are same. And it doesn't wake up the device for system-suspend 9482 + * if it's runtime suspended. But ufs doesn't follow that. 9483 + * Refer ufshcd_resume_complete() 9484 + */ 9485 + if (hba->sdev_ufs_device) { 9486 + ret = ufshcd_rpm_get_sync(hba); 9487 + if (ret < 0 && ret != -EACCES) { 9488 + ufshcd_rpm_put(hba); 9489 + return ret; 9490 + } 9491 + hba->complete_put = true; 9492 + } 9493 + if (hba->sdev_rpmb) { 9494 + ufshcd_rpmb_rpm_get_sync(hba); 9495 + hba->rpmb_complete_put = true; 9496 + } 9497 + return 0; 9498 + } 9499 + EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare); 9500 + 9501 + #ifdef CONFIG_PM_SLEEP 9502 + static int ufshcd_wl_poweroff(struct device *dev) 9503 + { 9504 + struct scsi_device *sdev = to_scsi_device(dev); 9505 + struct ufs_hba *hba = shost_priv(sdev->host); 9506 + 9507 + __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); 9508 + return 0; 9509 + } 9510 + #endif 9511 + 9512 + static int ufshcd_wl_probe(struct device *dev) 9513 + { 9514 + struct scsi_device *sdev = to_scsi_device(dev); 9515 + 9516 + if (!is_device_wlun(sdev)) 9517 + return -ENODEV; 9518 + 9519 + blk_pm_runtime_init(sdev->request_queue, dev); 9520 + pm_runtime_set_autosuspend_delay(dev, 0); 9521 + pm_runtime_allow(dev); 9522 + 9523 + return 0; 9524 + } 9525 + 9526 + static int ufshcd_wl_remove(struct device *dev) 9527 + { 9528 + pm_runtime_forbid(dev); 9529 + return 0; 9530 + } 9531 + 9532 + static const struct dev_pm_ops 
ufshcd_wl_pm_ops = { 9533 + #ifdef CONFIG_PM_SLEEP 9534 + .suspend = ufshcd_wl_suspend, 9535 + .resume = ufshcd_wl_resume, 9536 + .freeze = ufshcd_wl_suspend, 9537 + .thaw = ufshcd_wl_resume, 9538 + .poweroff = ufshcd_wl_poweroff, 9539 + .restore = ufshcd_wl_resume, 9540 + #endif 9541 + SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL) 9542 + }; 9543 + 9544 + /* 9545 + * ufs_dev_wlun_template - describes ufs device wlun 9546 + * ufs-device wlun - used to send pm commands 9547 + * All luns are consumers of ufs-device wlun. 9548 + * 9549 + * Currently, no sd driver is present for wluns. 9550 + * Hence the no specific pm operations are performed. 9551 + * With ufs design, SSU should be sent to ufs-device wlun. 9552 + * Hence register a scsi driver for ufs wluns only. 9553 + */ 9554 + static struct scsi_driver ufs_dev_wlun_template = { 9555 + .gendrv = { 9556 + .name = "ufs_device_wlun", 9557 + .owner = THIS_MODULE, 9558 + .probe = ufshcd_wl_probe, 9559 + .remove = ufshcd_wl_remove, 9560 + .pm = &ufshcd_wl_pm_ops, 9561 + .shutdown = ufshcd_wl_shutdown, 9562 + }, 9563 + }; 9564 + 9565 + static int ufshcd_rpmb_probe(struct device *dev) 9566 + { 9567 + return is_rpmb_wlun(to_scsi_device(dev)) ? 
0 : -ENODEV; 9568 + } 9569 + 9570 + static inline int ufshcd_clear_rpmb_uac(struct ufs_hba *hba) 9571 + { 9572 + int ret = 0; 9573 + 9574 + if (!hba->wlun_rpmb_clr_ua) 9575 + return 0; 9576 + ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN); 9577 + if (!ret) 9578 + hba->wlun_rpmb_clr_ua = 0; 9579 + return ret; 9580 + } 9581 + 9582 + static int ufshcd_rpmb_resume(struct device *dev) 9583 + { 9584 + struct ufs_hba *hba = wlun_dev_to_hba(dev); 9585 + 9586 + if (hba->sdev_rpmb) 9587 + ufshcd_clear_rpmb_uac(hba); 9588 + return 0; 9589 + } 9590 + 9591 + static const struct dev_pm_ops ufs_rpmb_pm_ops = { 9592 + SET_RUNTIME_PM_OPS(NULL, ufshcd_rpmb_resume, NULL) 9593 + SET_SYSTEM_SLEEP_PM_OPS(NULL, ufshcd_rpmb_resume) 9594 + }; 9595 + 9596 + /** 9597 + * Describes the ufs rpmb wlun. 9598 + * Used only to send uac. 9599 + */ 9600 + static struct scsi_driver ufs_rpmb_wlun_template = { 9601 + .gendrv = { 9602 + .name = "ufs_rpmb_wlun", 9603 + .owner = THIS_MODULE, 9604 + .probe = ufshcd_rpmb_probe, 9605 + .pm = &ufs_rpmb_pm_ops, 9606 + }, 9607 + }; 9608 + 9572 9609 static int __init ufshcd_core_init(void) 9573 9610 { 9611 + int ret; 9612 + 9574 9613 ufs_debugfs_init(); 9575 - return 0; 9614 + 9615 + ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv); 9616 + if (ret) 9617 + goto debugfs_exit; 9618 + 9619 + ret = scsi_register_driver(&ufs_rpmb_wlun_template.gendrv); 9620 + if (ret) 9621 + goto unregister; 9622 + 9623 + return ret; 9624 + unregister: 9625 + scsi_unregister_driver(&ufs_dev_wlun_template.gendrv); 9626 + debugfs_exit: 9627 + ufs_debugfs_exit(); 9628 + return ret; 9576 9629 } 9577 9630 9578 9631 static void __exit ufshcd_core_exit(void) 9579 9632 { 9580 9633 ufs_debugfs_exit(); 9634 + scsi_unregister_driver(&ufs_rpmb_wlun_template.gendrv); 9635 + scsi_unregister_driver(&ufs_dev_wlun_template.gendrv); 9581 9636 } 9582 9637 9583 9638 module_init(ufshcd_core_init);
+33
drivers/scsi/ufs/ufshcd.h
··· 72 72 UFS_EVT_LINK_STARTUP_FAIL, 73 73 UFS_EVT_RESUME_ERR, 74 74 UFS_EVT_SUSPEND_ERR, 75 + UFS_EVT_WL_SUSP_ERR, 76 + UFS_EVT_WL_RES_ERR, 75 77 76 78 /* abnormal events */ 77 79 UFS_EVT_DEV_RESET, ··· 809 807 struct list_head clk_list_head; 810 808 811 809 bool wlun_dev_clr_ua; 810 + bool wlun_rpmb_clr_ua; 812 811 813 812 /* Number of requests aborts */ 814 813 int req_abort_count; ··· 849 846 struct delayed_work debugfs_ee_work; 850 847 u32 debugfs_ee_rate_limit_ms; 851 848 #endif 849 + u32 luns_avail; 850 + bool complete_put; 851 + bool rpmb_complete_put; 852 852 }; 853 853 854 854 /* Returns true if clocks can be gated. Otherwise false */ ··· 1111 1105 enum query_opcode desc_op); 1112 1106 1113 1107 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable); 1108 + int ufshcd_suspend_prepare(struct device *dev); 1109 + void ufshcd_resume_complete(struct device *dev); 1114 1110 1115 1111 /* Wrapper functions for safely calling variant operations */ 1116 1112 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) ··· 1315 1307 { 1316 1308 return ufshcd_update_ee_control(hba, &hba->ee_usr_mask, 1317 1309 &hba->ee_drv_mask, set, clr); 1310 + } 1311 + 1312 + static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba) 1313 + { 1314 + return pm_runtime_get_sync(&hba->sdev_ufs_device->sdev_gendev); 1315 + } 1316 + 1317 + static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba) 1318 + { 1319 + return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev); 1320 + } 1321 + 1322 + static inline int ufshcd_rpm_put(struct ufs_hba *hba) 1323 + { 1324 + return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev); 1325 + } 1326 + 1327 + static inline int ufshcd_rpmb_rpm_get_sync(struct ufs_hba *hba) 1328 + { 1329 + return pm_runtime_get_sync(&hba->sdev_rpmb->sdev_gendev); 1330 + } 1331 + 1332 + static inline int ufshcd_rpmb_rpm_put(struct ufs_hba *hba) 1333 + { 1334 + return pm_runtime_put(&hba->sdev_rpmb->sdev_gendev); 1318 1335 } 1319 1336 1320 1337 #endif /* End of 
Header */
+20
include/trace/events/ufs.h
··· 246 246 int dev_state, int link_state), 247 247 TP_ARGS(dev_name, err, usecs, dev_state, link_state)); 248 248 249 + DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend, 250 + TP_PROTO(const char *dev_name, int err, s64 usecs, 251 + int dev_state, int link_state), 252 + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); 253 + 254 + DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume, 255 + TP_PROTO(const char *dev_name, int err, s64 usecs, 256 + int dev_state, int link_state), 257 + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); 258 + 259 + DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend, 260 + TP_PROTO(const char *dev_name, int err, s64 usecs, 261 + int dev_state, int link_state), 262 + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); 263 + 264 + DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume, 265 + TP_PROTO(const char *dev_name, int err, s64 usecs, 266 + int dev_state, int link_state), 267 + TP_ARGS(dev_name, err, usecs, dev_state, link_state)); 268 + 249 269 TRACE_EVENT(ufshcd_command, 250 270 TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, 251 271 unsigned int tag, u32 doorbell, int transfer_len, u32 intr,