Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"This is a load of driver fixes (12 ufs, 1 mpt3sas, 1 cxgbi).

The two big core fixes are for power management ("block: Do not accept
any requests while suspended" and "block: Fix a race in the runtime
power management code") which finally sort out the resume problems
we've occasionally been having.

The resume fix needed seven precursor patches, which effectively rename
RQF_PREEMPT to RQF_PM, so the only "special" requests the block layer
exempts from power management gating are genuine power management
requests.

All of the non-PM preempt cases are removed except for the one in SCSI
Parallel Interface (spi) domain validation, which is a genuine case
where we have to run requests at high priority to validate the bus; it
now becomes an autopm get/put protected request"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (22 commits)
scsi: cxgb4i: Fix TLS dependency
scsi: ufs: Un-inline ufshcd_vops_device_reset function
scsi: ufs: Re-enable WriteBooster after device reset
scsi: ufs-mediatek: Use correct path to fix compile error
scsi: mpt3sas: Signedness bug in _base_get_diag_triggers()
scsi: block: Do not accept any requests while suspended
scsi: block: Remove RQF_PREEMPT and BLK_MQ_REQ_PREEMPT
scsi: core: Only process PM requests if rpm_status != RPM_ACTIVE
scsi: scsi_transport_spi: Set RQF_PM for domain validation commands
scsi: ide: Mark power management requests with RQF_PM instead of RQF_PREEMPT
scsi: ide: Do not set the RQF_PREEMPT flag for sense requests
scsi: block: Introduce BLK_MQ_REQ_PM
scsi: block: Fix a race in the runtime power management code
scsi: ufs-pci: Enable UFSHCD_CAP_RPM_AUTOSUSPEND for Intel controllers
scsi: ufs-pci: Fix recovery from hibernate exit errors for Intel controllers
scsi: ufs-pci: Ensure UFS device is in PowerDown mode for suspend-to-disk ->poweroff()
scsi: ufs-pci: Fix restore from S4 for Intel controllers
scsi: ufs-mediatek: Keep VCC always-on for specific devices
scsi: ufs: Allow regulators being always-on
scsi: ufs: Clear UAC for RPMB after ufshcd resets
...

+7 -6
block/blk-core.c
··· 18 18 #include <linux/bio.h> 19 19 #include <linux/blkdev.h> 20 20 #include <linux/blk-mq.h> 21 + #include <linux/blk-pm.h> 21 22 #include <linux/highmem.h> 22 23 #include <linux/mm.h> 23 24 #include <linux/pagemap.h> ··· 425 424 /** 426 425 * blk_queue_enter() - try to increase q->q_usage_counter 427 426 * @q: request queue pointer 428 - * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT 427 + * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM 429 428 */ 430 429 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) 431 430 { 432 - const bool pm = flags & BLK_MQ_REQ_PREEMPT; 431 + const bool pm = flags & BLK_MQ_REQ_PM; 433 432 434 433 while (true) { 435 434 bool success = false; ··· 441 440 * responsible for ensuring that that counter is 442 441 * globally visible before the queue is unfrozen. 443 442 */ 444 - if (pm || !blk_queue_pm_only(q)) { 443 + if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) || 444 + !blk_queue_pm_only(q)) { 445 445 success = true; 446 446 } else { 447 447 percpu_ref_put(&q->q_usage_counter); ··· 467 465 468 466 wait_event(q->mq_freeze_wq, 469 467 (!q->mq_freeze_depth && 470 - (pm || (blk_pm_request_resume(q), 471 - !blk_queue_pm_only(q)))) || 468 + blk_pm_resume_queue(pm, q)) || 472 469 blk_queue_dying(q)); 473 470 if (blk_queue_dying(q)) 474 471 return -ENODEV; ··· 631 630 struct request *req; 632 631 633 632 WARN_ON_ONCE(op & REQ_NOWAIT); 634 - WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT)); 633 + WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM)); 635 634 636 635 req = blk_mq_alloc_request(q, op, flags); 637 636 if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
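
The net effect of the blk_queue_enter() change is a small admission rule: a
request may enter the queue if the queue is not in pm-only mode, or if it is a
PM request and the queue has not already reached RPM_SUSPENDED; anything else
waits until the queue is resumed. Below is a standalone model of that rule in
plain userspace C; it deliberately mirrors the names in the hunk above but is
not the kernel implementation, and it omits the q->dev check and the
pm_request_resume() side effect:

#include <stdbool.h>
#include <stdio.h>

/* Standalone model of the admission rule, not the kernel code. */
enum rpm_status { RPM_ACTIVE, RPM_RESUMING, RPM_SUSPENDED, RPM_SUSPENDING };

struct queue_model {
    bool pm_only;                 /* blk_set_pm_only() counter > 0 */
    enum rpm_status rpm_status;   /* runtime PM state of the queue */
};

static bool may_enter(const struct queue_model *q, bool pm_request)
{
    if (!q->pm_only)
        return true;                                 /* normal operation */
    if (pm_request && q->rpm_status != RPM_SUSPENDED)
        return true;                                 /* PM requests drive suspend/resume */
    return false;                                    /* wait until the queue resumes */
}

int main(void)
{
    struct queue_model q = { .pm_only = true, .rpm_status = RPM_SUSPENDING };

    printf("non-PM while suspending: %d\n", may_enter(&q, false)); /* 0 */
    printf("PM while suspending:     %d\n", may_enter(&q, true));  /* 1 */
    q.rpm_status = RPM_SUSPENDED;
    printf("PM while suspended:      %d\n", may_enter(&q, true));  /* 0 */
    return 0;
}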
-1
block/blk-mq-debugfs.c
··· 298 298 RQF_NAME(MIXED_MERGE), 299 299 RQF_NAME(MQ_INFLIGHT), 300 300 RQF_NAME(DONTPREP), 301 - RQF_NAME(PREEMPT), 302 301 RQF_NAME(FAILED), 303 302 RQF_NAME(QUIET), 304 303 RQF_NAME(ELVPRIV),
+2 -2
block/blk-mq.c
··· 294 294 rq->mq_hctx = data->hctx; 295 295 rq->rq_flags = 0; 296 296 rq->cmd_flags = data->cmd_flags; 297 - if (data->flags & BLK_MQ_REQ_PREEMPT) 298 - rq->rq_flags |= RQF_PREEMPT; 297 + if (data->flags & BLK_MQ_REQ_PM) 298 + rq->rq_flags |= RQF_PM; 299 299 if (blk_queue_io_stat(data->q)) 300 300 rq->rq_flags |= RQF_IO_STAT; 301 301 INIT_LIST_HEAD(&rq->queuelist);
+9 -6
block/blk-pm.c
··· 67 67 68 68 WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE); 69 69 70 + spin_lock_irq(&q->queue_lock); 71 + q->rpm_status = RPM_SUSPENDING; 72 + spin_unlock_irq(&q->queue_lock); 73 + 70 74 /* 71 75 * Increase the pm_only counter before checking whether any 72 76 * non-PM blk_queue_enter() calls are in progress to avoid that any ··· 93 89 /* Switch q_usage_counter back to per-cpu mode. */ 94 90 blk_mq_unfreeze_queue(q); 95 91 96 - spin_lock_irq(&q->queue_lock); 97 - if (ret < 0) 92 + if (ret < 0) { 93 + spin_lock_irq(&q->queue_lock); 94 + q->rpm_status = RPM_ACTIVE; 98 95 pm_runtime_mark_last_busy(q->dev); 99 - else 100 - q->rpm_status = RPM_SUSPENDING; 101 - spin_unlock_irq(&q->queue_lock); 96 + spin_unlock_irq(&q->queue_lock); 102 97 103 - if (ret) 104 98 blk_clear_pm_only(q); 99 + } 105 100 106 101 return ret; 107 102 }
+9 -5
block/blk-pm.h
··· 6 6 #include <linux/pm_runtime.h> 7 7 8 8 #ifdef CONFIG_PM 9 - static inline void blk_pm_request_resume(struct request_queue *q) 9 + static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q) 10 10 { 11 - if (q->dev && (q->rpm_status == RPM_SUSPENDED || 12 - q->rpm_status == RPM_SUSPENDING)) 13 - pm_request_resume(q->dev); 11 + if (!q->dev || !blk_queue_pm_only(q)) 12 + return 1; /* Nothing to do */ 13 + if (pm && q->rpm_status != RPM_SUSPENDED) 14 + return 1; /* Request allowed */ 15 + pm_request_resume(q->dev); 16 + return 0; 14 17 } 15 18 16 19 static inline void blk_pm_mark_last_busy(struct request *rq) ··· 47 44 --rq->q->nr_pending; 48 45 } 49 46 #else 50 - static inline void blk_pm_request_resume(struct request_queue *q) 47 + static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q) 51 48 { 49 + return 1; 52 50 } 53 51 54 52 static inline void blk_pm_mark_last_busy(struct request *rq)
-1
drivers/ide/ide-atapi.c
··· 223 223 sense_rq->rq_disk = rq->rq_disk; 224 224 sense_rq->cmd_flags = REQ_OP_DRV_IN; 225 225 ide_req(sense_rq)->type = ATA_PRIV_SENSE; 226 - sense_rq->rq_flags |= RQF_PREEMPT; 227 226 228 227 req->cmd[0] = GPCMD_REQUEST_SENSE; 229 228 req->cmd[4] = cmd_len;
+1 -6
drivers/ide/ide-io.c
··· 515 515 * above to return us whatever is in the queue. Since we call 516 516 * ide_do_request() ourselves, we end up taking requests while 517 517 * the queue is blocked... 518 - * 519 - * We let requests forced at head of queue with ide-preempt 520 - * though. I hope that doesn't happen too much, hopefully not 521 - * unless the subdriver triggers such a thing in its own PM 522 - * state machine. 523 518 */ 524 519 if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && 525 520 ata_pm_request(rq) == 0 && 526 - (rq->rq_flags & RQF_PREEMPT) == 0) { 521 + (rq->rq_flags & RQF_PM) == 0) { 527 522 /* there should be no pending command at this point */ 528 523 ide_unlock_port(hwif); 529 524 goto plug_device;
+1 -1
drivers/ide/ide-pm.c
··· 77 77 } 78 78 79 79 memset(&rqpm, 0, sizeof(rqpm)); 80 - rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT); 80 + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM); 81 81 ide_req(rq)->type = ATA_PRIV_PM_RESUME; 82 82 ide_req(rq)->special = &rqpm; 83 83 rqpm.pm_step = IDE_PM_START_RESUME;
+1
drivers/scsi/cxgbi/cxgb4i/Kconfig
··· 4 4 depends on PCI && INET && (IPV6 || IPV6=n) 5 5 depends on THERMAL || !THERMAL 6 6 depends on ETHERNET 7 + depends on TLS || TLS=n 7 8 select NET_VENDOR_CHELSIO 8 9 select CHELSIO_T4 9 10 select CHELSIO_LIB
+1 -1
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 5034 5034 static void 5035 5035 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc) 5036 5036 { 5037 - u16 trigger_flags; 5037 + int trigger_flags; 5038 5038 5039 5039 /* 5040 5040 * Default setting of master trigger.
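
The mpt3sas change is a signedness fix: the hunk only shows the declaration,
but assuming trigger_flags receives a negative errno-style value from a helper
on failure, declaring it u16 makes any "< 0" error check unreachable because
the value wraps to a large positive number. A standalone illustration in
ordinary userspace C (none of this is mpt3sas code):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a helper that reports failure with a negative return value. */
static int read_trigger_page(void)
{
    return -22;    /* pretend the read failed (-EINVAL) */
}

int main(void)
{
    uint16_t as_u16 = read_trigger_page();   /* -22 wraps to 65514 */
    int as_int = read_trigger_page();

    if (as_u16 < 0)       /* promoted to int 65514; never true (compilers warn here) */
        printf("u16: error detected\n");
    else
        printf("u16: error silently ignored (value=%u)\n", as_u16);

    if (as_int < 0)       /* behaves as intended */
        printf("int: error detected (%d)\n", as_int);

    return 0;
}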
+14 -13
drivers/scsi/scsi_lib.c
··· 249 249 250 250 req = blk_get_request(sdev->request_queue, 251 251 data_direction == DMA_TO_DEVICE ? 252 - REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT); 252 + REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 253 + rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0); 253 254 if (IS_ERR(req)) 254 255 return ret; 255 256 rq = scsi_req(req); ··· 1207 1206 scsi_device_state_check(struct scsi_device *sdev, struct request *req) 1208 1207 { 1209 1208 switch (sdev->sdev_state) { 1209 + case SDEV_CREATED: 1210 + return BLK_STS_OK; 1210 1211 case SDEV_OFFLINE: 1211 1212 case SDEV_TRANSPORT_OFFLINE: 1212 1213 /* ··· 1235 1232 return BLK_STS_RESOURCE; 1236 1233 case SDEV_QUIESCE: 1237 1234 /* 1238 - * If the devices is blocked we defer normal commands. 1235 + * If the device is blocked we only accept power management 1236 + * commands. 1239 1237 */ 1240 - if (req && !(req->rq_flags & RQF_PREEMPT)) 1238 + if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) 1241 1239 return BLK_STS_RESOURCE; 1242 1240 return BLK_STS_OK; 1243 1241 default: 1244 1242 /* 1245 1243 * For any other not fully online state we only allow 1246 - * special commands. In particular any user initiated 1247 - * command is not allowed. 1244 + * power management commands. 1248 1245 */ 1249 - if (req && !(req->rq_flags & RQF_PREEMPT)) 1246 + if (req && !(req->rq_flags & RQF_PM)) 1250 1247 return BLK_STS_IOERR; 1251 1248 return BLK_STS_OK; 1252 1249 } ··· 2519 2516 EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2520 2517 2521 2518 /** 2522 - * scsi_device_quiesce - Block user issued commands. 2519 + * scsi_device_quiesce - Block all commands except power management. 2523 2520 * @sdev: scsi device to quiesce. 2524 2521 * 2525 2522 * This works by trying to transition to the SDEV_QUIESCE state 2526 2523 * (which must be a legal transition). When the device is in this 2527 - * state, only special requests will be accepted, all others will 2528 - * be deferred. Since special requests may also be requeued requests, 2529 - * a successful return doesn't guarantee the device will be 2530 - * totally quiescent. 2524 + * state, only power management requests will be accepted, all others will 2525 + * be deferred. 2531 2526 * 2532 2527 * Must be called with user context, may sleep. 2533 2528 * ··· 2587 2586 * device deleted during suspend) 2588 2587 */ 2589 2588 mutex_lock(&sdev->state_mutex); 2589 + if (sdev->sdev_state == SDEV_QUIESCE) 2590 + scsi_device_set_state(sdev, SDEV_RUNNING); 2590 2591 if (sdev->quiesced_by) { 2591 2592 sdev->quiesced_by = NULL; 2592 2593 blk_clear_pm_only(sdev->request_queue); 2593 2594 } 2594 - if (sdev->sdev_state == SDEV_QUIESCE) 2595 - scsi_device_set_state(sdev, SDEV_RUNNING); 2596 2595 mutex_unlock(&sdev->state_mutex); 2597 2596 } 2598 2597 EXPORT_SYMBOL(scsi_device_resume);
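
The scsi_lib.c hunk tightens the quiesce semantics: scsi_device_state_check()
(which only runs for devices that are not in the ordinary running state) now
lets newly created devices through, accepts only RQF_PM commands while a device
is in SDEV_QUIESCE (anything else is warned about and deferred), and restricts
the remaining not-fully-online states to power management commands as well. A
simplified decision table as a standalone model, not the kernel code, with the
offline/blocked details abbreviated:

#include <stdbool.h>
#include <stdio.h>

/* Standalone model; states not touched by the hunk are folded into the default arm. */
enum sdev_state { SDEV_CREATED, SDEV_QUIESCE, SDEV_OFFLINE, SDEV_OTHER };
enum verdict    { ACCEPT, DEFER, FAIL };

static enum verdict check(enum sdev_state state, bool rqf_pm)
{
    switch (state) {
    case SDEV_CREATED:
        return ACCEPT;                        /* newly created devices now pass */
    case SDEV_QUIESCE:
        return rqf_pm ? ACCEPT : DEFER;       /* only power management commands */
    case SDEV_OFFLINE:
        return FAIL;                          /* offline devices reject all I/O */
    default:
        return rqf_pm ? ACCEPT : FAIL;        /* other non-running states: PM only */
    }
}

int main(void)
{
    printf("quiesce + PM command: %d\n", check(SDEV_QUIESCE, true));   /* ACCEPT */
    printf("quiesce + normal I/O: %d\n", check(SDEV_QUIESCE, false));  /* DEFER */
    return 0;
}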
+19 -8
drivers/scsi/scsi_transport_spi.c
··· 117 117 sshdr = &sshdr_tmp; 118 118 119 119 for(i = 0; i < DV_RETRIES; i++) { 120 + /* 121 + * The purpose of the RQF_PM flag below is to bypass the 122 + * SDEV_QUIESCE state. 123 + */ 120 124 result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense, 121 125 sshdr, DV_TIMEOUT, /* retries */ 1, 122 126 REQ_FAILFAST_DEV | 123 127 REQ_FAILFAST_TRANSPORT | 124 128 REQ_FAILFAST_DRIVER, 125 - 0, NULL); 129 + RQF_PM, NULL); 126 130 if (driver_byte(result) != DRIVER_SENSE || 127 131 sshdr->sense_key != UNIT_ATTENTION) 128 132 break; ··· 1009 1005 */ 1010 1006 lock_system_sleep(); 1011 1007 1008 + if (scsi_autopm_get_device(sdev)) 1009 + goto unlock_system_sleep; 1010 + 1012 1011 if (unlikely(spi_dv_in_progress(starget))) 1013 - goto unlock; 1012 + goto put_autopm; 1014 1013 1015 1014 if (unlikely(scsi_device_get(sdev))) 1016 - goto unlock; 1015 + goto put_autopm; 1017 1016 1018 1017 spi_dv_in_progress(starget) = 1; 1019 1018 1020 1019 buffer = kzalloc(len, GFP_KERNEL); 1021 1020 1022 1021 if (unlikely(!buffer)) 1023 - goto out_put; 1022 + goto put_sdev; 1024 1023 1025 1024 /* We need to verify that the actual device will quiesce; the 1026 1025 * later target quiesce is just a nice to have */ 1027 1026 if (unlikely(scsi_device_quiesce(sdev))) 1028 - goto out_free; 1027 + goto free_buffer; 1029 1028 1030 1029 scsi_target_quiesce(starget); 1031 1030 ··· 1048 1041 1049 1042 spi_initial_dv(starget) = 1; 1050 1043 1051 - out_free: 1044 + free_buffer: 1052 1045 kfree(buffer); 1053 - out_put: 1046 + 1047 + put_sdev: 1054 1048 spi_dv_in_progress(starget) = 0; 1055 1049 scsi_device_put(sdev); 1056 - unlock: 1050 + put_autopm: 1051 + scsi_autopm_put_device(sdev); 1052 + 1053 + unlock_system_sleep: 1057 1054 unlock_system_sleep(); 1058 1055 } 1059 1056 EXPORT_SYMBOL(spi_dv_device);
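
With RQF_PREEMPT gone, SPI domain validation is the one remaining place that
legitimately needs to push commands past a quiesced device, and it now does so
explicitly: hold a runtime PM reference for the duration of validation and flag
the individual commands RQF_PM. A minimal sketch of that pattern; the wrapper
name is made up, while scsi_autopm_get_device()/scsi_autopm_put_device(),
scsi_execute() and RQF_PM are the interfaces actually used in the hunk above:

/*
 * Sketch only: run one diagnostic command against a device that may be
 * quiesced and runtime suspended.  run_dv_command() is hypothetical.
 */
static int run_dv_command(struct scsi_device *sdev, const unsigned char *cmd,
                          void *buf, unsigned int len,
                          struct scsi_sense_hdr *sshdr)
{
    int result;

    if (scsi_autopm_get_device(sdev))   /* resume the device and pin it active */
        return -EIO;

    /* RQF_PM lets the command through the SDEV_QUIESCE gate. */
    result = scsi_execute(sdev, cmd, DMA_FROM_DEVICE, buf, len, NULL, sshdr,
                          DV_TIMEOUT, /* retries */ 1, 0, RQF_PM, NULL);

    scsi_autopm_put_device(sdev);
    return result;
}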
+1 -1
drivers/scsi/ufs/ufs-mediatek-trace.h
··· 31 31 32 32 #undef TRACE_INCLUDE_PATH 33 33 #undef TRACE_INCLUDE_FILE 34 - #define TRACE_INCLUDE_PATH . 34 + #define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/ 35 35 #define TRACE_INCLUDE_FILE ufs-mediatek-trace 36 36 #include <trace/define_trace.h>
+21
drivers/scsi/ufs/ufs-mediatek.c
··· 70 70 return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL); 71 71 } 72 72 73 + static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba) 74 + { 75 + struct ufs_mtk_host *host = ufshcd_get_variant(hba); 76 + 77 + return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC); 78 + } 79 + 73 80 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) 74 81 { 75 82 u32 tmp; ··· 520 513 521 514 if (of_property_read_bool(np, "mediatek,ufs-disable-ah8")) 522 515 host->caps |= UFS_MTK_CAP_DISABLE_AH8; 516 + 517 + if (of_property_read_bool(np, "mediatek,ufs-broken-vcc")) 518 + host->caps |= UFS_MTK_CAP_BROKEN_VCC; 523 519 524 520 dev_info(hba->dev, "caps: 0x%x", host->caps); 525 521 } ··· 1013 1003 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) 1014 1004 { 1015 1005 ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); 1006 + 1007 + if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc && 1008 + (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) { 1009 + hba->vreg_info.vcc->always_on = true; 1010 + /* 1011 + * VCC will be kept always-on thus we don't 1012 + * need any delay during regulator operations 1013 + */ 1014 + hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | 1015 + UFS_DEVICE_QUIRK_DELAY_AFTER_LPM); 1016 + } 1016 1017 } 1017 1018 1018 1019 static void ufs_mtk_event_notify(struct ufs_hba *hba,
+1
drivers/scsi/ufs/ufs-mediatek.h
··· 81 81 UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0, 82 82 UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1, 83 83 UFS_MTK_CAP_DISABLE_AH8 = 1 << 2, 84 + UFS_MTK_CAP_BROKEN_VCC = 1 << 3, 84 85 }; 85 86 86 87 struct ufs_mtk_crypt_cfg {
+1 -1
drivers/scsi/ufs/ufs.h
··· 330 330 UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), 331 331 }; 332 332 333 - #define POWER_DESC_MAX_SIZE 0x62 334 333 #define POWER_DESC_MAX_ACTV_ICC_LVLS 16 335 334 336 335 /* Attribute bActiveICCLevel parameter bit masks definitions */ ··· 512 513 struct ufs_vreg { 513 514 struct regulator *reg; 514 515 const char *name; 516 + bool always_on; 515 517 bool enabled; 516 518 int min_uV; 517 519 int max_uV;
+71 -2
drivers/scsi/ufs/ufshcd-pci.c
··· 148 148 { 149 149 struct intel_host *host; 150 150 151 + hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; 152 + 151 153 host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL); 152 154 if (!host) 153 155 return -ENOMEM; ··· 165 163 intel_ltr_hide(hba->dev); 166 164 } 167 165 166 + static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op) 167 + { 168 + /* 169 + * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base 170 + * address registers must be restored because the restore kernel can 171 + * have used different addresses. 172 + */ 173 + ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 174 + REG_UTP_TRANSFER_REQ_LIST_BASE_L); 175 + ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), 176 + REG_UTP_TRANSFER_REQ_LIST_BASE_H); 177 + ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), 178 + REG_UTP_TASK_REQ_LIST_BASE_L); 179 + ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), 180 + REG_UTP_TASK_REQ_LIST_BASE_H); 181 + 182 + if (ufshcd_is_link_hibern8(hba)) { 183 + int ret = ufshcd_uic_hibern8_exit(hba); 184 + 185 + if (!ret) { 186 + ufshcd_set_link_active(hba); 187 + } else { 188 + dev_err(hba->dev, "%s: hibern8 exit failed %d\n", 189 + __func__, ret); 190 + /* 191 + * Force reset and restore. Any other actions can lead 192 + * to an unrecoverable state. 193 + */ 194 + ufshcd_set_link_off(hba); 195 + } 196 + } 197 + 198 + return 0; 199 + } 200 + 168 201 static int ufs_intel_ehl_init(struct ufs_hba *hba) 169 202 { 170 203 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; ··· 211 174 .init = ufs_intel_common_init, 212 175 .exit = ufs_intel_common_exit, 213 176 .link_startup_notify = ufs_intel_link_startup_notify, 177 + .resume = ufs_intel_resume, 214 178 }; 215 179 216 180 static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = { ··· 219 181 .init = ufs_intel_ehl_init, 220 182 .exit = ufs_intel_common_exit, 221 183 .link_startup_notify = ufs_intel_link_startup_notify, 184 + .resume = ufs_intel_resume, 222 185 }; 223 186 224 187 #ifdef CONFIG_PM_SLEEP ··· 246 207 { 247 208 return ufshcd_system_resume(dev_get_drvdata(dev)); 248 209 } 210 + 211 + /** 212 + * ufshcd_pci_poweroff - suspend-to-disk poweroff function 213 + * @dev: pointer to PCI device handle 214 + * 215 + * Returns 0 if successful 216 + * Returns non-zero otherwise 217 + */ 218 + static int ufshcd_pci_poweroff(struct device *dev) 219 + { 220 + struct ufs_hba *hba = dev_get_drvdata(dev); 221 + int spm_lvl = hba->spm_lvl; 222 + int ret; 223 + 224 + /* 225 + * For poweroff we need to set the UFS device to PowerDown mode. 226 + * Force spm_lvl to ensure that. 227 + */ 228 + hba->spm_lvl = 5; 229 + ret = ufshcd_system_suspend(hba); 230 + hba->spm_lvl = spm_lvl; 231 + return ret; 232 + } 233 + 249 234 #endif /* !CONFIG_PM_SLEEP */ 250 235 251 236 #ifdef CONFIG_PM ··· 365 302 } 366 303 367 304 static const struct dev_pm_ops ufshcd_pci_pm_ops = { 368 - SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, 369 - ufshcd_pci_resume) 305 + #ifdef CONFIG_PM_SLEEP 306 + .suspend = ufshcd_pci_suspend, 307 + .resume = ufshcd_pci_resume, 308 + .freeze = ufshcd_pci_suspend, 309 + .thaw = ufshcd_pci_resume, 310 + .poweroff = ufshcd_pci_poweroff, 311 + .restore = ufshcd_pci_resume, 312 + #endif 370 313 SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend, 371 314 ufshcd_pci_runtime_resume, 372 315 ufshcd_pci_runtime_idle)
+29 -16
drivers/scsi/ufs/ufshcd.c
··· 225 225 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); 226 226 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); 227 227 static void ufshcd_hba_exit(struct ufs_hba *hba); 228 + static int ufshcd_clear_ua_wluns(struct ufs_hba *hba); 228 229 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async); 229 230 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); 230 231 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); ··· 579 578 names[hba->pwr_info.pwr_rx], 580 579 names[hba->pwr_info.pwr_tx], 581 580 hba->pwr_info.hs_rate); 581 + } 582 + 583 + static void ufshcd_device_reset(struct ufs_hba *hba) 584 + { 585 + int err; 586 + 587 + err = ufshcd_vops_device_reset(hba); 588 + 589 + if (!err) { 590 + ufshcd_set_ufs_dev_active(hba); 591 + if (ufshcd_is_wb_allowed(hba)) { 592 + hba->wb_enabled = false; 593 + hba->wb_buf_flush_enabled = false; 594 + } 595 + } 596 + if (err != -EOPNOTSUPP) 597 + ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); 582 598 } 583 599 584 600 void ufshcd_delay_us(unsigned long us, unsigned long tolerance) ··· 3683 3665 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3684 3666 if (ret) 3685 3667 dev_err(hba->dev, 3686 - "dme-reset: error code %d\n", ret); 3668 + "dme-enable: error code %d\n", ret); 3687 3669 3688 3670 return ret; 3689 3671 } ··· 3982 3964 spin_unlock_irqrestore(hba->host->host_lock, flags); 3983 3965 3984 3966 /* Reset the attached device */ 3985 - ufshcd_vops_device_reset(hba); 3967 + ufshcd_device_reset(hba); 3986 3968 3987 3969 ret = ufshcd_host_reset_and_restore(hba); 3988 3970 ··· 6948 6930 6949 6931 /* Establish the link again and restore the device */ 6950 6932 err = ufshcd_probe_hba(hba, false); 6951 - 6933 + if (!err) 6934 + ufshcd_clear_ua_wluns(hba); 6952 6935 out: 6953 6936 if (err) 6954 6937 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); ··· 6987 6968 6988 6969 do { 6989 6970 /* Reset the attached device */ 6990 - ufshcd_vops_device_reset(hba); 6971 + ufshcd_device_reset(hba); 6991 6972 6992 6973 err = ufshcd_host_reset_and_restore(hba); 6993 6974 } while (err && --retries); ··· 8064 8045 { 8065 8046 int ret = 0; 8066 8047 8067 - if (!vreg || !vreg->enabled) 8048 + if (!vreg || !vreg->enabled || vreg->always_on) 8068 8049 goto out; 8069 8050 8070 8051 ret = regulator_disable(vreg->reg); ··· 8433 8414 * handling context. 8434 8415 */ 8435 8416 hba->host->eh_noresume = 1; 8436 - if (hba->wlun_dev_clr_ua) { 8437 - ret = ufshcd_send_request_sense(hba, sdp); 8438 - if (ret) 8439 - goto out; 8440 - /* Unit attention condition is cleared now */ 8441 - hba->wlun_dev_clr_ua = false; 8442 - } 8417 + ufshcd_clear_ua_wluns(hba); 8443 8418 8444 8419 cmd[4] = pwr_mode << 4; 8445 8420 ··· 8454 8441 8455 8442 if (!ret) 8456 8443 hba->curr_dev_pwr_mode = pwr_mode; 8457 - out: 8444 + 8458 8445 scsi_device_put(sdp); 8459 8446 hba->host->eh_noresume = 0; 8460 8447 return ret; ··· 8760 8747 * further below. 
8761 8748 */ 8762 8749 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 8763 - ufshcd_vops_device_reset(hba); 8750 + ufshcd_device_reset(hba); 8764 8751 WARN_ON(!ufshcd_is_link_off(hba)); 8765 8752 } 8766 8753 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) ··· 8770 8757 set_dev_active: 8771 8758 /* Can also get here needing to exit DeepSleep */ 8772 8759 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 8773 - ufshcd_vops_device_reset(hba); 8760 + ufshcd_device_reset(hba); 8774 8761 ufshcd_host_reset_and_restore(hba); 8775 8762 } 8776 8763 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) ··· 9366 9353 } 9367 9354 9368 9355 /* Reset the attached device */ 9369 - ufshcd_vops_device_reset(hba); 9356 + ufshcd_device_reset(hba); 9370 9357 9371 9358 ufshcd_init_crypto(hba); 9372 9359
+4 -8
drivers/scsi/ufs/ufshcd.h
··· 1218 1218 hba->vops->dbg_register_dump(hba); 1219 1219 } 1220 1220 1221 - static inline void ufshcd_vops_device_reset(struct ufs_hba *hba) 1221 + static inline int ufshcd_vops_device_reset(struct ufs_hba *hba) 1222 1222 { 1223 - if (hba->vops && hba->vops->device_reset) { 1224 - int err = hba->vops->device_reset(hba); 1223 + if (hba->vops && hba->vops->device_reset) 1224 + return hba->vops->device_reset(hba); 1225 1225 1226 - if (!err) 1227 - ufshcd_set_ufs_dev_active(hba); 1228 - if (err != -EOPNOTSUPP) 1229 - ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); 1230 - } 1226 + return -EOPNOTSUPP; 1231 1227 } 1232 1228 1233 1229 static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
+2 -2
include/linux/blk-mq.h
··· 447 447 BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), 448 448 /* allocate from reserved pool */ 449 449 BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), 450 - /* set RQF_PREEMPT */ 451 - BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3), 450 + /* set RQF_PM */ 451 + BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2), 452 452 }; 453 453 454 454 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
+13 -5
include/linux/blkdev.h
··· 79 79 #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) 80 80 /* don't call prep for this one */ 81 81 #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) 82 - /* set for "ide_preempt" requests and also for requests for which the SCSI 83 - "quiesce" state must be ignored. */ 84 - #define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) 85 82 /* vaguely specified driver internal error. Ignored by the block layer */ 86 83 #define RQF_FAILED ((__force req_flags_t)(1 << 10)) 87 84 /* don't warn about errors */ ··· 427 430 unsigned long queue_flags; 428 431 /* 429 432 * Number of contexts that have called blk_set_pm_only(). If this 430 - * counter is above zero then only RQF_PM and RQF_PREEMPT requests are 431 - * processed. 433 + * counter is above zero then only RQF_PM requests are processed. 432 434 */ 433 435 atomic_t pm_only; 434 436 ··· 691 695 { 692 696 return q->mq_ops; 693 697 } 698 + 699 + #ifdef CONFIG_PM 700 + static inline enum rpm_status queue_rpm_status(struct request_queue *q) 701 + { 702 + return q->rpm_status; 703 + } 704 + #else 705 + static inline enum rpm_status queue_rpm_status(struct request_queue *q) 706 + { 707 + return RPM_ACTIVE; 708 + } 709 + #endif 694 710 695 711 static inline enum blk_zoned_model 696 712 blk_queue_zoned_model(struct request_queue *q)