Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"This is a load of driver fixes (12 ufs, 1 mpt3sas, 1 cxgbi).

The two big core fixes are for power management ("block: Do not accept
any requests while suspended" and "block: Fix a race in the runtime
power management code") which finally sort out the resume problems
we've occasionally been having.

To make the resume fix, there are seven necessary precursors which
effectively rename RQF_PREEMPT to RQF_PM, so every "special" request
in block is automatically a power-management-exempt one.

All of the non-PM preempt cases are removed except for the one in the
SCSI Parallel Interface (spi) domain validation, which is a genuine
case where we have to run requests at high priority to validate the
bus, so this becomes an autopm get/put protected request"
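
As a quick illustration of the new interface (a hypothetical
driver-side sketch, not code from this series): a power management
command is now allocated with BLK_MQ_REQ_PM, which makes the block
layer set RQF_PM on the request so it can pass blk_queue_enter()
while the queue is in pm-only mode. The function name and queue
pointer below are placeholders.

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>
	#include <linux/err.h>

	static int example_issue_pm_cmd(struct request_queue *q)
	{
		struct request *rq;

		/* BLK_MQ_REQ_PM replaces BLK_MQ_REQ_PREEMPT for PM requests */
		rq = blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		/*
		 * RQF_PM is now set, so the request is not deferred by the
		 * blk_set_pm_only() / SDEV_QUIESCE gating (unless the queue
		 * is fully runtime-suspended).
		 */
		blk_execute_rq(q, NULL, rq, /* at_head */ true);

		blk_put_request(rq);
		return 0;
	}

Callers that previously relied on RQF_PREEMPT to bypass a quiesced
queue, such as the spi domain validation path below, instead hold a
runtime PM reference via scsi_autopm_get_device()/scsi_autopm_put_device()
around the operation.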

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (22 commits)
scsi: cxgb4i: Fix TLS dependency
scsi: ufs: Un-inline ufshcd_vops_device_reset function
scsi: ufs: Re-enable WriteBooster after device reset
scsi: ufs-mediatek: Use correct path to fix compile error
scsi: mpt3sas: Signedness bug in _base_get_diag_triggers()
scsi: block: Do not accept any requests while suspended
scsi: block: Remove RQF_PREEMPT and BLK_MQ_REQ_PREEMPT
scsi: core: Only process PM requests if rpm_status != RPM_ACTIVE
scsi: scsi_transport_spi: Set RQF_PM for domain validation commands
scsi: ide: Mark power management requests with RQF_PM instead of RQF_PREEMPT
scsi: ide: Do not set the RQF_PREEMPT flag for sense requests
scsi: block: Introduce BLK_MQ_REQ_PM
scsi: block: Fix a race in the runtime power management code
scsi: ufs-pci: Enable UFSHCD_CAP_RPM_AUTOSUSPEND for Intel controllers
scsi: ufs-pci: Fix recovery from hibernate exit errors for Intel controllers
scsi: ufs-pci: Ensure UFS device is in PowerDown mode for suspend-to-disk ->poweroff()
scsi: ufs-pci: Fix restore from S4 for Intel controllers
scsi: ufs-mediatek: Keep VCC always-on for specific devices
scsi: ufs: Allow regulators being always-on
scsi: ufs: Clear UAC for RPMB after ufshcd resets
...

+7 -6
block/blk-core.c
··· 18 #include <linux/bio.h> 19 #include <linux/blkdev.h> 20 #include <linux/blk-mq.h> 21 #include <linux/highmem.h> 22 #include <linux/mm.h> 23 #include <linux/pagemap.h> ··· 425 /** 426 * blk_queue_enter() - try to increase q->q_usage_counter 427 * @q: request queue pointer 428 - * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT 429 */ 430 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) 431 { 432 - const bool pm = flags & BLK_MQ_REQ_PREEMPT; 433 434 while (true) { 435 bool success = false; ··· 441 * responsible for ensuring that that counter is 442 * globally visible before the queue is unfrozen. 443 */ 444 - if (pm || !blk_queue_pm_only(q)) { 445 success = true; 446 } else { 447 percpu_ref_put(&q->q_usage_counter); ··· 467 468 wait_event(q->mq_freeze_wq, 469 (!q->mq_freeze_depth && 470 - (pm || (blk_pm_request_resume(q), 471 - !blk_queue_pm_only(q)))) || 472 blk_queue_dying(q)); 473 if (blk_queue_dying(q)) 474 return -ENODEV; ··· 631 struct request *req; 632 633 WARN_ON_ONCE(op & REQ_NOWAIT); 634 - WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT)); 635 636 req = blk_mq_alloc_request(q, op, flags); 637 if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
··· 18 #include <linux/bio.h> 19 #include <linux/blkdev.h> 20 #include <linux/blk-mq.h> 21 + #include <linux/blk-pm.h> 22 #include <linux/highmem.h> 23 #include <linux/mm.h> 24 #include <linux/pagemap.h> ··· 424 /** 425 * blk_queue_enter() - try to increase q->q_usage_counter 426 * @q: request queue pointer 427 + * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM 428 */ 429 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) 430 { 431 + const bool pm = flags & BLK_MQ_REQ_PM; 432 433 while (true) { 434 bool success = false; ··· 440 * responsible for ensuring that that counter is 441 * globally visible before the queue is unfrozen. 442 */ 443 + if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) || 444 + !blk_queue_pm_only(q)) { 445 success = true; 446 } else { 447 percpu_ref_put(&q->q_usage_counter); ··· 465 466 wait_event(q->mq_freeze_wq, 467 (!q->mq_freeze_depth && 468 + blk_pm_resume_queue(pm, q)) || 469 blk_queue_dying(q)); 470 if (blk_queue_dying(q)) 471 return -ENODEV; ··· 630 struct request *req; 631 632 WARN_ON_ONCE(op & REQ_NOWAIT); 633 + WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM)); 634 635 req = blk_mq_alloc_request(q, op, flags); 636 if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
-1
block/blk-mq-debugfs.c
··· 298 RQF_NAME(MIXED_MERGE), 299 RQF_NAME(MQ_INFLIGHT), 300 RQF_NAME(DONTPREP), 301 - RQF_NAME(PREEMPT), 302 RQF_NAME(FAILED), 303 RQF_NAME(QUIET), 304 RQF_NAME(ELVPRIV),
··· 298 RQF_NAME(MIXED_MERGE), 299 RQF_NAME(MQ_INFLIGHT), 300 RQF_NAME(DONTPREP), 301 RQF_NAME(FAILED), 302 RQF_NAME(QUIET), 303 RQF_NAME(ELVPRIV),
+2 -2
block/blk-mq.c
··· 294 rq->mq_hctx = data->hctx; 295 rq->rq_flags = 0; 296 rq->cmd_flags = data->cmd_flags; 297 - if (data->flags & BLK_MQ_REQ_PREEMPT) 298 - rq->rq_flags |= RQF_PREEMPT; 299 if (blk_queue_io_stat(data->q)) 300 rq->rq_flags |= RQF_IO_STAT; 301 INIT_LIST_HEAD(&rq->queuelist);
··· 294 rq->mq_hctx = data->hctx; 295 rq->rq_flags = 0; 296 rq->cmd_flags = data->cmd_flags; 297 + if (data->flags & BLK_MQ_REQ_PM) 298 + rq->rq_flags |= RQF_PM; 299 if (blk_queue_io_stat(data->q)) 300 rq->rq_flags |= RQF_IO_STAT; 301 INIT_LIST_HEAD(&rq->queuelist);
+9 -6
block/blk-pm.c
··· 67 68 WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE); 69 70 /* 71 * Increase the pm_only counter before checking whether any 72 * non-PM blk_queue_enter() calls are in progress to avoid that any ··· 93 /* Switch q_usage_counter back to per-cpu mode. */ 94 blk_mq_unfreeze_queue(q); 95 96 - spin_lock_irq(&q->queue_lock); 97 - if (ret < 0) 98 pm_runtime_mark_last_busy(q->dev); 99 - else 100 - q->rpm_status = RPM_SUSPENDING; 101 - spin_unlock_irq(&q->queue_lock); 102 103 - if (ret) 104 blk_clear_pm_only(q); 105 106 return ret; 107 }
··· 67 68 WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE); 69 70 + spin_lock_irq(&q->queue_lock); 71 + q->rpm_status = RPM_SUSPENDING; 72 + spin_unlock_irq(&q->queue_lock); 73 + 74 /* 75 * Increase the pm_only counter before checking whether any 76 * non-PM blk_queue_enter() calls are in progress to avoid that any ··· 89 /* Switch q_usage_counter back to per-cpu mode. */ 90 blk_mq_unfreeze_queue(q); 91 92 + if (ret < 0) { 93 + spin_lock_irq(&q->queue_lock); 94 + q->rpm_status = RPM_ACTIVE; 95 pm_runtime_mark_last_busy(q->dev); 96 + spin_unlock_irq(&q->queue_lock); 97 98 blk_clear_pm_only(q); 99 + } 100 101 return ret; 102 }
+9 -5
block/blk-pm.h
··· 6 #include <linux/pm_runtime.h> 7 8 #ifdef CONFIG_PM 9 - static inline void blk_pm_request_resume(struct request_queue *q) 10 { 11 - if (q->dev && (q->rpm_status == RPM_SUSPENDED || 12 - q->rpm_status == RPM_SUSPENDING)) 13 - pm_request_resume(q->dev); 14 } 15 16 static inline void blk_pm_mark_last_busy(struct request *rq) ··· 47 --rq->q->nr_pending; 48 } 49 #else 50 - static inline void blk_pm_request_resume(struct request_queue *q) 51 { 52 } 53 54 static inline void blk_pm_mark_last_busy(struct request *rq)
··· 6 #include <linux/pm_runtime.h> 7 8 #ifdef CONFIG_PM 9 + static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q) 10 { 11 + if (!q->dev || !blk_queue_pm_only(q)) 12 + return 1; /* Nothing to do */ 13 + if (pm && q->rpm_status != RPM_SUSPENDED) 14 + return 1; /* Request allowed */ 15 + pm_request_resume(q->dev); 16 + return 0; 17 } 18 19 static inline void blk_pm_mark_last_busy(struct request *rq) ··· 44 --rq->q->nr_pending; 45 } 46 #else 47 + static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q) 48 { 49 + return 1; 50 } 51 52 static inline void blk_pm_mark_last_busy(struct request *rq)
-1
drivers/ide/ide-atapi.c
··· 223 sense_rq->rq_disk = rq->rq_disk; 224 sense_rq->cmd_flags = REQ_OP_DRV_IN; 225 ide_req(sense_rq)->type = ATA_PRIV_SENSE; 226 - sense_rq->rq_flags |= RQF_PREEMPT; 227 228 req->cmd[0] = GPCMD_REQUEST_SENSE; 229 req->cmd[4] = cmd_len;
··· 223 sense_rq->rq_disk = rq->rq_disk; 224 sense_rq->cmd_flags = REQ_OP_DRV_IN; 225 ide_req(sense_rq)->type = ATA_PRIV_SENSE; 226 227 req->cmd[0] = GPCMD_REQUEST_SENSE; 228 req->cmd[4] = cmd_len;
+1 -6
drivers/ide/ide-io.c
··· 515 * above to return us whatever is in the queue. Since we call 516 * ide_do_request() ourselves, we end up taking requests while 517 * the queue is blocked... 518 - * 519 - * We let requests forced at head of queue with ide-preempt 520 - * though. I hope that doesn't happen too much, hopefully not 521 - * unless the subdriver triggers such a thing in its own PM 522 - * state machine. 523 */ 524 if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && 525 ata_pm_request(rq) == 0 && 526 - (rq->rq_flags & RQF_PREEMPT) == 0) { 527 /* there should be no pending command at this point */ 528 ide_unlock_port(hwif); 529 goto plug_device;
··· 515 * above to return us whatever is in the queue. Since we call 516 * ide_do_request() ourselves, we end up taking requests while 517 * the queue is blocked... 518 */ 519 if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && 520 ata_pm_request(rq) == 0 && 521 + (rq->rq_flags & RQF_PM) == 0) { 522 /* there should be no pending command at this point */ 523 ide_unlock_port(hwif); 524 goto plug_device;
+1 -1
drivers/ide/ide-pm.c
··· 77 } 78 79 memset(&rqpm, 0, sizeof(rqpm)); 80 - rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT); 81 ide_req(rq)->type = ATA_PRIV_PM_RESUME; 82 ide_req(rq)->special = &rqpm; 83 rqpm.pm_step = IDE_PM_START_RESUME;
··· 77 } 78 79 memset(&rqpm, 0, sizeof(rqpm)); 80 + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM); 81 ide_req(rq)->type = ATA_PRIV_PM_RESUME; 82 ide_req(rq)->special = &rqpm; 83 rqpm.pm_step = IDE_PM_START_RESUME;
+1
drivers/scsi/cxgbi/cxgb4i/Kconfig
··· 4 depends on PCI && INET && (IPV6 || IPV6=n) 5 depends on THERMAL || !THERMAL 6 depends on ETHERNET 7 select NET_VENDOR_CHELSIO 8 select CHELSIO_T4 9 select CHELSIO_LIB
··· 4 depends on PCI && INET && (IPV6 || IPV6=n) 5 depends on THERMAL || !THERMAL 6 depends on ETHERNET 7 + depends on TLS || TLS=n 8 select NET_VENDOR_CHELSIO 9 select CHELSIO_T4 10 select CHELSIO_LIB
+1 -1
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 5034 static void 5035 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc) 5036 { 5037 - u16 trigger_flags; 5038 5039 /* 5040 * Default setting of master trigger.
··· 5034 static void 5035 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc) 5036 { 5037 + int trigger_flags; 5038 5039 /* 5040 * Default setting of master trigger.
+14 -13
drivers/scsi/scsi_lib.c
··· 249 250 req = blk_get_request(sdev->request_queue, 251 data_direction == DMA_TO_DEVICE ? 252 - REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT); 253 if (IS_ERR(req)) 254 return ret; 255 rq = scsi_req(req); ··· 1207 scsi_device_state_check(struct scsi_device *sdev, struct request *req) 1208 { 1209 switch (sdev->sdev_state) { 1210 case SDEV_OFFLINE: 1211 case SDEV_TRANSPORT_OFFLINE: 1212 /* ··· 1235 return BLK_STS_RESOURCE; 1236 case SDEV_QUIESCE: 1237 /* 1238 - * If the devices is blocked we defer normal commands. 1239 */ 1240 - if (req && !(req->rq_flags & RQF_PREEMPT)) 1241 return BLK_STS_RESOURCE; 1242 return BLK_STS_OK; 1243 default: 1244 /* 1245 * For any other not fully online state we only allow 1246 - * special commands. In particular any user initiated 1247 - * command is not allowed. 1248 */ 1249 - if (req && !(req->rq_flags & RQF_PREEMPT)) 1250 return BLK_STS_IOERR; 1251 return BLK_STS_OK; 1252 } ··· 2519 EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2520 2521 /** 2522 - * scsi_device_quiesce - Block user issued commands. 2523 * @sdev: scsi device to quiesce. 2524 * 2525 * This works by trying to transition to the SDEV_QUIESCE state 2526 * (which must be a legal transition). When the device is in this 2527 - * state, only special requests will be accepted, all others will 2528 - * be deferred. Since special requests may also be requeued requests, 2529 - * a successful return doesn't guarantee the device will be 2530 - * totally quiescent. 2531 * 2532 * Must be called with user context, may sleep. 2533 * ··· 2587 * device deleted during suspend) 2588 */ 2589 mutex_lock(&sdev->state_mutex); 2590 if (sdev->quiesced_by) { 2591 sdev->quiesced_by = NULL; 2592 blk_clear_pm_only(sdev->request_queue); 2593 } 2594 - if (sdev->sdev_state == SDEV_QUIESCE) 2595 - scsi_device_set_state(sdev, SDEV_RUNNING); 2596 mutex_unlock(&sdev->state_mutex); 2597 } 2598 EXPORT_SYMBOL(scsi_device_resume);
··· 249 250 req = blk_get_request(sdev->request_queue, 251 data_direction == DMA_TO_DEVICE ? 252 + REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 253 + rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0); 254 if (IS_ERR(req)) 255 return ret; 256 rq = scsi_req(req); ··· 1206 scsi_device_state_check(struct scsi_device *sdev, struct request *req) 1207 { 1208 switch (sdev->sdev_state) { 1209 + case SDEV_CREATED: 1210 + return BLK_STS_OK; 1211 case SDEV_OFFLINE: 1212 case SDEV_TRANSPORT_OFFLINE: 1213 /* ··· 1232 return BLK_STS_RESOURCE; 1233 case SDEV_QUIESCE: 1234 /* 1235 + * If the device is blocked we only accept power management 1236 + * commands. 1237 */ 1238 + if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) 1239 return BLK_STS_RESOURCE; 1240 return BLK_STS_OK; 1241 default: 1242 /* 1243 * For any other not fully online state we only allow 1244 + * power management commands. 1245 */ 1246 + if (req && !(req->rq_flags & RQF_PM)) 1247 return BLK_STS_IOERR; 1248 return BLK_STS_OK; 1249 } ··· 2516 EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2517 2518 /** 2519 + * scsi_device_quiesce - Block all commands except power management. 2520 * @sdev: scsi device to quiesce. 2521 * 2522 * This works by trying to transition to the SDEV_QUIESCE state 2523 * (which must be a legal transition). When the device is in this 2524 + * state, only power management requests will be accepted, all others will 2525 + * be deferred. 2526 * 2527 * Must be called with user context, may sleep. 2528 * ··· 2586 * device deleted during suspend) 2587 */ 2588 mutex_lock(&sdev->state_mutex); 2589 + if (sdev->sdev_state == SDEV_QUIESCE) 2590 + scsi_device_set_state(sdev, SDEV_RUNNING); 2591 if (sdev->quiesced_by) { 2592 sdev->quiesced_by = NULL; 2593 blk_clear_pm_only(sdev->request_queue); 2594 } 2595 mutex_unlock(&sdev->state_mutex); 2596 } 2597 EXPORT_SYMBOL(scsi_device_resume);
+19 -8
drivers/scsi/scsi_transport_spi.c
··· 117 sshdr = &sshdr_tmp; 118 119 for(i = 0; i < DV_RETRIES; i++) { 120 result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense, 121 sshdr, DV_TIMEOUT, /* retries */ 1, 122 REQ_FAILFAST_DEV | 123 REQ_FAILFAST_TRANSPORT | 124 REQ_FAILFAST_DRIVER, 125 - 0, NULL); 126 if (driver_byte(result) != DRIVER_SENSE || 127 sshdr->sense_key != UNIT_ATTENTION) 128 break; ··· 1009 */ 1010 lock_system_sleep(); 1011 1012 if (unlikely(spi_dv_in_progress(starget))) 1013 - goto unlock; 1014 1015 if (unlikely(scsi_device_get(sdev))) 1016 - goto unlock; 1017 1018 spi_dv_in_progress(starget) = 1; 1019 1020 buffer = kzalloc(len, GFP_KERNEL); 1021 1022 if (unlikely(!buffer)) 1023 - goto out_put; 1024 1025 /* We need to verify that the actual device will quiesce; the 1026 * later target quiesce is just a nice to have */ 1027 if (unlikely(scsi_device_quiesce(sdev))) 1028 - goto out_free; 1029 1030 scsi_target_quiesce(starget); 1031 ··· 1048 1049 spi_initial_dv(starget) = 1; 1050 1051 - out_free: 1052 kfree(buffer); 1053 - out_put: 1054 spi_dv_in_progress(starget) = 0; 1055 scsi_device_put(sdev); 1056 - unlock: 1057 unlock_system_sleep(); 1058 } 1059 EXPORT_SYMBOL(spi_dv_device);
··· 117 sshdr = &sshdr_tmp; 118 119 for(i = 0; i < DV_RETRIES; i++) { 120 + /* 121 + * The purpose of the RQF_PM flag below is to bypass the 122 + * SDEV_QUIESCE state. 123 + */ 124 result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense, 125 sshdr, DV_TIMEOUT, /* retries */ 1, 126 REQ_FAILFAST_DEV | 127 REQ_FAILFAST_TRANSPORT | 128 REQ_FAILFAST_DRIVER, 129 + RQF_PM, NULL); 130 if (driver_byte(result) != DRIVER_SENSE || 131 sshdr->sense_key != UNIT_ATTENTION) 132 break; ··· 1005 */ 1006 lock_system_sleep(); 1007 1008 + if (scsi_autopm_get_device(sdev)) 1009 + goto unlock_system_sleep; 1010 + 1011 if (unlikely(spi_dv_in_progress(starget))) 1012 + goto put_autopm; 1013 1014 if (unlikely(scsi_device_get(sdev))) 1015 + goto put_autopm; 1016 1017 spi_dv_in_progress(starget) = 1; 1018 1019 buffer = kzalloc(len, GFP_KERNEL); 1020 1021 if (unlikely(!buffer)) 1022 + goto put_sdev; 1023 1024 /* We need to verify that the actual device will quiesce; the 1025 * later target quiesce is just a nice to have */ 1026 if (unlikely(scsi_device_quiesce(sdev))) 1027 + goto free_buffer; 1028 1029 scsi_target_quiesce(starget); 1030 ··· 1041 1042 spi_initial_dv(starget) = 1; 1043 1044 + free_buffer: 1045 kfree(buffer); 1046 + 1047 + put_sdev: 1048 spi_dv_in_progress(starget) = 0; 1049 scsi_device_put(sdev); 1050 + put_autopm: 1051 + scsi_autopm_put_device(sdev); 1052 + 1053 + unlock_system_sleep: 1054 unlock_system_sleep(); 1055 } 1056 EXPORT_SYMBOL(spi_dv_device);
+1 -1
drivers/scsi/ufs/ufs-mediatek-trace.h
··· 31 32 #undef TRACE_INCLUDE_PATH 33 #undef TRACE_INCLUDE_FILE 34 - #define TRACE_INCLUDE_PATH . 35 #define TRACE_INCLUDE_FILE ufs-mediatek-trace 36 #include <trace/define_trace.h>
··· 31 32 #undef TRACE_INCLUDE_PATH 33 #undef TRACE_INCLUDE_FILE 34 + #define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/ 35 #define TRACE_INCLUDE_FILE ufs-mediatek-trace 36 #include <trace/define_trace.h>
+21
drivers/scsi/ufs/ufs-mediatek.c
··· 70 return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL); 71 } 72 73 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) 74 { 75 u32 tmp; ··· 520 521 if (of_property_read_bool(np, "mediatek,ufs-disable-ah8")) 522 host->caps |= UFS_MTK_CAP_DISABLE_AH8; 523 524 dev_info(hba->dev, "caps: 0x%x", host->caps); 525 } ··· 1013 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) 1014 { 1015 ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); 1016 } 1017 1018 static void ufs_mtk_event_notify(struct ufs_hba *hba,
··· 70 return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL); 71 } 72 73 + static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba) 74 + { 75 + struct ufs_mtk_host *host = ufshcd_get_variant(hba); 76 + 77 + return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC); 78 + } 79 + 80 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) 81 { 82 u32 tmp; ··· 513 514 if (of_property_read_bool(np, "mediatek,ufs-disable-ah8")) 515 host->caps |= UFS_MTK_CAP_DISABLE_AH8; 516 + 517 + if (of_property_read_bool(np, "mediatek,ufs-broken-vcc")) 518 + host->caps |= UFS_MTK_CAP_BROKEN_VCC; 519 520 dev_info(hba->dev, "caps: 0x%x", host->caps); 521 } ··· 1003 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) 1004 { 1005 ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); 1006 + 1007 + if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc && 1008 + (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) { 1009 + hba->vreg_info.vcc->always_on = true; 1010 + /* 1011 + * VCC will be kept always-on thus we don't 1012 + * need any delay during regulator operations 1013 + */ 1014 + hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | 1015 + UFS_DEVICE_QUIRK_DELAY_AFTER_LPM); 1016 + } 1017 } 1018 1019 static void ufs_mtk_event_notify(struct ufs_hba *hba,
+1
drivers/scsi/ufs/ufs-mediatek.h
··· 81 UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0, 82 UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1, 83 UFS_MTK_CAP_DISABLE_AH8 = 1 << 2, 84 }; 85 86 struct ufs_mtk_crypt_cfg {
··· 81 UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0, 82 UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1, 83 UFS_MTK_CAP_DISABLE_AH8 = 1 << 2, 84 + UFS_MTK_CAP_BROKEN_VCC = 1 << 3, 85 }; 86 87 struct ufs_mtk_crypt_cfg {
+1 -1
drivers/scsi/ufs/ufs.h
··· 330 UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), 331 }; 332 333 - #define POWER_DESC_MAX_SIZE 0x62 334 #define POWER_DESC_MAX_ACTV_ICC_LVLS 16 335 336 /* Attribute bActiveICCLevel parameter bit masks definitions */ ··· 512 struct ufs_vreg { 513 struct regulator *reg; 514 const char *name; 515 bool enabled; 516 int min_uV; 517 int max_uV;
··· 330 UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), 331 }; 332 333 #define POWER_DESC_MAX_ACTV_ICC_LVLS 16 334 335 /* Attribute bActiveICCLevel parameter bit masks definitions */ ··· 513 struct ufs_vreg { 514 struct regulator *reg; 515 const char *name; 516 + bool always_on; 517 bool enabled; 518 int min_uV; 519 int max_uV;
+71 -2
drivers/scsi/ufs/ufshcd-pci.c
··· 148 { 149 struct intel_host *host; 150 151 host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL); 152 if (!host) 153 return -ENOMEM; ··· 165 intel_ltr_hide(hba->dev); 166 } 167 168 static int ufs_intel_ehl_init(struct ufs_hba *hba) 169 { 170 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; ··· 211 .init = ufs_intel_common_init, 212 .exit = ufs_intel_common_exit, 213 .link_startup_notify = ufs_intel_link_startup_notify, 214 }; 215 216 static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = { ··· 219 .init = ufs_intel_ehl_init, 220 .exit = ufs_intel_common_exit, 221 .link_startup_notify = ufs_intel_link_startup_notify, 222 }; 223 224 #ifdef CONFIG_PM_SLEEP ··· 246 { 247 return ufshcd_system_resume(dev_get_drvdata(dev)); 248 } 249 #endif /* !CONFIG_PM_SLEEP */ 250 251 #ifdef CONFIG_PM ··· 365 } 366 367 static const struct dev_pm_ops ufshcd_pci_pm_ops = { 368 - SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, 369 - ufshcd_pci_resume) 370 SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend, 371 ufshcd_pci_runtime_resume, 372 ufshcd_pci_runtime_idle)
··· 148 { 149 struct intel_host *host; 150 151 + hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; 152 + 153 host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL); 154 if (!host) 155 return -ENOMEM; ··· 163 intel_ltr_hide(hba->dev); 164 } 165 166 + static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op) 167 + { 168 + /* 169 + * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base 170 + * address registers must be restored because the restore kernel can 171 + * have used different addresses. 172 + */ 173 + ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 174 + REG_UTP_TRANSFER_REQ_LIST_BASE_L); 175 + ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), 176 + REG_UTP_TRANSFER_REQ_LIST_BASE_H); 177 + ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), 178 + REG_UTP_TASK_REQ_LIST_BASE_L); 179 + ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), 180 + REG_UTP_TASK_REQ_LIST_BASE_H); 181 + 182 + if (ufshcd_is_link_hibern8(hba)) { 183 + int ret = ufshcd_uic_hibern8_exit(hba); 184 + 185 + if (!ret) { 186 + ufshcd_set_link_active(hba); 187 + } else { 188 + dev_err(hba->dev, "%s: hibern8 exit failed %d\n", 189 + __func__, ret); 190 + /* 191 + * Force reset and restore. Any other actions can lead 192 + * to an unrecoverable state. 193 + */ 194 + ufshcd_set_link_off(hba); 195 + } 196 + } 197 + 198 + return 0; 199 + } 200 + 201 static int ufs_intel_ehl_init(struct ufs_hba *hba) 202 { 203 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; ··· 174 .init = ufs_intel_common_init, 175 .exit = ufs_intel_common_exit, 176 .link_startup_notify = ufs_intel_link_startup_notify, 177 + .resume = ufs_intel_resume, 178 }; 179 180 static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = { ··· 181 .init = ufs_intel_ehl_init, 182 .exit = ufs_intel_common_exit, 183 .link_startup_notify = ufs_intel_link_startup_notify, 184 + .resume = ufs_intel_resume, 185 }; 186 187 #ifdef CONFIG_PM_SLEEP ··· 207 { 208 return ufshcd_system_resume(dev_get_drvdata(dev)); 209 } 210 + 211 + /** 212 + * ufshcd_pci_poweroff - suspend-to-disk poweroff function 213 + * @dev: pointer to PCI device handle 214 + * 215 + * Returns 0 if successful 216 + * Returns non-zero otherwise 217 + */ 218 + static int ufshcd_pci_poweroff(struct device *dev) 219 + { 220 + struct ufs_hba *hba = dev_get_drvdata(dev); 221 + int spm_lvl = hba->spm_lvl; 222 + int ret; 223 + 224 + /* 225 + * For poweroff we need to set the UFS device to PowerDown mode. 226 + * Force spm_lvl to ensure that. 227 + */ 228 + hba->spm_lvl = 5; 229 + ret = ufshcd_system_suspend(hba); 230 + hba->spm_lvl = spm_lvl; 231 + return ret; 232 + } 233 + 234 #endif /* !CONFIG_PM_SLEEP */ 235 236 #ifdef CONFIG_PM ··· 302 } 303 304 static const struct dev_pm_ops ufshcd_pci_pm_ops = { 305 + #ifdef CONFIG_PM_SLEEP 306 + .suspend = ufshcd_pci_suspend, 307 + .resume = ufshcd_pci_resume, 308 + .freeze = ufshcd_pci_suspend, 309 + .thaw = ufshcd_pci_resume, 310 + .poweroff = ufshcd_pci_poweroff, 311 + .restore = ufshcd_pci_resume, 312 + #endif 313 SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend, 314 ufshcd_pci_runtime_resume, 315 ufshcd_pci_runtime_idle)
+29 -16
drivers/scsi/ufs/ufshcd.c
··· 225 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); 226 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); 227 static void ufshcd_hba_exit(struct ufs_hba *hba); 228 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async); 229 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); 230 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); ··· 579 names[hba->pwr_info.pwr_rx], 580 names[hba->pwr_info.pwr_tx], 581 hba->pwr_info.hs_rate); 582 } 583 584 void ufshcd_delay_us(unsigned long us, unsigned long tolerance) ··· 3683 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3684 if (ret) 3685 dev_err(hba->dev, 3686 - "dme-reset: error code %d\n", ret); 3687 3688 return ret; 3689 } ··· 3982 spin_unlock_irqrestore(hba->host->host_lock, flags); 3983 3984 /* Reset the attached device */ 3985 - ufshcd_vops_device_reset(hba); 3986 3987 ret = ufshcd_host_reset_and_restore(hba); 3988 ··· 6948 6949 /* Establish the link again and restore the device */ 6950 err = ufshcd_probe_hba(hba, false); 6951 - 6952 out: 6953 if (err) 6954 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); ··· 6987 6988 do { 6989 /* Reset the attached device */ 6990 - ufshcd_vops_device_reset(hba); 6991 6992 err = ufshcd_host_reset_and_restore(hba); 6993 } while (err && --retries); ··· 8064 { 8065 int ret = 0; 8066 8067 - if (!vreg || !vreg->enabled) 8068 goto out; 8069 8070 ret = regulator_disable(vreg->reg); ··· 8433 * handling context. 8434 */ 8435 hba->host->eh_noresume = 1; 8436 - if (hba->wlun_dev_clr_ua) { 8437 - ret = ufshcd_send_request_sense(hba, sdp); 8438 - if (ret) 8439 - goto out; 8440 - /* Unit attention condition is cleared now */ 8441 - hba->wlun_dev_clr_ua = false; 8442 - } 8443 8444 cmd[4] = pwr_mode << 4; 8445 ··· 8454 8455 if (!ret) 8456 hba->curr_dev_pwr_mode = pwr_mode; 8457 - out: 8458 scsi_device_put(sdp); 8459 hba->host->eh_noresume = 0; 8460 return ret; ··· 8760 * further below. 8761 */ 8762 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 8763 - ufshcd_vops_device_reset(hba); 8764 WARN_ON(!ufshcd_is_link_off(hba)); 8765 } 8766 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) ··· 8770 set_dev_active: 8771 /* Can also get here needing to exit DeepSleep */ 8772 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 8773 - ufshcd_vops_device_reset(hba); 8774 ufshcd_host_reset_and_restore(hba); 8775 } 8776 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) ··· 9366 } 9367 9368 /* Reset the attached device */ 9369 - ufshcd_vops_device_reset(hba); 9370 9371 ufshcd_init_crypto(hba); 9372
··· 225 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); 226 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); 227 static void ufshcd_hba_exit(struct ufs_hba *hba); 228 + static int ufshcd_clear_ua_wluns(struct ufs_hba *hba); 229 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async); 230 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); 231 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); ··· 578 names[hba->pwr_info.pwr_rx], 579 names[hba->pwr_info.pwr_tx], 580 hba->pwr_info.hs_rate); 581 + } 582 + 583 + static void ufshcd_device_reset(struct ufs_hba *hba) 584 + { 585 + int err; 586 + 587 + err = ufshcd_vops_device_reset(hba); 588 + 589 + if (!err) { 590 + ufshcd_set_ufs_dev_active(hba); 591 + if (ufshcd_is_wb_allowed(hba)) { 592 + hba->wb_enabled = false; 593 + hba->wb_buf_flush_enabled = false; 594 + } 595 + } 596 + if (err != -EOPNOTSUPP) 597 + ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); 598 } 599 600 void ufshcd_delay_us(unsigned long us, unsigned long tolerance) ··· 3665 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3666 if (ret) 3667 dev_err(hba->dev, 3668 + "dme-enable: error code %d\n", ret); 3669 3670 return ret; 3671 } ··· 3964 spin_unlock_irqrestore(hba->host->host_lock, flags); 3965 3966 /* Reset the attached device */ 3967 + ufshcd_device_reset(hba); 3968 3969 ret = ufshcd_host_reset_and_restore(hba); 3970 ··· 6930 6931 /* Establish the link again and restore the device */ 6932 err = ufshcd_probe_hba(hba, false); 6933 + if (!err) 6934 + ufshcd_clear_ua_wluns(hba); 6935 out: 6936 if (err) 6937 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); ··· 6968 6969 do { 6970 /* Reset the attached device */ 6971 + ufshcd_device_reset(hba); 6972 6973 err = ufshcd_host_reset_and_restore(hba); 6974 } while (err && --retries); ··· 8045 { 8046 int ret = 0; 8047 8048 + if (!vreg || !vreg->enabled || vreg->always_on) 8049 goto out; 8050 8051 ret = regulator_disable(vreg->reg); ··· 8414 * handling context. 8415 */ 8416 hba->host->eh_noresume = 1; 8417 + ufshcd_clear_ua_wluns(hba); 8418 8419 cmd[4] = pwr_mode << 4; 8420 ··· 8441 8442 if (!ret) 8443 hba->curr_dev_pwr_mode = pwr_mode; 8444 + 8445 scsi_device_put(sdp); 8446 hba->host->eh_noresume = 0; 8447 return ret; ··· 8747 * further below. 8748 */ 8749 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 8750 + ufshcd_device_reset(hba); 8751 WARN_ON(!ufshcd_is_link_off(hba)); 8752 } 8753 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) ··· 8757 set_dev_active: 8758 /* Can also get here needing to exit DeepSleep */ 8759 if (ufshcd_is_ufs_dev_deepsleep(hba)) { 8760 + ufshcd_device_reset(hba); 8761 ufshcd_host_reset_and_restore(hba); 8762 } 8763 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) ··· 9353 } 9354 9355 /* Reset the attached device */ 9356 + ufshcd_device_reset(hba); 9357 9358 ufshcd_init_crypto(hba); 9359
+4 -8
drivers/scsi/ufs/ufshcd.h
··· 1218 hba->vops->dbg_register_dump(hba); 1219 } 1220 1221 - static inline void ufshcd_vops_device_reset(struct ufs_hba *hba) 1222 { 1223 - if (hba->vops && hba->vops->device_reset) { 1224 - int err = hba->vops->device_reset(hba); 1225 1226 - if (!err) 1227 - ufshcd_set_ufs_dev_active(hba); 1228 - if (err != -EOPNOTSUPP) 1229 - ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); 1230 - } 1231 } 1232 1233 static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
··· 1218 hba->vops->dbg_register_dump(hba); 1219 } 1220 1221 + static inline int ufshcd_vops_device_reset(struct ufs_hba *hba) 1222 { 1223 + if (hba->vops && hba->vops->device_reset) 1224 + return hba->vops->device_reset(hba); 1225 1226 + return -EOPNOTSUPP; 1227 } 1228 1229 static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
+2 -2
include/linux/blk-mq.h
··· 447 BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), 448 /* allocate from reserved pool */ 449 BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), 450 - /* set RQF_PREEMPT */ 451 - BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3), 452 }; 453 454 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
··· 447 BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), 448 /* allocate from reserved pool */ 449 BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), 450 + /* set RQF_PM */ 451 + BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2), 452 }; 453 454 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
+13 -5
include/linux/blkdev.h
··· 79 #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) 80 /* don't call prep for this one */ 81 #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) 82 - /* set for "ide_preempt" requests and also for requests for which the SCSI 83 - "quiesce" state must be ignored. */ 84 - #define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) 85 /* vaguely specified driver internal error. Ignored by the block layer */ 86 #define RQF_FAILED ((__force req_flags_t)(1 << 10)) 87 /* don't warn about errors */ ··· 427 unsigned long queue_flags; 428 /* 429 * Number of contexts that have called blk_set_pm_only(). If this 430 - * counter is above zero then only RQF_PM and RQF_PREEMPT requests are 431 - * processed. 432 */ 433 atomic_t pm_only; 434 ··· 691 { 692 return q->mq_ops; 693 } 694 695 static inline enum blk_zoned_model 696 blk_queue_zoned_model(struct request_queue *q)
··· 79 #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) 80 /* don't call prep for this one */ 81 #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) 82 /* vaguely specified driver internal error. Ignored by the block layer */ 83 #define RQF_FAILED ((__force req_flags_t)(1 << 10)) 84 /* don't warn about errors */ ··· 430 unsigned long queue_flags; 431 /* 432 * Number of contexts that have called blk_set_pm_only(). If this 433 + * counter is above zero then only RQF_PM requests are processed. 434 */ 435 atomic_t pm_only; 436 ··· 695 { 696 return q->mq_ops; 697 } 698 + 699 + #ifdef CONFIG_PM 700 + static inline enum rpm_status queue_rpm_status(struct request_queue *q) 701 + { 702 + return q->rpm_status; 703 + } 704 + #else 705 + static inline enum rpm_status queue_rpm_status(struct request_queue *q) 706 + { 707 + return RPM_ACTIVE; 708 + } 709 + #endif 710 711 static inline enum blk_zoned_model 712 blk_queue_zoned_model(struct request_queue *q)