scsi: ufs: core: Fix task management completion

The UFS driver uses blk_mq_tagset_busy_iter() when identifying task
management requests to complete; however, blk_mq_tagset_busy_iter()
does not work for this purpose.

blk_mq_tagset_busy_iter() only iterates requests dispatched by the block
layer. This appears to date back to commit 37f4a24c2469
("blk-mq: centralise related handling into blk_mq_get_driver_tag"), which
removed 'data->hctx->tags->rqs[rq->tag] = rq' from blk_mq_rq_ctx_init(),
which is reached via the call chain:

blk_get_request
blk_mq_alloc_request
__blk_mq_alloc_request
blk_mq_rq_ctx_init

Since UFS task management requests are not dispatched by the block layer,
hctx->tags->rqs[rq->tag] remains NULL, and since blk_mq_tagset_busy_iter()
relies on finding requests using hctx->tags->rqs[rq->tag], UFS task
management requests are never found by blk_mq_tagset_busy_iter().

By using blk_mq_tagset_busy_iter(), the UFS driver was relying on internal
details of the block layer, which was fragile and subsequently got
broken. Fix by removing the use of blk_mq_tagset_busy_iter() and having the
driver keep track of task management requests.

Link: https://lore.kernel.org/r/20210922091059.4040-1-adrian.hunter@intel.com
Fixes: 1235fc569e0b ("scsi: ufs: core: Fix task management request completion timeout")
Fixes: 69a6c269c097 ("scsi: ufs: Use blk_{get,put}_request() to allocate and free TMFs")
Cc: stable@vger.kernel.org
Tested-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by Adrian Hunter and committed by Martin K. Petersen f5ef336f c5336400

+23 -30
+22 -30
drivers/scsi/ufs/ufshcd.c
@@ ufshcd_tmc_handler @@
 	return retval;
 }
 
-struct ctm_info {
-	struct ufs_hba	*hba;
-	unsigned long	pending;
-	unsigned int	ncpl;
-};
-
-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
-{
-	struct ctm_info *const ci = priv;
-	struct completion *c;
-
-	WARN_ON_ONCE(reserved);
-	if (test_bit(req->tag, &ci->pending))
-		return true;
-	ci->ncpl++;
-	c = req->end_io_data;
-	if (c)
-		complete(c);
-	return true;
-}
-
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
@@
  */
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-	unsigned long flags;
-	struct request_queue *q = hba->tmf_queue;
-	struct ctm_info ci = {
-		.hba	 = hba,
-	};
+	unsigned long flags, pending, issued;
+	irqreturn_t ret = IRQ_NONE;
+	int tag;
+
+	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+	issued = hba->outstanding_tasks & ~pending;
+	for_each_set_bit(tag, &issued, hba->nutmrs) {
+		struct request *req = hba->tmf_rqs[tag];
+		struct completion *c = req->end_io_data;
+
+		complete(c);
+		ret = IRQ_HANDLED;
+	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
+	return ret;
 }
 
 /**
@@
 	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(host->host_lock, flags);
-	blk_mq_start_request(req);
 
 	task_tag = req->tag;
+	hba->tmf_rqs[req->tag] = req;
 	treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
 
 	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
@@
 	}
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->tmf_rqs[req->tag] = NULL;
 	__clear_bit(task_tag, &hba->outstanding_tasks);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@
 	if (IS_ERR(hba->tmf_queue)) {
 		err = PTR_ERR(hba->tmf_queue);
 		goto free_tmf_tag_set;
+	}
+	hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+				    sizeof(*hba->tmf_rqs), GFP_KERNEL);
+	if (!hba->tmf_rqs) {
+		err = -ENOMEM;
+		goto free_tmf_queue;
 	}
 
 	/* Reset the attached device */
+1
drivers/scsi/ufs/ufshcd.h
@@ struct ufs_hba @@
 
 	struct blk_mq_tag_set	tmf_tag_set;
 	struct request_queue	*tmf_queue;
+	struct request		**tmf_rqs;
 
 	struct uic_command	*active_uic_cmd;
 	struct mutex		uic_cmd_mutex;