Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: smartpqi: enhance reset logic

Eliminated timeout from LUN reset logic.

Reviewed-by: Scott Teel <scott.teel@microsemi.com>
Reviewed-by: Scott Benesh <scott.benesh@microsemi.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Tomas Henzl <thenzl@redhat.com>
Signed-off-by: Kevin Barnett <kevin.barnett@microsemi.com>
Signed-off-by: Don Brace <don.brace@microsemi.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

Authored by Kevin Barnett and committed by Martin K. Petersen.
14bb215d e58081a7

+39 -72
-2
drivers/scsi/smartpqi/smartpqi.h
··· 370 370 }; 371 371 372 372 #define SOP_TASK_MANAGEMENT_LUN_RESET 0x8 373 - #define PQI_ABORT_TIMEOUT_MSECS (20 * 1000) 374 373 375 374 struct pqi_task_management_response { 376 375 struct pqi_iu_header header; ··· 761 762 762 763 struct pqi_sas_port *sas_port; 763 764 struct scsi_device *sdev; 764 - bool reset_in_progress; 765 765 766 766 struct list_head scsi_device_list_entry; 767 767 struct list_head new_device_list_entry;
+39 -70
drivers/scsi/smartpqi/smartpqi_init.c
··· 4537 4537 bool raid_bypassed; 4538 4538 4539 4539 device = scmd->device->hostdata; 4540 - 4541 - if (device->reset_in_progress) { 4542 - set_host_byte(scmd, DID_RESET); 4543 - pqi_scsi_done(scmd); 4544 - return 0; 4545 - } 4546 - 4547 4540 ctrl_info = shost_to_hba(shost); 4548 4541 4549 4542 if (pqi_ctrl_offline(ctrl_info)) { ··· 4578 4585 return rc; 4579 4586 } 4580 4587 4581 - static inline void pqi_complete_queued_requests_queue_group( 4582 - struct pqi_queue_group *queue_group, 4583 - struct pqi_scsi_dev *device_in_reset) 4584 - { 4585 - unsigned int path; 4586 - unsigned long flags; 4587 - struct pqi_io_request *io_request; 4588 - struct pqi_io_request *next; 4589 - struct scsi_cmnd *scmd; 4590 - struct pqi_scsi_dev *device; 4591 - 4592 - for (path = 0; path < 2; path++) { 4593 - spin_lock_irqsave(&queue_group->submit_lock[path], flags); 4594 - 4595 - list_for_each_entry_safe(io_request, next, 4596 - &queue_group->request_list[path], 4597 - request_list_entry) { 4598 - scmd = io_request->scmd; 4599 - if (!scmd) 4600 - continue; 4601 - device = scmd->device->hostdata; 4602 - if (device == device_in_reset) { 4603 - set_host_byte(scmd, DID_RESET); 4604 - pqi_scsi_done(scmd); 4605 - list_del(&io_request-> 4606 - request_list_entry); 4607 - } 4608 - } 4609 - 4610 - spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 4611 - } 4612 - } 4613 - 4614 - static void pqi_complete_queued_requests(struct pqi_ctrl_info *ctrl_info, 4615 - struct pqi_scsi_dev *device_in_reset) 4616 - { 4617 - unsigned int i; 4618 - struct pqi_queue_group *queue_group; 4619 - 4620 - for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4621 - queue_group = &ctrl_info->queue_groups[i]; 4622 - pqi_complete_queued_requests_queue_group(queue_group, 4623 - device_in_reset); 4624 - } 4625 - } 4626 - 4627 - static void pqi_reset_lun_complete(struct pqi_io_request *io_request, 4588 + static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 4628 4589 void *context) 4629 4590 { 
4630 4591 struct completion *waiting = context; ··· 4586 4639 complete(waiting); 4587 4640 } 4588 4641 4589 - static int pqi_reset_lun(struct pqi_ctrl_info *ctrl_info, 4642 + #define PQI_LUN_RESET_TIMEOUT_SECS 10 4643 + 4644 + static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 4645 + struct pqi_scsi_dev *device, struct completion *wait) 4646 + { 4647 + int rc; 4648 + unsigned int wait_secs = 0; 4649 + 4650 + while (1) { 4651 + if (wait_for_completion_io_timeout(wait, 4652 + PQI_LUN_RESET_TIMEOUT_SECS * HZ)) { 4653 + rc = 0; 4654 + break; 4655 + } 4656 + 4657 + pqi_check_ctrl_health(ctrl_info); 4658 + if (pqi_ctrl_offline(ctrl_info)) { 4659 + rc = -ETIMEDOUT; 4660 + break; 4661 + } 4662 + 4663 + wait_secs += PQI_LUN_RESET_TIMEOUT_SECS; 4664 + 4665 + dev_err(&ctrl_info->pci_dev->dev, 4666 + "resetting scsi %d:%d:%d:%d - waiting %u seconds\n", 4667 + ctrl_info->scsi_host->host_no, device->bus, 4668 + device->target, device->lun, wait_secs); 4669 + } 4670 + 4671 + return rc; 4672 + } 4673 + 4674 + static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, 4590 4675 struct pqi_scsi_dev *device) 4591 4676 { 4592 4677 int rc; ··· 4629 4650 down(&ctrl_info->lun_reset_sem); 4630 4651 4631 4652 io_request = pqi_alloc_io_request(ctrl_info); 4632 - io_request->io_complete_callback = pqi_reset_lun_complete; 4653 + io_request->io_complete_callback = pqi_lun_reset_complete; 4633 4654 io_request->context = &wait; 4634 4655 4635 4656 request = io_request->iu; ··· 4647 4668 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 4648 4669 io_request); 4649 4670 4650 - if (!wait_for_completion_io_timeout(&wait, 4651 - msecs_to_jiffies(PQI_ABORT_TIMEOUT_MSECS))) { 4652 - rc = -ETIMEDOUT; 4653 - } else { 4671 + rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); 4672 + if (rc == 0) 4654 4673 rc = io_request->status; 4655 - } 4656 4674 4657 4675 pqi_free_io_request(io_request); 4658 4676 up(&ctrl_info->lun_reset_sem); ··· 4668 4692 if 
(pqi_ctrl_offline(ctrl_info)) 4669 4693 return FAILED; 4670 4694 4671 - device->reset_in_progress = true; 4672 - pqi_complete_queued_requests(ctrl_info, device); 4673 - rc = pqi_reset_lun(ctrl_info, device); 4674 - device->reset_in_progress = false; 4695 + rc = pqi_lun_reset(ctrl_info, device); 4675 4696 4676 - if (rc) 4677 - return FAILED; 4678 - 4679 - return SUCCESS; 4697 + return rc == 0 ? SUCCESS : FAILED; 4680 4698 } 4681 4699 4682 4700 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) ··· 4680 4710 struct pqi_scsi_dev *device; 4681 4711 4682 4712 ctrl_info = shost_to_hba(scmd->device->host); 4683 - 4684 4713 device = scmd->device->hostdata; 4685 4714 4686 4715 dev_err(&ctrl_info->pci_dev->dev,