Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
[SCSI] stex: switch to block timeout
[SCSI] make scsi_eh_try_stu use block timeout
[SCSI] megaraid_sas: switch to block timeout
[SCSI] ibmvscsi: switch to block timeout
[SCSI] aacraid: switch to block timeout
[SCSI] zfcp: prevent double decrement on host_busy while being busy
[SCSI] zfcp: fix deadlock between wq triggered port scan and ERP
[SCSI] zfcp: eliminate race between validation and locking
[SCSI] zfcp: verify for correct rport state before scanning for SCSI devs
[SCSI] zfcp: returning an ERR_PTR where a NULL value is expected
[SCSI] zfcp: Fix opening of wka ports
[SCSI] zfcp: fix remote port status check
[SCSI] fc_transport: fix old bug on bitflag definitions
[SCSI] Fix hang in starved list processing

+42 -33
+5 -2
drivers/s390/scsi/zfcp_erp.c
···
 720  720 		goto failed_openfcp;
 721  721 
 722  722 	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);
 723       -	schedule_work(&act->adapter->scan_work);
 724  723 
 725  724 	return ZFCP_ERP_SUCCEEDED;
 726  725 
···
1185 1186 		container_of(work, struct zfcp_erp_add_work, work);
1186 1187 	struct zfcp_unit *unit = p->unit;
1187 1188 	struct fc_rport *rport = unit->port->rport;
1188       -	scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
      1189 +
      1190 +	if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
      1191 +		scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1189 1192 		scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0);
1190 1193 	atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1191 1194 	zfcp_unit_put(unit);
···
1283 1282 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1284 1283 		if (result != ZFCP_ERP_SUCCEEDED)
1285 1284 			zfcp_erp_rports_del(adapter);
      1285 +		else
      1286 +			schedule_work(&adapter->scan_work);
1286 1287 		zfcp_adapter_put(adapter);
1287 1288 		break;
1288 1289 	}
+3 -4
drivers/s390/scsi/zfcp_fc.c
···
  50   50 	if (mutex_lock_interruptible(&wka_port->mutex))
  51   51 		return -ERESTARTSYS;
  52   52 
  53        -	if (wka_port->status != ZFCP_WKA_PORT_ONLINE) {
        53 +	if (wka_port->status == ZFCP_WKA_PORT_OFFLINE ||
        54 +	    wka_port->status == ZFCP_WKA_PORT_CLOSING) {
  54   55 		wka_port->status = ZFCP_WKA_PORT_OPENING;
  55   56 		if (zfcp_fsf_open_wka_port(wka_port))
  56   57 			wka_port->status = ZFCP_WKA_PORT_OFFLINE;
···
 126  125 
 127  126 	read_lock_irqsave(&zfcp_data.config_lock, flags);
 128  127 	list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
 129        -		/* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */
 130        -		if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_DID_DID))
       128 +		if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_PHYS_OPEN))
 131  129 			/* Try to connect to unused ports anyway. */
 132  130 			zfcp_erp_port_reopen(port,
 133  131 					     ZFCP_STATUS_COMMON_ERP_FAILED,
···
 610  610 	int ret, i;
 611  611 	struct zfcp_gpn_ft *gpn_ft;
 612  612 
 613        -	zfcp_erp_wait(adapter);	/* wait until adapter is finished with ERP */
 614  613 	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
 615  614 		return 0;
 616  615 
+14 -6
drivers/s390/scsi/zfcp_fsf.c
···
 930  930 		goto out;
 931  931 	req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
 932  932 				  req_flags, adapter->pool.fsf_req_abort);
 933        -	if (IS_ERR(req))
       933 +	if (IS_ERR(req)) {
       934 +		req = NULL;
 934  935 		goto out;
       936 +	}
 935  937 
 936  938 	if (unlikely(!(atomic_read(&unit->status) &
 937  939 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
···
1586 1584 		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1587 1585 		break;
1588 1586 	case FSF_PORT_ALREADY_OPEN:
      1587 +		break;
1589 1588 	case FSF_GOOD:
1590 1589 		wka_port->handle = header->port_handle;
1591 1590 		wka_port->status = ZFCP_WKA_PORT_ONLINE;
···
2116 2113 
2117 2114 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2118 2115 {
2119       -	struct scsi_cmnd *scpnt = req->data;
      2116 +	struct scsi_cmnd *scpnt;
2120 2117 	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2121 2118 	    &(req->qtcb->bottom.io.fcp_rsp);
2122 2119 	u32 sns_len;
2123 2120 	char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
2124 2121 	unsigned long flags;
2125 2122 
2126       -	if (unlikely(!scpnt))
2127       -		return;
2128       -
2129 2123 	read_lock_irqsave(&req->adapter->abort_lock, flags);
      2124 +
      2125 +	scpnt = req->data;
      2126 +	if (unlikely(!scpnt)) {
      2127 +		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
      2128 +		return;
      2129 +	}
2130 2130 
2131 2131 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2132 2132 		set_host_byte(scpnt, DID_SOFT_ERROR);
···
2448 2442 		goto out;
2449 2443 	req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2450 2444 				  adapter->pool.fsf_req_scsi);
2451       -	if (IS_ERR(req))
      2445 +	if (IS_ERR(req)) {
      2446 +		req = NULL;
2452 2447 		goto out;
      2448 +	}
2453 2449 
2454 2450 	req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2455 2451 	req->data = unit;
+1 -1
drivers/s390/scsi/zfcp_scsi.c
···
  88   88 	ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0,
  89   89 					     ZFCP_REQ_AUTO_CLEANUP);
  90   90 	if (unlikely(ret == -EBUSY))
  91        -		zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
        91 +		return SCSI_MLQUEUE_DEVICE_BUSY;
  92   92 	else if (unlikely(ret < 0))
  93   93 		return SCSI_MLQUEUE_HOST_BUSY;
  94   94 
+2 -2
drivers/scsi/aacraid/linit.c
···
 427  427 	 * Firmware has an individual device recovery time typically
 428  428 	 * of 35 seconds, give us a margin.
 429  429 	 */
 430        -	if (sdev->timeout < (45 * HZ))
 431        -		sdev->timeout = 45 * HZ;
       430 +	if (sdev->request_queue->rq_timeout < (45 * HZ))
       431 +		blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
 432  432 	for (cid = 0; cid < aac->maximum_num_containers; ++cid)
 433  433 		if (aac->fsa_dev[cid].valid)
 434  434 			++num_lsu;
+1 -1
drivers/scsi/ibmvscsi/ibmvscsi.c
···
1442 1442 	spin_lock_irqsave(shost->host_lock, lock_flags);
1443 1443 	if (sdev->type == TYPE_DISK) {
1444 1444 		sdev->allow_restart = 1;
1445       -		sdev->timeout = 60 * HZ;
      1445 +		blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1446 1446 	}
1447 1447 	scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1448 1448 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+2 -1
drivers/scsi/megaraid/megaraid_sas.c
···
1016 1016 	 * The RAID firmware may require extended timeouts.
1017 1017 	 */
1018 1018 	if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS)
1019       -		sdev->timeout = MEGASAS_DEFAULT_CMD_TIMEOUT * HZ;
      1019 +		blk_queue_rq_timeout(sdev->request_queue,
      1020 +				     MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
1020 1021 	return 0;
1021 1022 }
1022 1023 
+1 -2
drivers/scsi/scsi_error.c
···
 932  932 	int i, rtn = NEEDS_RETRY;
 933  933 
 934  934 	for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
 935        -		rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
 936        -					scmd->device->timeout, 0);
       935 +		rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
 937  936 
 938  937 	if (rtn == SUCCESS)
 939  938 		return 0;
+11 -12
drivers/scsi/scsi_lib.c
···
 567  567 	 */
 568  568 static void scsi_run_queue(struct request_queue *q)
 569  569 {
 570        -	struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
       570 +	struct scsi_device *sdev = q->queuedata;
 571  571 	struct Scsi_Host *shost = sdev->host;
       572 +	LIST_HEAD(starved_list);
 572  573 	unsigned long flags;
 573  574 
 574  575 	if (scsi_target(sdev)->single_lun)
 575  576 		scsi_single_lun_run(sdev);
 576  577 
 577  578 	spin_lock_irqsave(shost->host_lock, flags);
 578        -	while (!list_empty(&shost->starved_list) && !scsi_host_is_busy(shost)) {
       579 +	list_splice_init(&shost->starved_list, &starved_list);
       580 +
       581 +	while (!list_empty(&starved_list)) {
 579  582 		int flagset;
 580  583 
 581  584 		/*
···
 591  588 		 * scsi_request_fn must get the host_lock before checking
 592  589 		 * or modifying starved_list or starved_entry.
 593  590 		 */
 594        -		sdev = list_entry(shost->starved_list.next,
 595        -				  struct scsi_device, starved_entry);
 596        -		/*
 597        -		 * The *queue_ready functions can add a device back onto the
 598        -		 * starved list's tail, so we must check for a infinite loop.
 599        -		 */
 600        -		if (sdev == starved_head)
       591 +		if (scsi_host_is_busy(shost))
 601  592 			break;
 602        -		if (!starved_head)
 603        -			starved_head = sdev;
 604  593 
       594 +		sdev = list_entry(starved_list.next,
       595 +				  struct scsi_device, starved_entry);
       596 +		list_del_init(&sdev->starved_entry);
 605  597 		if (scsi_target_is_busy(scsi_target(sdev))) {
 606  598 			list_move_tail(&sdev->starved_entry,
 607  599 				       &shost->starved_list);
 608  600 			continue;
 609  601 		}
 610  602 
 611        -		list_del_init(&sdev->starved_entry);
 612  603 		spin_unlock(shost->host_lock);
 613  604 
 614  605 		spin_lock(sdev->request_queue->queue_lock);
···
 618  621 
 619  622 		spin_lock(shost->host_lock);
 620  623 	}
       624 +	/* put any unprocessed entries back */
       625 +	list_splice(&starved_list, &shost->starved_list);
 621  626 	spin_unlock_irqrestore(shost->host_lock, flags);
 622  627 
 623  628 	blk_run_queue(q);
+1 -1
drivers/scsi/stex.c
···
 477  477 {
 478  478 	sdev->use_10_for_rw = 1;
 479  479 	sdev->use_10_for_ms = 1;
 480        -	sdev->timeout = 60 * HZ;
       480 +	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
 481  481 	sdev->tagged_supported = 1;
 482  482 
 483  483 	return 0;
+1 -1
include/scsi/scsi_transport_fc.h
···
 357  357 /* bit field values for struct fc_rport "flags" field: */
 358  358 #define FC_RPORT_DEVLOSS_PENDING	0x01
 359  359 #define FC_RPORT_SCAN_PENDING		0x02
 360        -#define FC_RPORT_FAST_FAIL_TIMEDOUT	0x03
       360 +#define FC_RPORT_FAST_FAIL_TIMEDOUT	0x04
 361  361 
 362  362 #define dev_to_rport(d) \
 363  363 	container_of(d, struct fc_rport, dev)