Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
[SCSI] aic79xx: check for non-NULL scb in ahd_handle_nonpkt_busfree
[SCSI] zfcp: Set hardware timeout as requested by BSG request.
[SCSI] zfcp: Introduce bsg_timeout callback.
[SCSI] scsi_transport_fc: Allow LLD to reset FC BSG timeout
[SCSI] zfcp: add missing compat ptr conversion
[SCSI] zfcp: Fix linebreak in hba trace
[SCSI] zfcp: Issue zfcp_fc_wka_port_put after FC CT BSG request
[SCSI] qla2xxx: Update version number to 8.03.01-k10.
[SCSI] fc-transport: Use packed modifier for fc_bsg_request structure.
[SCSI] qla2xxx: Perform fast mailbox read of flash regardless of size or address alignment.
[SCSI] qla2xxx: Correct FCP2 recovery handling.
[SCSI] scsi_lib: Fix bug in completion of bidi commands
[SCSI] mptsas: Fix issue with chain pools allocation on katmai
[SCSI] aacraid: fix File System going into read-only mode
[SCSI] lpfc: fix file permissions

+291 -126
+2
drivers/message/fusion/mptbase.c
··· 4330 4331 if (ioc->bus_type == SPI) 4332 num_chain *= MPT_SCSI_CAN_QUEUE; 4333 else 4334 num_chain *= MPT_FC_CAN_QUEUE; 4335
··· 4330 4331 if (ioc->bus_type == SPI) 4332 num_chain *= MPT_SCSI_CAN_QUEUE; 4333 + else if (ioc->bus_type == SAS) 4334 + num_chain *= MPT_SAS_CAN_QUEUE; 4335 else 4336 num_chain *= MPT_FC_CAN_QUEUE; 4337
+7 -2
drivers/s390/scsi/zfcp_cfdc.c
··· 12 13 #include <linux/types.h> 14 #include <linux/miscdevice.h> 15 #include <asm/ccwdev.h> 16 #include "zfcp_def.h" 17 #include "zfcp_ext.h" ··· 164 } 165 166 static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, 167 - unsigned long buffer) 168 { 169 struct zfcp_cfdc_data *data; 170 struct zfcp_cfdc_data __user *data_user; ··· 176 if (command != ZFCP_CFDC_IOC) 177 return -ENOTTY; 178 179 - data_user = (void __user *) buffer; 180 if (!data_user) 181 return -EINVAL; 182
··· 12 13 #include <linux/types.h> 14 #include <linux/miscdevice.h> 15 + #include <asm/compat.h> 16 #include <asm/ccwdev.h> 17 #include "zfcp_def.h" 18 #include "zfcp_ext.h" ··· 163 } 164 165 static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, 166 + unsigned long arg) 167 { 168 struct zfcp_cfdc_data *data; 169 struct zfcp_cfdc_data __user *data_user; ··· 175 if (command != ZFCP_CFDC_IOC) 176 return -ENOTTY; 177 178 + if (is_compat_task()) 179 + data_user = compat_ptr(arg); 180 + else 181 + data_user = (void __user *)arg; 182 + 183 if (!data_user) 184 return -EINVAL; 185
+1 -1
drivers/s390/scsi/zfcp_dbf.c
··· 327 break; 328 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); 329 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); 330 - p += sprintf(*p, "\n"); 331 break; 332 333 case FSF_QTCB_OPEN_PORT_WITH_DID:
··· 327 break; 328 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); 329 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); 330 + *p += sprintf(*p, "\n"); 331 break; 332 333 case FSF_QTCB_OPEN_PORT_WITH_DID:
+3 -2
drivers/s390/scsi/zfcp_ext.h
··· 108 extern int zfcp_fc_gs_setup(struct zfcp_adapter *); 109 extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); 110 extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); 111 112 /* zfcp_fsf.c */ 113 extern int zfcp_fsf_open_port(struct zfcp_erp_action *); ··· 130 extern int zfcp_fsf_status_read(struct zfcp_qdio *); 131 extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); 132 extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, 133 - mempool_t *); 134 extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, 135 - struct zfcp_fsf_ct_els *); 136 extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 137 struct scsi_cmnd *); 138 extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
··· 108 extern int zfcp_fc_gs_setup(struct zfcp_adapter *); 109 extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); 110 extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); 111 + extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); 112 113 /* zfcp_fsf.c */ 114 extern int zfcp_fsf_open_port(struct zfcp_erp_action *); ··· 129 extern int zfcp_fsf_status_read(struct zfcp_qdio *); 130 extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); 131 extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, 132 + mempool_t *, unsigned int); 133 extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, 134 + struct zfcp_fsf_ct_els *, unsigned int); 135 extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 136 struct scsi_cmnd *); 137 extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+57 -27
drivers/s390/scsi/zfcp_fc.c
··· 258 gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; 259 260 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, 261 - adapter->pool.gid_pn_req); 262 if (!ret) { 263 wait_for_completion(&completion); 264 zfcp_fc_ns_gid_pn_eval(gid_pn); ··· 422 hton24(adisc->adisc_req.adisc_port_id, 423 fc_host_port_id(adapter->scsi_host)); 424 425 - ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els); 426 if (ret) 427 kmem_cache_free(zfcp_data.adisc_cache, adisc); 428 ··· 534 ct->req = &gpn_ft->sg_req; 535 ct->resp = gpn_ft->sg_resp; 536 537 - ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL); 538 if (!ret) 539 wait_for_completion(&completion); 540 return ret; ··· 680 job->job_done(job); 681 } 682 683 static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, 684 struct zfcp_adapter *adapter) 685 { ··· 736 } else 737 d_id = ntoh24(job->request->rqst_data.h_els.port_id); 738 739 - return zfcp_fsf_send_els(adapter, d_id, els); 740 } 741 742 static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, 743 struct zfcp_adapter *adapter) 744 { 745 int ret; 746 - u8 gs_type; 747 struct zfcp_fsf_ct_els *ct = job->dd_data; 748 struct zfcp_fc_wka_port *wka_port; 749 - u32 preamble_word1; 750 751 - preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; 752 - gs_type = (preamble_word1 & 0xff000000) >> 24; 753 - 754 - switch (gs_type) { 755 - case FC_FST_ALIAS: 756 - wka_port = &adapter->gs->as; 757 - break; 758 - case FC_FST_MGMT: 759 - wka_port = &adapter->gs->ms; 760 - break; 761 - case FC_FST_TIME: 762 - wka_port = &adapter->gs->ts; 763 - break; 764 - case FC_FST_DIR: 765 - wka_port = &adapter->gs->ds; 766 - break; 767 - default: 768 - return -EINVAL; /* no such service */ 769 - } 770 771 ret = zfcp_fc_wka_port_get(wka_port); 772 if (ret) 773 return ret; 774 775 - ret = zfcp_fsf_send_ct(wka_port, ct, NULL); 776 if (ret) 777 zfcp_fc_wka_port_put(wka_port); 778 ··· 777 778 ct_els->req = job->request_payload.sg_list; 779 ct_els->resp = job->reply_payload.sg_list; 780 - 
ct_els->handler = zfcp_fc_ct_els_job_handler; 781 ct_els->handler_data = job; 782 783 switch (job->request->msgcode) { ··· 789 default: 790 return -EINVAL; 791 } 792 } 793 794 int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
··· 258 gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; 259 260 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, 261 + adapter->pool.gid_pn_req, 262 + ZFCP_FC_CTELS_TMO); 263 if (!ret) { 264 wait_for_completion(&completion); 265 zfcp_fc_ns_gid_pn_eval(gid_pn); ··· 421 hton24(adisc->adisc_req.adisc_port_id, 422 fc_host_port_id(adapter->scsi_host)); 423 424 + ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els, 425 + ZFCP_FC_CTELS_TMO); 426 if (ret) 427 kmem_cache_free(zfcp_data.adisc_cache, adisc); 428 ··· 532 ct->req = &gpn_ft->sg_req; 533 ct->resp = gpn_ft->sg_resp; 534 535 + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL, 536 + ZFCP_FC_CTELS_TMO); 537 if (!ret) 538 wait_for_completion(&completion); 539 return ret; ··· 677 job->job_done(job); 678 } 679 680 + static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job) 681 + { 682 + u32 preamble_word1; 683 + u8 gs_type; 684 + struct zfcp_adapter *adapter; 685 + 686 + preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; 687 + gs_type = (preamble_word1 & 0xff000000) >> 24; 688 + 689 + adapter = (struct zfcp_adapter *) job->shost->hostdata[0]; 690 + 691 + switch (gs_type) { 692 + case FC_FST_ALIAS: 693 + return &adapter->gs->as; 694 + case FC_FST_MGMT: 695 + return &adapter->gs->ms; 696 + case FC_FST_TIME: 697 + return &adapter->gs->ts; 698 + break; 699 + case FC_FST_DIR: 700 + return &adapter->gs->ds; 701 + break; 702 + default: 703 + return NULL; 704 + } 705 + } 706 + 707 + static void zfcp_fc_ct_job_handler(void *data) 708 + { 709 + struct fc_bsg_job *job = data; 710 + struct zfcp_fc_wka_port *wka_port; 711 + 712 + wka_port = zfcp_fc_job_wka_port(job); 713 + zfcp_fc_wka_port_put(wka_port); 714 + 715 + zfcp_fc_ct_els_job_handler(data); 716 + } 717 + 718 static int zfcp_fc_exec_els_job(struct fc_bsg_job *job, 719 struct zfcp_adapter *adapter) 720 { ··· 695 } else 696 d_id = ntoh24(job->request->rqst_data.h_els.port_id); 697 698 + els->handler = zfcp_fc_ct_els_job_handler; 699 + 
return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ); 700 } 701 702 static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job, 703 struct zfcp_adapter *adapter) 704 { 705 int ret; 706 struct zfcp_fsf_ct_els *ct = job->dd_data; 707 struct zfcp_fc_wka_port *wka_port; 708 709 + wka_port = zfcp_fc_job_wka_port(job); 710 + if (!wka_port) 711 + return -EINVAL; 712 713 ret = zfcp_fc_wka_port_get(wka_port); 714 if (ret) 715 return ret; 716 717 + ct->handler = zfcp_fc_ct_job_handler; 718 + ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ); 719 if (ret) 720 zfcp_fc_wka_port_put(wka_port); 721 ··· 752 753 ct_els->req = job->request_payload.sg_list; 754 ct_els->resp = job->reply_payload.sg_list; 755 ct_els->handler_data = job; 756 757 switch (job->request->msgcode) { ··· 765 default: 766 return -EINVAL; 767 } 768 + } 769 + 770 + int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job) 771 + { 772 + /* hardware tracks timeout, reset bsg timeout to not interfere */ 773 + return -EAGAIN; 774 } 775 776 int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
+2
drivers/s390/scsi/zfcp_fc.h
··· 27 #define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \ 28 (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) 29 30 /** 31 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request 32 * @ct_hdr: FC GS common transport header
··· 27 #define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \ 28 (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) 29 30 + #define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000) 31 + 32 /** 33 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request 34 * @ct_hdr: FC GS common transport header
+10 -9
drivers/s390/scsi/zfcp_fsf.c
··· 1068 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, 1069 struct scatterlist *sg_req, 1070 struct scatterlist *sg_resp, 1071 - int max_sbals) 1072 { 1073 int ret; 1074 - unsigned int fcp_chan_timeout; 1075 1076 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); 1077 if (ret) 1078 return ret; 1079 1080 /* common settings for ct/gs and els requests */ 1081 - fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000; 1082 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1083 - req->qtcb->bottom.support.timeout = fcp_chan_timeout; 1084 - zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ); 1085 1086 return 0; 1087 } ··· 1092 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1093 */ 1094 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, 1095 - struct zfcp_fsf_ct_els *ct, mempool_t *pool) 1096 { 1097 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1098 struct zfcp_fsf_req *req; ··· 1112 1113 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1114 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, 1115 - FSF_MAX_SBALS_PER_REQ); 1116 if (ret) 1117 goto failed_send; 1118 ··· 1189 * @els: pointer to struct zfcp_send_els with data for the command 1190 */ 1191 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, 1192 - struct zfcp_fsf_ct_els *els) 1193 { 1194 struct zfcp_fsf_req *req; 1195 struct zfcp_qdio *qdio = adapter->qdio; ··· 1207 } 1208 1209 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1210 - ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2); 1211 1212 if (ret) 1213 goto failed_send;
··· 1068 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, 1069 struct scatterlist *sg_req, 1070 struct scatterlist *sg_resp, 1071 + int max_sbals, unsigned int timeout) 1072 { 1073 int ret; 1074 1075 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals); 1076 if (ret) 1077 return ret; 1078 1079 /* common settings for ct/gs and els requests */ 1080 + if (timeout > 255) 1081 + timeout = 255; /* max value accepted by hardware */ 1082 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1083 + req->qtcb->bottom.support.timeout = timeout; 1084 + zfcp_fsf_start_timer(req, (timeout + 10) * HZ); 1085 1086 return 0; 1087 } ··· 1092 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1093 */ 1094 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, 1095 + struct zfcp_fsf_ct_els *ct, mempool_t *pool, 1096 + unsigned int timeout) 1097 { 1098 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1099 struct zfcp_fsf_req *req; ··· 1111 1112 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1113 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, 1114 + FSF_MAX_SBALS_PER_REQ, timeout); 1115 if (ret) 1116 goto failed_send; 1117 ··· 1188 * @els: pointer to struct zfcp_send_els with data for the command 1189 */ 1190 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, 1191 + struct zfcp_fsf_ct_els *els, unsigned int timeout) 1192 { 1193 struct zfcp_fsf_req *req; 1194 struct zfcp_qdio *qdio = adapter->qdio; ··· 1206 } 1207 1208 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1209 + ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout); 1210 1211 if (ret) 1212 goto failed_send;
+1
drivers/s390/scsi/zfcp_scsi.c
··· 652 .show_host_port_state = 1, 653 .show_host_active_fc4s = 1, 654 .bsg_request = zfcp_fc_exec_bsg_job, 655 /* no functions registered for following dynamic attributes but 656 directly set by LLDD */ 657 .show_host_port_type = 1,
··· 652 .show_host_port_state = 1, 653 .show_host_active_fc4s = 1, 654 .bsg_request = zfcp_fc_exec_bsg_job, 655 + .bsg_timeout = zfcp_fc_timeout_bsg_job, 656 /* no functions registered for following dynamic attributes but 657 directly set by LLDD */ 658 .show_host_port_type = 1,
+40 -12
drivers/scsi/aacraid/aachba.c
··· 293 status = -EINVAL; 294 } 295 } 296 - aac_fib_complete(fibptr); 297 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 298 if (status >= 0) { 299 if ((aac_commit == 1) || commit_flag) { ··· 313 FsaNormal, 314 1, 1, 315 NULL, NULL); 316 - aac_fib_complete(fibptr); 317 } else if (aac_commit == 0) { 318 printk(KERN_WARNING 319 "aac_get_config_status: Foreign device configurations are being ignored\n"); 320 } 321 } 322 - aac_fib_free(fibptr); 323 return status; 324 } 325 ··· 363 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 364 aac_fib_complete(fibptr); 365 } 366 - aac_fib_free(fibptr); 367 368 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 369 maximum_num_containers = MAXIMUM_NUM_CONTAINERS; ··· 1255 NULL); 1256 1257 if (rcode < 0) { 1258 - aac_fib_complete(fibptr); 1259 - aac_fib_free(fibptr); 1260 return rcode; 1261 } 1262 memcpy(&dev->adapter_info, info, sizeof(*info)); ··· 1284 1285 if (rcode >= 0) 1286 memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); 1287 } 1288 1289 ··· 1490 (dev->scsi_host_ptr->sg_tablesize * 8) + 112; 1491 } 1492 } 1493 - 1494 - aac_fib_complete(fibptr); 1495 - aac_fib_free(fibptr); 1496 1497 return rcode; 1498 } ··· 1655 * Alocate and initialize a Fib 1656 */ 1657 if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1658 return -1; 1659 } 1660 ··· 1735 * Allocate and initialize a Fib then setup a BlockWrite command 1736 */ 1737 if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1738 - scsicmd->result = DID_ERROR << 16; 1739 - scsicmd->scsi_done(scsicmd); 1740 - return 0; 1741 } 1742 1743 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
··· 293 status = -EINVAL; 294 } 295 } 296 + /* Do not set XferState to zero unless receives a response from F/W */ 297 + if (status >= 0) 298 + aac_fib_complete(fibptr); 299 + 300 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 301 if (status >= 0) { 302 if ((aac_commit == 1) || commit_flag) { ··· 310 FsaNormal, 311 1, 1, 312 NULL, NULL); 313 + /* Do not set XferState to zero unless 314 + * receives a response from F/W */ 315 + if (status >= 0) 316 + aac_fib_complete(fibptr); 317 } else if (aac_commit == 0) { 318 printk(KERN_WARNING 319 "aac_get_config_status: Foreign device configurations are being ignored\n"); 320 } 321 } 322 + /* FIB should be freed only after getting the response from the F/W */ 323 + if (status != -ERESTARTSYS) 324 + aac_fib_free(fibptr); 325 return status; 326 } 327 ··· 355 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 356 aac_fib_complete(fibptr); 357 } 358 + /* FIB should be freed only after getting the response from the F/W */ 359 + if (status != -ERESTARTSYS) 360 + aac_fib_free(fibptr); 361 362 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 363 maximum_num_containers = MAXIMUM_NUM_CONTAINERS; ··· 1245 NULL); 1246 1247 if (rcode < 0) { 1248 + /* FIB should be freed only after 1249 + * getting the response from the F/W */ 1250 + if (rcode != -ERESTARTSYS) { 1251 + aac_fib_complete(fibptr); 1252 + aac_fib_free(fibptr); 1253 + } 1254 return rcode; 1255 } 1256 memcpy(&dev->adapter_info, info, sizeof(*info)); ··· 1270 1271 if (rcode >= 0) 1272 memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); 1273 + if (rcode == -ERESTARTSYS) { 1274 + fibptr = aac_fib_alloc(dev); 1275 + if (!fibptr) 1276 + return -ENOMEM; 1277 + } 1278 + 1279 } 1280 1281 ··· 1470 (dev->scsi_host_ptr->sg_tablesize * 8) + 112; 1471 } 1472 } 1473 + /* FIB should be freed only after getting the response from the F/W */ 1474 + if (rcode != -ERESTARTSYS) { 1475 + aac_fib_complete(fibptr); 1476 + aac_fib_free(fibptr); 1477 + } 1478 
1479 return rcode; 1480 } ··· 1633 * Alocate and initialize a Fib 1634 */ 1635 if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1636 + printk(KERN_WARNING "aac_read: fib allocation failed\n"); 1637 return -1; 1638 } 1639 ··· 1712 * Allocate and initialize a Fib then setup a BlockWrite command 1713 */ 1714 if (!(cmd_fibcontext = aac_fib_alloc(dev))) { 1715 + /* FIB temporarily unavailable,not catastrophic failure */ 1716 + 1717 + /* scsicmd->result = DID_ERROR << 16; 1718 + * scsicmd->scsi_done(scsicmd); 1719 + * return 0; 1720 + */ 1721 + printk(KERN_WARNING "aac_write: fib allocation failed\n"); 1722 + return -1; 1723 } 1724 1725 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
+4 -1
drivers/scsi/aacraid/aacraid.h
··· 12 *----------------------------------------------------------------------------*/ 13 14 #ifndef AAC_DRIVER_BUILD 15 - # define AAC_DRIVER_BUILD 2461 16 # define AAC_DRIVER_BRANCH "-ms" 17 #endif 18 #define MAXIMUM_NUM_CONTAINERS 32 ··· 1036 u8 printf_enabled; 1037 u8 in_reset; 1038 u8 msi; 1039 }; 1040 1041 #define aac_adapter_interrupt(dev) \
··· 12 *----------------------------------------------------------------------------*/ 13 14 #ifndef AAC_DRIVER_BUILD 15 + # define AAC_DRIVER_BUILD 24702 16 # define AAC_DRIVER_BRANCH "-ms" 17 #endif 18 #define MAXIMUM_NUM_CONTAINERS 32 ··· 1036 u8 printf_enabled; 1037 u8 in_reset; 1038 u8 msi; 1039 + int management_fib_count; 1040 + spinlock_t manage_lock; 1041 + 1042 }; 1043 1044 #define aac_adapter_interrupt(dev) \
+14 -14
drivers/scsi/aacraid/commctrl.c
··· 153 fibptr->hw_fib_pa = hw_fib_pa; 154 fibptr->hw_fib_va = hw_fib; 155 } 156 - if (retval != -EINTR) 157 aac_fib_free(fibptr); 158 return retval; 159 } ··· 322 } 323 if (f.wait) { 324 if(down_interruptible(&fibctx->wait_sem) < 0) { 325 - status = -EINTR; 326 } else { 327 /* Lock again and retry */ 328 spin_lock_irqsave(&dev->fib_lock, flags); ··· 593 u64 addr; 594 void* p; 595 if (upsg->sg[i].count > 596 - (dev->adapter_info.options & 597 AAC_OPT_NEW_COMM) ? 598 (dev->scsi_host_ptr->max_sectors << 9) : 599 - 65536) { 600 rcode = -EINVAL; 601 goto cleanup; 602 } ··· 645 u64 addr; 646 void* p; 647 if (usg->sg[i].count > 648 - (dev->adapter_info.options & 649 AAC_OPT_NEW_COMM) ? 650 (dev->scsi_host_ptr->max_sectors << 9) : 651 - 65536) { 652 rcode = -EINVAL; 653 goto cleanup; 654 } ··· 695 uintptr_t addr; 696 void* p; 697 if (usg->sg[i].count > 698 - (dev->adapter_info.options & 699 AAC_OPT_NEW_COMM) ? 700 (dev->scsi_host_ptr->max_sectors << 9) : 701 - 65536) { 702 rcode = -EINVAL; 703 goto cleanup; 704 } ··· 734 dma_addr_t addr; 735 void* p; 736 if (upsg->sg[i].count > 737 - (dev->adapter_info.options & 738 AAC_OPT_NEW_COMM) ? 739 (dev->scsi_host_ptr->max_sectors << 9) : 740 - 65536) { 741 rcode = -EINVAL; 742 goto cleanup; 743 } ··· 772 psg->count = cpu_to_le32(sg_indx+1); 773 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 774 } 775 - if (status == -EINTR) { 776 - rcode = -EINTR; 777 goto cleanup; 778 } 779 ··· 810 for(i=0; i <= sg_indx; i++){ 811 kfree(sg_list[i]); 812 } 813 - if (rcode != -EINTR) { 814 aac_fib_complete(srbfib); 815 aac_fib_free(srbfib); 816 } ··· 848 */ 849 850 status = aac_dev_ioctl(dev, cmd, arg); 851 - if(status != -ENOTTY) 852 return status; 853 854 switch (cmd) {
··· 153 fibptr->hw_fib_pa = hw_fib_pa; 154 fibptr->hw_fib_va = hw_fib; 155 } 156 + if (retval != -ERESTARTSYS) 157 aac_fib_free(fibptr); 158 return retval; 159 } ··· 322 } 323 if (f.wait) { 324 if(down_interruptible(&fibctx->wait_sem) < 0) { 325 + status = -ERESTARTSYS; 326 } else { 327 /* Lock again and retry */ 328 spin_lock_irqsave(&dev->fib_lock, flags); ··· 593 u64 addr; 594 void* p; 595 if (upsg->sg[i].count > 596 + ((dev->adapter_info.options & 597 AAC_OPT_NEW_COMM) ? 598 (dev->scsi_host_ptr->max_sectors << 9) : 599 + 65536)) { 600 rcode = -EINVAL; 601 goto cleanup; 602 } ··· 645 u64 addr; 646 void* p; 647 if (usg->sg[i].count > 648 + ((dev->adapter_info.options & 649 AAC_OPT_NEW_COMM) ? 650 (dev->scsi_host_ptr->max_sectors << 9) : 651 + 65536)) { 652 rcode = -EINVAL; 653 goto cleanup; 654 } ··· 695 uintptr_t addr; 696 void* p; 697 if (usg->sg[i].count > 698 + ((dev->adapter_info.options & 699 AAC_OPT_NEW_COMM) ? 700 (dev->scsi_host_ptr->max_sectors << 9) : 701 + 65536)) { 702 rcode = -EINVAL; 703 goto cleanup; 704 } ··· 734 dma_addr_t addr; 735 void* p; 736 if (upsg->sg[i].count > 737 + ((dev->adapter_info.options & 738 AAC_OPT_NEW_COMM) ? 739 (dev->scsi_host_ptr->max_sectors << 9) : 740 + 65536)) { 741 rcode = -EINVAL; 742 goto cleanup; 743 } ··· 772 psg->count = cpu_to_le32(sg_indx+1); 773 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 774 } 775 + if (status == -ERESTARTSYS) { 776 + rcode = -ERESTARTSYS; 777 goto cleanup; 778 } 779 ··· 810 for(i=0; i <= sg_indx; i++){ 811 kfree(sg_list[i]); 812 } 813 + if (rcode != -ERESTARTSYS) { 814 aac_fib_complete(srbfib); 815 aac_fib_free(srbfib); 816 } ··· 848 */ 849 850 status = aac_dev_ioctl(dev, cmd, arg); 851 + if (status != -ENOTTY) 852 return status; 853 854 switch (cmd) {
+5 -1
drivers/scsi/aacraid/comminit.c
··· 194 195 if (status >= 0) 196 aac_fib_complete(fibctx); 197 - aac_fib_free(fibctx); 198 return status; 199 } 200 ··· 306 /* 307 * Check the preferred comm settings, defaults from template. 308 */ 309 dev->max_fib_size = sizeof(struct hw_fib); 310 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size 311 - sizeof(struct aac_fibhdr)
··· 194 195 if (status >= 0) 196 aac_fib_complete(fibctx); 197 + /* FIB should be freed only after getting the response from the F/W */ 198 + if (status != -ERESTARTSYS) 199 + aac_fib_free(fibctx); 200 return status; 201 } 202 ··· 304 /* 305 * Check the preferred comm settings, defaults from template. 306 */ 307 + dev->management_fib_count = 0; 308 + spin_lock_init(&dev->manage_lock); 309 dev->max_fib_size = sizeof(struct hw_fib); 310 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size 311 - sizeof(struct aac_fibhdr)
+61 -11
drivers/scsi/aacraid/commsup.c
··· 189 190 void aac_fib_free(struct fib *fibptr) 191 { 192 - unsigned long flags; 193 194 spin_lock_irqsave(&fibptr->dev->fib_lock, flags); 195 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) ··· 397 struct hw_fib * hw_fib = fibptr->hw_fib_va; 398 unsigned long flags = 0; 399 unsigned long qflags; 400 401 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) 402 return -EBUSY; ··· 480 if (!dev->queues) 481 return -EBUSY; 482 483 - if(wait) 484 spin_lock_irqsave(&fibptr->event_lock, flags); 485 - aac_adapter_deliver(fibptr); 486 487 /* 488 * If the caller wanted us to wait for response wait now. ··· 547 udelay(5); 548 } 549 } else if (down_interruptible(&fibptr->event_wait)) { 550 - fibptr->done = 2; 551 - up(&fibptr->event_wait); 552 } 553 spin_lock_irqsave(&fibptr->event_lock, flags); 554 - if ((fibptr->done == 0) || (fibptr->done == 2)) { 555 fibptr->done = 2; /* Tell interrupt we aborted */ 556 spin_unlock_irqrestore(&fibptr->event_lock, flags); 557 - return -EINTR; 558 } 559 spin_unlock_irqrestore(&fibptr->event_lock, flags); 560 BUG_ON(fibptr->done == 0); ··· 721 722 int aac_fib_complete(struct fib *fibptr) 723 { 724 struct hw_fib * hw_fib = fibptr->hw_fib_va; 725 726 /* ··· 742 * command is complete that we had sent to the adapter and this 743 * cdb could be reused. 744 */ 745 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && 746 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) 747 { ··· 1395 1396 if (status >= 0) 1397 aac_fib_complete(fibctx); 1398 - aac_fib_free(fibctx); 1399 } 1400 } 1401 ··· 1802 struct fib *fibptr; 1803 1804 if ((fibptr = aac_fib_alloc(dev))) { 1805 __le32 *info; 1806 1807 aac_fib_init(fibptr); ··· 1813 1814 *info = cpu_to_le32(now.tv_sec); 1815 1816 - (void)aac_fib_send(SendHostTime, 1817 fibptr, 1818 sizeof(*info), 1819 FsaNormal, 1820 1, 1, 1821 NULL, 1822 NULL); 1823 - aac_fib_complete(fibptr); 1824 - aac_fib_free(fibptr); 1825 } 1826 difference = (long)(unsigned)update_interval*HZ; 1827 } else {
··· 189 190 void aac_fib_free(struct fib *fibptr) 191 { 192 + unsigned long flags, flagsv; 193 + 194 + spin_lock_irqsave(&fibptr->event_lock, flagsv); 195 + if (fibptr->done == 2) { 196 + spin_unlock_irqrestore(&fibptr->event_lock, flagsv); 197 + return; 198 + } 199 + spin_unlock_irqrestore(&fibptr->event_lock, flagsv); 200 201 spin_lock_irqsave(&fibptr->dev->fib_lock, flags); 202 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) ··· 390 struct hw_fib * hw_fib = fibptr->hw_fib_va; 391 unsigned long flags = 0; 392 unsigned long qflags; 393 + unsigned long mflags = 0; 394 + 395 396 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) 397 return -EBUSY; ··· 471 if (!dev->queues) 472 return -EBUSY; 473 474 + if (wait) { 475 + 476 + spin_lock_irqsave(&dev->manage_lock, mflags); 477 + if (dev->management_fib_count >= AAC_NUM_MGT_FIB) { 478 + printk(KERN_INFO "No management Fibs Available:%d\n", 479 + dev->management_fib_count); 480 + spin_unlock_irqrestore(&dev->manage_lock, mflags); 481 + return -EBUSY; 482 + } 483 + dev->management_fib_count++; 484 + spin_unlock_irqrestore(&dev->manage_lock, mflags); 485 spin_lock_irqsave(&fibptr->event_lock, flags); 486 + } 487 + 488 + if (aac_adapter_deliver(fibptr) != 0) { 489 + printk(KERN_ERR "aac_fib_send: returned -EBUSY\n"); 490 + if (wait) { 491 + spin_unlock_irqrestore(&fibptr->event_lock, flags); 492 + spin_lock_irqsave(&dev->manage_lock, mflags); 493 + dev->management_fib_count--; 494 + spin_unlock_irqrestore(&dev->manage_lock, mflags); 495 + } 496 + return -EBUSY; 497 + } 498 + 499 500 /* 501 * If the caller wanted us to wait for response wait now. ··· 516 udelay(5); 517 } 518 } else if (down_interruptible(&fibptr->event_wait)) { 519 + /* Do nothing ... 
satisfy 520 + * down_interruptible must_check */ 521 } 522 + 523 spin_lock_irqsave(&fibptr->event_lock, flags); 524 + if (fibptr->done == 0) { 525 fibptr->done = 2; /* Tell interrupt we aborted */ 526 spin_unlock_irqrestore(&fibptr->event_lock, flags); 527 + return -ERESTARTSYS; 528 } 529 spin_unlock_irqrestore(&fibptr->event_lock, flags); 530 BUG_ON(fibptr->done == 0); ··· 689 690 int aac_fib_complete(struct fib *fibptr) 691 { 692 + unsigned long flags; 693 struct hw_fib * hw_fib = fibptr->hw_fib_va; 694 695 /* ··· 709 * command is complete that we had sent to the adapter and this 710 * cdb could be reused. 711 */ 712 + spin_lock_irqsave(&fibptr->event_lock, flags); 713 + if (fibptr->done == 2) { 714 + spin_unlock_irqrestore(&fibptr->event_lock, flags); 715 + return 0; 716 + } 717 + spin_unlock_irqrestore(&fibptr->event_lock, flags); 718 + 719 if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && 720 (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) 721 { ··· 1355 1356 if (status >= 0) 1357 aac_fib_complete(fibctx); 1358 + /* FIB should be freed only after getting 1359 + * the response from the F/W */ 1360 + if (status != -ERESTARTSYS) 1361 + aac_fib_free(fibctx); 1362 } 1363 } 1364 ··· 1759 struct fib *fibptr; 1760 1761 if ((fibptr = aac_fib_alloc(dev))) { 1762 + int status; 1763 __le32 *info; 1764 1765 aac_fib_init(fibptr); ··· 1769 1770 *info = cpu_to_le32(now.tv_sec); 1771 1772 + status = aac_fib_send(SendHostTime, 1773 fibptr, 1774 sizeof(*info), 1775 FsaNormal, 1776 1, 1, 1777 NULL, 1778 NULL); 1779 + /* Do not set XferState to zero unless 1780 + * receives a response from F/W */ 1781 + if (status >= 0) 1782 + aac_fib_complete(fibptr); 1783 + /* FIB should be freed only after 1784 + * getting the response from the F/W */ 1785 + if (status != -ERESTARTSYS) 1786 + aac_fib_free(fibptr); 1787 } 1788 difference = (long)(unsigned)update_interval*HZ; 1789 } else {
+30 -6
drivers/scsi/aacraid/dpcsup.c
··· 57 struct hw_fib * hwfib; 58 struct fib * fib; 59 int consumed = 0; 60 - unsigned long flags; 61 62 - spin_lock_irqsave(q->lock, flags); 63 /* 64 * Keep pulling response QEs off the response queue and waking 65 * up the waiters until there are no more QEs. We then return ··· 125 } else { 126 unsigned long flagv; 127 spin_lock_irqsave(&fib->event_lock, flagv); 128 - if (!fib->done) 129 fib->done = 1; 130 - up(&fib->event_wait); 131 spin_unlock_irqrestore(&fib->event_lock, flagv); 132 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 133 if (fib->done == 2) { 134 aac_fib_complete(fib); 135 aac_fib_free(fib); 136 } ··· 241 242 unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) 243 { 244 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); 245 if ((index & 0x00000002L)) { 246 struct hw_fib * hw_fib; ··· 330 unsigned long flagv; 331 dprintk((KERN_INFO "event_wait up\n")); 332 spin_lock_irqsave(&fib->event_lock, flagv); 333 - if (!fib->done) 334 fib->done = 1; 335 - up(&fib->event_wait); 336 spin_unlock_irqrestore(&fib->event_lock, flagv); 337 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 338 } 339 return 0; 340 }
··· 57 struct hw_fib * hwfib; 58 struct fib * fib; 59 int consumed = 0; 60 + unsigned long flags, mflags; 61 62 + spin_lock_irqsave(q->lock, flags); 63 /* 64 * Keep pulling response QEs off the response queue and waking 65 * up the waiters until there are no more QEs. We then return ··· 125 } else { 126 unsigned long flagv; 127 spin_lock_irqsave(&fib->event_lock, flagv); 128 + if (!fib->done) { 129 fib->done = 1; 130 + up(&fib->event_wait); 131 + } 132 spin_unlock_irqrestore(&fib->event_lock, flagv); 133 + 134 + spin_lock_irqsave(&dev->manage_lock, mflags); 135 + dev->management_fib_count--; 136 + spin_unlock_irqrestore(&dev->manage_lock, mflags); 137 + 138 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 139 if (fib->done == 2) { 140 + spin_lock_irqsave(&fib->event_lock, flagv); 141 + fib->done = 0; 142 + spin_unlock_irqrestore(&fib->event_lock, flagv); 143 aac_fib_complete(fib); 144 aac_fib_free(fib); 145 } ··· 232 233 unsigned int aac_intr_normal(struct aac_dev * dev, u32 index) 234 { 235 + unsigned long mflags; 236 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); 237 if ((index & 0x00000002L)) { 238 struct hw_fib * hw_fib; ··· 320 unsigned long flagv; 321 dprintk((KERN_INFO "event_wait up\n")); 322 spin_lock_irqsave(&fib->event_lock, flagv); 323 + if (!fib->done) { 324 fib->done = 1; 325 + up(&fib->event_wait); 326 + } 327 spin_unlock_irqrestore(&fib->event_lock, flagv); 328 + 329 + spin_lock_irqsave(&dev->manage_lock, mflags); 330 + dev->management_fib_count--; 331 + spin_unlock_irqrestore(&dev->manage_lock, mflags); 332 + 333 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 334 + if (fib->done == 2) { 335 + spin_lock_irqsave(&fib->event_lock, flagv); 336 + fib->done = 0; 337 + spin_unlock_irqrestore(&fib->event_lock, flagv); 338 + aac_fib_complete(fib); 339 + aac_fib_free(fib); 340 + } 341 + 342 } 343 return 0; 344 }
+31 -22
drivers/scsi/aic7xxx/aic79xx_core.c
··· 3171 tinfo->curr.transport_version = 2; 3172 tinfo->goal.transport_version = 2; 3173 tinfo->goal.ppr_options = 0; 3174 - /* 3175 - * Remove any SCBs in the waiting for selection 3176 - * queue that may also be for this target so 3177 - * that command ordering is preserved. 3178 - */ 3179 - ahd_freeze_devq(ahd, scb); 3180 - ahd_qinfifo_requeue_tail(ahd, scb); 3181 printerror = 0; 3182 } 3183 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) ··· 3197 MSG_EXT_WDTR_BUS_8_BIT, 3198 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3199 /*paused*/TRUE); 3200 - /* 3201 - * Remove any SCBs in the waiting for selection 3202 - * queue that may also be for this target so that 3203 - * command ordering is preserved. 3204 - */ 3205 - ahd_freeze_devq(ahd, scb); 3206 - ahd_qinfifo_requeue_tail(ahd, scb); 3207 printerror = 0; 3208 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) 3209 && ppr_busfree == 0) { ··· 3223 /*ppr_options*/0, 3224 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3225 /*paused*/TRUE); 3226 - /* 3227 - * Remove any SCBs in the waiting for selection 3228 - * queue that may also be for this target so that 3229 - * command ordering is preserved. 3230 - */ 3231 - ahd_freeze_devq(ahd, scb); 3232 - ahd_qinfifo_requeue_tail(ahd, scb); 3233 printerror = 0; 3234 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 3235 && ahd_sent_msg(ahd, AHDMSG_1B, ··· 3260 * the message phases. We check it last in case we 3261 * had to send some other message that caused a busfree. 3262 */ 3263 - if (printerror != 0 3264 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) 3265 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { 3266
··· 3171 tinfo->curr.transport_version = 2; 3172 tinfo->goal.transport_version = 2; 3173 tinfo->goal.ppr_options = 0; 3174 + if (scb != NULL) { 3175 + /* 3176 + * Remove any SCBs in the waiting 3177 + * for selection queue that may 3178 + * also be for this target so that 3179 + * command ordering is preserved. 3180 + */ 3181 + ahd_freeze_devq(ahd, scb); 3182 + ahd_qinfifo_requeue_tail(ahd, scb); 3183 + } 3184 printerror = 0; 3185 } 3186 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) ··· 3194 MSG_EXT_WDTR_BUS_8_BIT, 3195 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3196 /*paused*/TRUE); 3197 + if (scb != NULL) { 3198 + /* 3199 + * Remove any SCBs in the waiting for 3200 + * selection queue that may also be for 3201 + * this target so that command ordering 3202 + * is preserved. 3203 + */ 3204 + ahd_freeze_devq(ahd, scb); 3205 + ahd_qinfifo_requeue_tail(ahd, scb); 3206 + } 3207 printerror = 0; 3208 } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) 3209 && ppr_busfree == 0) { ··· 3217 /*ppr_options*/0, 3218 AHD_TRANS_CUR|AHD_TRANS_GOAL, 3219 /*paused*/TRUE); 3220 + if (scb != NULL) { 3221 + /* 3222 + * Remove any SCBs in the waiting for 3223 + * selection queue that may also be for 3224 + * this target so that command ordering 3225 + * is preserved. 3226 + */ 3227 + ahd_freeze_devq(ahd, scb); 3228 + ahd_qinfifo_requeue_tail(ahd, scb); 3229 + } 3230 printerror = 0; 3231 } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 3232 && ahd_sent_msg(ahd, AHDMSG_1B, ··· 3251 * the message phases. We check it last in case we 3252 * had to send some other message that caused a busfree. 3253 */ 3254 + if (scb != NULL && printerror != 0 3255 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) 3256 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { 3257
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw4.h
+1 -2
drivers/scsi/qla2xxx/qla_def.h
··· 1586 */ 1587 #define FCF_FABRIC_DEVICE BIT_0 1588 #define FCF_LOGIN_NEEDED BIT_1 1589 - #define FCF_TAPE_PRESENT BIT_2 1590 - #define FCF_FCP2_DEVICE BIT_3 1591 1592 /* No loop ID flag. */ 1593 #define FC_NO_LOOP_ID 0x1000
··· 1586 */ 1587 #define FCF_FABRIC_DEVICE BIT_0 1588 #define FCF_LOGIN_NEEDED BIT_1 1589 + #define FCF_FCP2_DEVICE BIT_2 1590 1591 /* No loop ID flag. */ 1592 #define FC_NO_LOOP_ID 0x1000
+6 -6
drivers/scsi/qla2xxx/qla_init.c
··· 205 206 switch (data[0]) { 207 case MBS_COMMAND_COMPLETE: 208 - if (fcport->flags & FCF_TAPE_PRESENT) 209 opts |= BIT_1; 210 rval = qla2x00_get_port_database(vha, fcport, opts); 211 if (rval != QLA_SUCCESS) ··· 2726 2727 /* 2728 * Logout all previous fabric devices marked lost, except 2729 - * tape devices. 2730 */ 2731 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2732 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) ··· 2739 qla2x00_mark_device_lost(vha, fcport, 2740 ql2xplogiabsentdevice, 0); 2741 if (fcport->loop_id != FC_NO_LOOP_ID && 2742 - (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2743 fcport->port_type != FCT_INITIATOR && 2744 fcport->port_type != FCT_BROADCAST) { 2745 ha->isp_ops->fabric_logout(vha, ··· 3018 fcport->d_id.b24 = new_fcport->d_id.b24; 3019 fcport->flags |= FCF_LOGIN_NEEDED; 3020 if (fcport->loop_id != FC_NO_LOOP_ID && 3021 - (fcport->flags & FCF_TAPE_PRESENT) == 0 && 3022 fcport->port_type != FCT_INITIATOR && 3023 fcport->port_type != FCT_BROADCAST) { 3024 ha->isp_ops->fabric_logout(vha, fcport->loop_id, ··· 3272 3273 rval = qla2x00_fabric_login(vha, fcport, next_loopid); 3274 if (rval == QLA_SUCCESS) { 3275 - /* Send an ADISC to tape devices.*/ 3276 opts = 0; 3277 - if (fcport->flags & FCF_TAPE_PRESENT) 3278 opts |= BIT_1; 3279 rval = qla2x00_get_port_database(vha, fcport, opts); 3280 if (rval != QLA_SUCCESS) {
··· 205 206 switch (data[0]) { 207 case MBS_COMMAND_COMPLETE: 208 + if (fcport->flags & FCF_FCP2_DEVICE) 209 opts |= BIT_1; 210 rval = qla2x00_get_port_database(vha, fcport, opts); 211 if (rval != QLA_SUCCESS) ··· 2726 2727 /* 2728 * Logout all previous fabric devices marked lost, except 2729 + * FCP2 devices. 2730 */ 2731 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2732 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) ··· 2739 qla2x00_mark_device_lost(vha, fcport, 2740 ql2xplogiabsentdevice, 0); 2741 if (fcport->loop_id != FC_NO_LOOP_ID && 2742 + (fcport->flags & FCF_FCP2_DEVICE) == 0 && 2743 fcport->port_type != FCT_INITIATOR && 2744 fcport->port_type != FCT_BROADCAST) { 2745 ha->isp_ops->fabric_logout(vha, ··· 3018 fcport->d_id.b24 = new_fcport->d_id.b24; 3019 fcport->flags |= FCF_LOGIN_NEEDED; 3020 if (fcport->loop_id != FC_NO_LOOP_ID && 3021 + (fcport->flags & FCF_FCP2_DEVICE) == 0 && 3022 fcport->port_type != FCT_INITIATOR && 3023 fcport->port_type != FCT_BROADCAST) { 3024 ha->isp_ops->fabric_logout(vha, fcport->loop_id, ··· 3272 3273 rval = qla2x00_fabric_login(vha, fcport, next_loopid); 3274 if (rval == QLA_SUCCESS) { 3275 + /* Send an ADISC to FCP2 devices.*/ 3276 opts = 0; 3277 + if (fcport->flags & FCF_FCP2_DEVICE) 3278 opts |= BIT_1; 3279 rval = qla2x00_get_port_database(vha, fcport, opts); 3280 if (rval != QLA_SUCCESS) {
+6 -6
drivers/scsi/qla2xxx/qla_os.c
··· 1188 scsi_qla_host_t *vha = shost_priv(sdev->host); 1189 struct qla_hw_data *ha = vha->hw; 1190 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1191 - fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1192 struct req_que *req = vha->req; 1193 1194 if (sdev->tagged_supported) ··· 1196 scsi_deactivate_tcq(sdev, req->max_q_depth); 1197 1198 rport->dev_loss_tmo = ha->port_down_retry_count; 1199 - if (sdev->type == TYPE_TAPE) 1200 - fcport->flags |= FCF_TAPE_PRESENT; 1201 1202 return 0; 1203 } ··· 2802 2803 fcport->login_retry--; 2804 if (fcport->flags & FCF_FABRIC_DEVICE) { 2805 - if (fcport->flags & FCF_TAPE_PRESENT) 2806 ha->isp_ops->fabric_logout(vha, 2807 fcport->loop_id, 2808 fcport->d_id.b.domain, ··· 3138 if (!IS_QLA2100(ha) && vha->link_down_timeout) 3139 atomic_set(&vha->loop_state, LOOP_DEAD); 3140 3141 - /* Schedule an ISP abort to return any tape commands. */ 3142 /* NPIV - scan physical port only */ 3143 if (!vha->vp_idx) { 3144 spin_lock_irqsave(&ha->hardware_lock, ··· 3158 if (sp->ctx) 3159 continue; 3160 sfcp = sp->fcport; 3161 - if (!(sfcp->flags & FCF_TAPE_PRESENT)) 3162 continue; 3163 3164 set_bit(ISP_ABORT_NEEDED,
··· 1188 scsi_qla_host_t *vha = shost_priv(sdev->host); 1189 struct qla_hw_data *ha = vha->hw; 1190 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1191 struct req_que *req = vha->req; 1192 1193 if (sdev->tagged_supported) ··· 1197 scsi_deactivate_tcq(sdev, req->max_q_depth); 1198 1199 rport->dev_loss_tmo = ha->port_down_retry_count; 1200 1201 return 0; 1202 } ··· 2805 2806 fcport->login_retry--; 2807 if (fcport->flags & FCF_FABRIC_DEVICE) { 2808 + if (fcport->flags & FCF_FCP2_DEVICE) 2809 ha->isp_ops->fabric_logout(vha, 2810 fcport->loop_id, 2811 fcport->d_id.b.domain, ··· 3141 if (!IS_QLA2100(ha) && vha->link_down_timeout) 3142 atomic_set(&vha->loop_state, LOOP_DEAD); 3143 3144 + /* 3145 + * Schedule an ISP abort to return any FCP2-device 3146 + * commands. 3147 + */ 3148 /* NPIV - scan physical port only */ 3149 if (!vha->vp_idx) { 3150 spin_lock_irqsave(&ha->hardware_lock, ··· 3158 if (sp->ctx) 3159 continue; 3160 sfcp = sp->fcport; 3161 + if (!(sfcp->flags & FCF_FCP2_DEVICE)) 3162 continue; 3163 3164 set_bit(ISP_ABORT_NEEDED,
+3
drivers/scsi/qla2xxx/qla_sup.c
··· 2292 uint32_t faddr, left, burst; 2293 struct qla_hw_data *ha = vha->hw; 2294 2295 if (offset & 0xfff) 2296 goto slow_read; 2297 if (length < OPTROM_BURST_SIZE) 2298 goto slow_read; 2299 2300 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2301 &optrom_dma, GFP_KERNEL); 2302 if (!optrom) {
··· 2292 uint32_t faddr, left, burst; 2293 struct qla_hw_data *ha = vha->hw; 2294 2295 + if (IS_QLA25XX(ha) || IS_QLA81XX(ha)) 2296 + goto try_fast; 2297 if (offset & 0xfff) 2298 goto slow_read; 2299 if (length < OPTROM_BURST_SIZE) 2300 goto slow_read; 2301 2302 + try_fast: 2303 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2304 &optrom_dma, GFP_KERNEL); 2305 if (!optrom) {
+1 -1
drivers/scsi/qla2xxx/qla_version.h
··· 7 /* 8 * Driver version 9 */ 10 - #define QLA2XXX_VERSION "8.03.01-k9" 11 12 #define QLA_DRIVER_MAJOR_VER 8 13 #define QLA_DRIVER_MINOR_VER 3
··· 7 /* 8 * Driver version 9 */ 10 + #define QLA2XXX_VERSION "8.03.01-k10" 11 12 #define QLA_DRIVER_MAJOR_VER 8 13 #define QLA_DRIVER_MINOR_VER 3
+1 -1
drivers/scsi/scsi_lib.c
··· 749 */ 750 req->next_rq->resid_len = scsi_in(cmd)->resid; 751 752 blk_end_request_all(req, 0); 753 754 - scsi_release_buffers(cmd); 755 scsi_next_command(cmd); 756 return; 757 }
··· 749 */ 750 req->next_rq->resid_len = scsi_in(cmd)->resid; 751 752 + scsi_release_buffers(cmd); 753 blk_end_request_all(req, 0); 754 755 scsi_next_command(cmd); 756 return; 757 }
+4 -1
drivers/scsi/scsi_transport_fc.c
··· 3527 if (!done && i->f->bsg_timeout) { 3528 /* call LLDD to abort the i/o as it has timed out */ 3529 err = i->f->bsg_timeout(job); 3530 - if (err) 3531 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " 3532 "abort failed with status %d\n", err); 3533 }
··· 3527 if (!done && i->f->bsg_timeout) { 3528 /* call LLDD to abort the i/o as it has timed out */ 3529 err = i->f->bsg_timeout(job); 3530 + if (err == -EAGAIN) { 3531 + job->ref_cnt--; 3532 + return BLK_EH_RESET_TIMER; 3533 + } else if (err) 3534 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " 3535 "abort failed with status %d\n", err); 3536 }
+1 -1
include/scsi/scsi_bsg_fc.h
··· 292 struct fc_bsg_rport_els r_els; 293 struct fc_bsg_rport_ct r_ct; 294 } rqst_data; 295 - }; 296 297 298 /* response (request sense data) structure of the sg_io_v4 */
··· 292 struct fc_bsg_rport_els r_els; 293 struct fc_bsg_rport_ct r_ct; 294 } rqst_data; 295 + } __attribute__((packed)); 296 297 298 /* response (request sense data) structure of the sg_io_v4 */