Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
[SCSI] dpt_i2o: fix transferred data length for scsi_set_resid()
[SCSI] scsi_error regression: Fix idempotent command handling
[SCSI] zfcp: Fix hexdump data in s390dbf traces
[SCSI] zfcp: fix erp timeout cleanup for port open requests
[SCSI] zfcp: Wait for port scan to complete when setting adapter online
[SCSI] zfcp: Fix cast warning
[SCSI] zfcp: Fix request list handling in error path
[SCSI] zfcp: fix mempool usage for status_read requests
[SCSI] zfcp: fix req_list_locking.
[SCSI] zfcp: Dont clear reference from SCSI device to unit
[SCSI] qla2xxx: Update version number to 8.02.01-k9.
[SCSI] qla2xxx: Return a FAILED status when abort mailbox-command fails.
[SCSI] qla2xxx: Do not honour max_vports from firmware for 2G ISPs and below.
[SCSI] qla2xxx: Use pci_disable_rom() to manipulate PCI config space.
[SCSI] qla2xxx: Correct Atmel flash-part handling.
[SCSI] megaraid: fix mega_internal_command oops

17 files changed, 67 insertions(+), 99 deletions(-)

drivers/s390/scsi/zfcp_aux.c | +2 -1

         atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
         atomic_set(&port->refcount, 0);
 
-        dev_set_name(&port->sysfs_device, "0x%016llx", wwpn);
+        dev_set_name(&port->sysfs_device, "0x%016llx",
+                     (unsigned long long)wwpn);
         port->sysfs_device.parent = &adapter->ccw_device->dev;
 
         port->sysfs_device.release = zfcp_sysfs_port_release;
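
The added cast is the usual fix for a printk format mismatch: on architectures
where u64 was typedef'd to unsigned long at the time (s390 among them), handing
a u64 to a %llx conversion triggers a compiler warning. A minimal sketch of the
portable pattern (the function name is illustrative, not zfcp code):

        #include <linux/kernel.h>
        #include <linux/types.h>

        /* Print a 64-bit WWPN portably: the cast makes the argument type
         * match the %llx conversion on every architecture. */
        static void print_wwpn(u64 wwpn)
        {
                pr_info("wwpn 0x%016llx\n", (unsigned long long)wwpn);
        }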

drivers/s390/scsi/zfcp_ccw.c | +3 -1

         zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 85,
                                 NULL);
         zfcp_erp_wait(adapter);
-        goto out;
+        up(&zfcp_data.config_sema);
+        flush_work(&adapter->scan_work);
+        return 0;
 
 out_scsi_register:
         zfcp_erp_thread_kill(adapter);
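
The functional change here is that setting the adapter online now blocks until
the asynchronous port scan has finished, so the remote ports exist by the time
the set-online call returns; the config semaphore is released first, presumably
because the scan work needs it. A minimal sketch of that wait-for-deferred-work
pattern, with illustrative names rather than zfcp's own:

        #include <linux/workqueue.h>

        static void scan_fn(struct work_struct *work)
        {
                /* walk the fabric and register the ports that answer */
        }

        static DECLARE_WORK(scan_work, scan_fn);

        static int my_set_online(void)
        {
                schedule_work(&scan_work);      /* scan runs asynchronously */
                flush_work(&scan_work);         /* but do not return before it is done */
                return 0;
        }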

drivers/s390/scsi/zfcp_dbf.c | +16 -26

                 dump->offset = offset;
                 dump->size = min(from_len - offset, room);
                 memcpy(dump->data, from + offset, dump->size);
-                debug_event(dbf, level, dump, dump->size);
+                debug_event(dbf, level, dump, dump->size + sizeof(*dump));
         }
 }
···
                              t.tv_sec, t.tv_nsec);
                 zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid);
         } else {
-                zfcp_dbf_outd(&p, NULL, dump->data, dump->size, dump->offset,
+                zfcp_dbf_outd(&p, "", dump->data, dump->size, dump->offset,
                               dump->total_size);
                 if ((dump->offset + dump->size) == dump->total_size)
                         p += sprintf(p, "\n");
···
                         break;
                 zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
                 zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
+                p += sprintf(*p, "\n");
                 break;
 
         case FSF_QTCB_OPEN_PORT_WITH_DID:
···
         else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
                 zfcp_hba_dbf_view_berr(&p, &r->u.berr);
 
-        p += sprintf(p, "\n");
+        if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0)
+                p += sprintf(p, "\n");
         return p - out_buf;
 }
···
         struct ct_hdr *hdr = sg_virt(ct->req);
         struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
         struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
+        int level = 3;
         unsigned long flags;
 
         spin_lock_irqsave(&adapter->san_dbf_lock, flags);
···
         oct->options = hdr->options;
         oct->max_res_size = hdr->max_res_size;
         oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
-                       ZFCP_DBF_CT_PAYLOAD);
-        memcpy(oct->payload, (void *)hdr + sizeof(struct ct_hdr), oct->len);
-        debug_event(adapter->san_dbf, 3, r, sizeof(*r));
+                       ZFCP_DBF_SAN_MAX_PAYLOAD);
+        debug_event(adapter->san_dbf, level, r, sizeof(*r));
+        zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level,
+                         (void *)hdr + sizeof(struct ct_hdr), oct->len);
         spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
 }
···
         struct ct_hdr *hdr = sg_virt(ct->resp);
         struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
         struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
+        int level = 3;
         unsigned long flags;
 
         spin_lock_irqsave(&adapter->san_dbf_lock, flags);
···
         rct->expl = hdr->reason_code_expl;
         rct->vendor_unique = hdr->vendor_unique;
         rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
-                       ZFCP_DBF_CT_PAYLOAD);
-        memcpy(rct->payload, (void *)hdr + sizeof(struct ct_hdr), rct->len);
-        debug_event(adapter->san_dbf, 3, r, sizeof(*r));
+                       ZFCP_DBF_SAN_MAX_PAYLOAD);
+        debug_event(adapter->san_dbf, level, r, sizeof(*r));
+        zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level,
+                         (void *)hdr + sizeof(struct ct_hdr), rct->len);
         spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
 }
···
         rec->u.els.ls_code = ls_code;
         debug_event(adapter->san_dbf, level, rec, sizeof(*rec));
         zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level,
-                         buffer, min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD));
+                         buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
         spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
 }
···
                               char *out_buf, const char *in_buf)
 {
         struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf;
-        char *buffer = NULL;
-        int buflen = 0, total = 0;
         char *p = out_buf;
 
         if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
···
                 zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype);
                 zfcp_dbf_out(&p, "options", "0x%02x", ct->options);
                 zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
-                total = ct->len;
-                buffer = ct->payload;
-                buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
         } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
                 struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp;
                 zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code);
···
                 zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
                 zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
                 zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
-                total = ct->len;
-                buffer = ct->payload;
-                buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
         } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
                    strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
                    strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
                 struct zfcp_san_dbf_record_els *els = &r->u.els;
                 zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code);
-                total = els->len;
-                buffer = els->payload;
-                buflen = min(total, ZFCP_DBF_ELS_PAYLOAD);
         }
-
-        zfcp_dbf_outd(&p, "payload", buffer, buflen, 0, total);
-        if (buflen == total)
-                p += sprintf(p, "\n");
-
         return p - out_buf;
 }

drivers/s390/scsi/zfcp_dbf.h | +2 -6

         u8 options;
         u16 max_res_size;
         u32 len;
-#define ZFCP_DBF_CT_PAYLOAD 24
-        u8 payload[ZFCP_DBF_CT_PAYLOAD];
 } __attribute__ ((packed));
 
 struct zfcp_san_dbf_record_ct_response {
···
         u8 expl;
         u8 vendor_unique;
         u32 len;
-        u8 payload[ZFCP_DBF_CT_PAYLOAD];
 } __attribute__ ((packed));
 
 struct zfcp_san_dbf_record_els {
         u8 ls_code;
         u32 len;
-#define ZFCP_DBF_ELS_PAYLOAD 32
-#define ZFCP_DBF_ELS_MAX_PAYLOAD 1024
-        u8 payload[ZFCP_DBF_ELS_PAYLOAD];
 } __attribute__ ((packed));
 
 struct zfcp_san_dbf_record {
···
                 struct zfcp_san_dbf_record_ct_response ct_resp;
                 struct zfcp_san_dbf_record_els els;
         } u;
+#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
+        u8 payload[32];
 } __attribute__ ((packed));
 
 struct zfcp_scsi_dbf_record {

drivers/s390/scsi/zfcp_erp.c | +1

                                   ZFCP_STATUS_ERP_TIMEDOUT)) {
                         act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
                         zfcp_rec_dbf_event_action(142, act);
+                        act->fsf_req->erp_action = NULL;
                 }
                 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
                         zfcp_rec_dbf_event_action(143, act);

drivers/s390/scsi/zfcp_fsf.c | +10 -13

         if (!req)
                 return NULL;
         memset(req, 0, sizeof(*req));
+        req->pool = pool;
         return req;
 }
···
 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 {
         struct zfcp_adapter *adapter = req->adapter;
-        struct zfcp_qdio_queue *req_q = &adapter->req_q;
+        unsigned long flags;
         int idx;
 
         /* put allocated FSF request into hash table */
-        spin_lock(&adapter->req_list_lock);
+        spin_lock_irqsave(&adapter->req_list_lock, flags);
         idx = zfcp_reqlist_hash(req->req_id);
         list_add_tail(&req->list, &adapter->req_list[idx]);
-        spin_unlock(&adapter->req_list_lock);
+        spin_unlock_irqrestore(&adapter->req_list_lock, flags);
 
-        req->qdio_outb_usage = atomic_read(&req_q->count);
+        req->qdio_outb_usage = atomic_read(&adapter->req_q.count);
         req->issued = get_clock();
         if (zfcp_qdio_send(req)) {
-                /* Queues are down..... */
                 del_timer(&req->timer);
-                spin_lock(&adapter->req_list_lock);
-                zfcp_reqlist_remove(adapter, req);
-                spin_unlock(&adapter->req_list_lock);
-                /* undo changes in request queue made for this request */
-                atomic_add(req->sbal_number, &req_q->count);
-                req_q->first -= req->sbal_number;
-                req_q->first += QDIO_MAX_BUFFERS_PER_Q;
-                req_q->first %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
+                spin_lock_irqsave(&adapter->req_list_lock, flags);
+                /* lookup request again, list might have changed */
+                if (zfcp_reqlist_find_safe(adapter, req))
+                        zfcp_reqlist_remove(adapter, req);
+                spin_unlock_irqrestore(&adapter->req_list_lock, flags);
                 zfcp_erp_adapter_reopen(adapter, 0, 116, req);
                 return -EIO;
         }
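
The locking part of the zfcp_fsf.c change is the classic irq-safety fix: a lock
that is also taken from the response (interrupt) path must be acquired with
spin_lock_irqsave() in process context, otherwise an interrupt arriving on the
same CPU can deadlock on the lock it already holds. A self-contained sketch of
the rule with illustrative names, not zfcp's actual structures:

        #include <linux/spinlock.h>
        #include <linux/list.h>

        static DEFINE_SPINLOCK(req_list_lock);
        static LIST_HEAD(req_list);

        struct my_req {
                struct list_head list;
        };

        /* process context: disable interrupts while holding the lock */
        static void submit_path(struct my_req *req)
        {
                unsigned long flags;

                spin_lock_irqsave(&req_list_lock, flags);
                list_add_tail(&req->list, &req_list);
                spin_unlock_irqrestore(&req_list_lock, flags);
        }

        /* interrupt context: interrupts are already disabled here */
        static void irq_path(struct my_req *req)
        {
                spin_lock(&req_list_lock);
                list_del(&req->list);
                spin_unlock(&req_list_lock);
        }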

drivers/s390/scsi/zfcp_scsi.c | +4 -8

 static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
 {
         struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
-        WARN_ON(!unit);
-        if (unit) {
-                atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
-                sdpnt->hostdata = NULL;
-                unit->device = NULL;
-                zfcp_erp_unit_failed(unit, 12, NULL);
-                zfcp_unit_put(unit);
-        }
+        atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
+        unit->device = NULL;
+        zfcp_erp_unit_failed(unit, 12, NULL);
+        zfcp_unit_put(unit);
 }
 
 static int zfcp_scsi_slave_configure(struct scsi_device *sdp)

drivers/scsi/dpt_i2o.c | +2 -2

         hba_status = detailed_status >> 8;
 
         // calculate resid for sg
-        scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
+        scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
 
         pHba = (adpt_hba*) cmd->device->host->hostdata[0];
···
         case I2O_SCSI_DSC_SUCCESS:
                 cmd->result = (DID_OK << 16);
                 // handle underflow
-                if(readl(reply+5) < cmd->underflow ) {
+                if (readl(reply+20) < cmd->underflow) {
                         cmd->result = (DID_ERROR <<16);
                         printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
                 }
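
The dpt_i2o change reads like a pointer-arithmetic fix: reply is a void
pointer, so reply+5 advances five bytes rather than five 32-bit words;
assuming the transferred byte count is the sixth dword of the reply frame
(which the new +20 byte offset implies), the old code read the wrong field.
A small standalone C illustration of the difference:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t frame[8] = { 0 };
                frame[5] = 512;         /* transferred byte count, dword index 5 */

                unsigned char *reply = (unsigned char *)frame;  /* byte-wise view */
                uint32_t *reply32 = frame;                      /* dword-wise view */

                /* both print 512: byte offset 20 == dword index 5 */
                printf("%u\n", (unsigned)*(uint32_t *)(reply + 20));
                printf("%u\n", (unsigned)reply32[5]);
                return 0;
        }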

drivers/scsi/megaraid.c | +8 -3

         scb_t *scb;
         int rval;
 
+        scmd = scsi_allocate_command(GFP_KERNEL);
+        if (!scmd)
+                return -ENOMEM;
+
         /*
          * The internal commands share one command id and hence are
          * serialized. This is so because we want to reserve maximum number of
···
         scb = &adapter->int_scb;
         memset(scb, 0, sizeof(scb_t));
 
-        scmd = &adapter->int_scmd;
-        memset(scmd, 0, sizeof(Scsi_Cmnd));
-
         sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
         scmd->device = sdev;
 
+        memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb));
+        scmd->cmnd = adapter->int_cdb;
         scmd->device->host = adapter->host;
         scmd->host_scribble = (void *)scb;
         scmd->cmnd[0] = MEGA_INTERNAL_CMD;
···
         }
 
         mutex_unlock(&adapter->int_mtx);
+
+        scsi_free_command(GFP_KERNEL, scmd);
 
         return rval;
 }
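
The megaraid fix stops zeroing a scsi_cmnd embedded in the adapter structure
and takes one from the midlayer allocator instead. The added
scmd->cmnd = adapter->int_cdb assignment implies that scsi_cmnd::cmnd is now a
pointer the driver must back with storage, so memset()ing a privately owned
command would leave it NULL and the first CDB write would oops. A sketch of
the corrected pattern with illustrative names:

        #include <scsi/scsi.h>
        #include <scsi/scsi_cmnd.h>
        #include <linux/slab.h>
        #include <linux/string.h>

        static unsigned char my_cdb[MAX_COMMAND_SIZE];  /* driver-owned CDB buffer */

        static struct scsi_cmnd *get_internal_cmd(void)
        {
                /* take a properly initialised command from the midlayer */
                struct scsi_cmnd *scmd = scsi_allocate_command(GFP_KERNEL);

                if (!scmd)
                        return NULL;

                memset(my_cdb, 0, sizeof(my_cdb));
                scmd->cmnd = my_cdb;    /* give ->cmnd real storage to point at */
                return scmd;
        }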

drivers/scsi/megaraid.h | +1 -1

 
         u8      sglen;  /* f/w supported scatter-gather list length */
 
+        unsigned char int_cdb[MAX_COMMAND_SIZE];
         scb_t                   int_scb;
-        Scsi_Cmnd               int_scmd;
         struct mutex            int_mtx;        /* To synchronize the internal
                                                 commands */
         struct completion       int_waitq;      /* wait queue for internal

drivers/scsi/qla2xxx/qla_def.h | -1

         uint8_t         fcode_revision[16];
         uint32_t        fw_revision[4];
 
-        uint16_t        fdt_odd_index;
         uint32_t        fdt_wrt_disable;
         uint32_t        fdt_erase_cmd;
         uint32_t        fdt_block_size;

drivers/scsi/qla2xxx/qla_init.c | +5 -21

 qla2100_pci_config(scsi_qla_host_t *ha)
 {
         uint16_t w;
-        uint32_t d;
         unsigned long flags;
         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
···
         w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
         pci_write_config_word(ha->pdev, PCI_COMMAND, w);
 
-        /* Reset expansion ROM address decode enable */
-        pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
-        d &= ~PCI_ROM_ADDRESS_ENABLE;
-        pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
+        pci_disable_rom(ha->pdev);
 
         /* Get PCI bus information. */
         spin_lock_irqsave(&ha->hardware_lock, flags);
···
 qla2300_pci_config(scsi_qla_host_t *ha)
 {
         uint16_t w;
-        uint32_t d;
         unsigned long flags = 0;
         uint32_t cnt;
         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
···
 
         pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
 
-        /* Reset expansion ROM address decode enable */
-        pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
-        d &= ~PCI_ROM_ADDRESS_ENABLE;
-        pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
+        pci_disable_rom(ha->pdev);
 
         /* Get PCI bus information. */
         spin_lock_irqsave(&ha->hardware_lock, flags);
···
 qla24xx_pci_config(scsi_qla_host_t *ha)
 {
         uint16_t w;
-        uint32_t d;
         unsigned long flags = 0;
         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
···
         if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
                 pcie_set_readrq(ha->pdev, 2048);
 
-        /* Reset expansion ROM address decode enable */
-        pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
-        d &= ~PCI_ROM_ADDRESS_ENABLE;
-        pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
+        pci_disable_rom(ha->pdev);
 
         ha->chip_revision = ha->pdev->revision;
···
 qla25xx_pci_config(scsi_qla_host_t *ha)
 {
         uint16_t w;
-        uint32_t d;
 
         pci_set_master(ha->pdev);
         pci_try_set_mwi(ha->pdev);
···
         if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
                 pcie_set_readrq(ha->pdev, 2048);
 
-        /* Reset expansion ROM address decode enable */
-        pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
-        d &= ~PCI_ROM_ADDRESS_ENABLE;
-        pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
+        pci_disable_rom(ha->pdev);
 
         ha->chip_revision = ha->pdev->revision;
···
             &ha->fw_minor_version,
             &ha->fw_subminor_version,
             &ha->fw_attributes, &ha->fw_memory_size);
-        qla2x00_resize_request_q(ha);
         ha->flags.npiv_supported = 0;
         if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) ||
             IS_QLA84XX(ha)) &&
···
                         ha->max_npiv_vports =
                             MIN_MULTI_ID_FABRIC - 1;
         }
+        qla2x00_resize_request_q(ha);
 
         if (ql2xallocfwdump)
                 qla2x00_alloc_fw_dump(ha);
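
pci_disable_rom() is the core-PCI helper for what the removed open-coded
sequence did by hand, clearing the ROM enable bit in the expansion ROM BAR, so
the qla2xxx change is a straight substitution. A side-by-side sketch (function
names are illustrative):

        #include <linux/pci.h>

        /* what the driver used to do */
        static void rom_off_open_coded(struct pci_dev *pdev)
        {
                u32 d;

                pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &d);
                d &= ~PCI_ROM_ADDRESS_ENABLE;
                pci_write_config_dword(pdev, PCI_ROM_ADDRESS, d);
        }

        /* what it does now: same effect, and the PCI core also keeps its
         * view of the ROM resource in sync */
        static void rom_off_helper(struct pci_dev *pdev)
        {
                pci_disable_rom(pdev);
        }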

drivers/scsi/qla2xxx/qla_mbx.c | +1 -1

                 *cur_iocb_cnt = mcp->mb[7];
                 if (orig_iocb_cnt)
                         *orig_iocb_cnt = mcp->mb[10];
-                if (max_npiv_vports)
+                if (ha->flags.npiv_supported && max_npiv_vports)
                         *max_npiv_vports = mcp->mb[11];
         }

drivers/scsi/qla2xxx/qla_os.c | +1

                 if (ha->isp_ops->abort_command(ha, sp)) {
                         DEBUG2(printk("%s(%ld): abort_command "
                             "mbx failed.\n", __func__, ha->host_no));
+                        ret = FAILED;
                 } else {
                         DEBUG3(printk("%s(%ld): abort_command "
                             "mbx success.\n", __func__, ha->host_no));
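
The one-line qla2xxx change makes the abort handler honour the midlayer
contract: report SUCCESS only when the abort actually went through, otherwise
FAILED so the error handler can escalate to the next recovery step. A minimal
sketch of such a handler (my_hw_abort_command is a hypothetical firmware call,
not a qla2xxx function):

        #include <scsi/scsi.h>
        #include <scsi/scsi_cmnd.h>

        /* hypothetical: returns 0 on success, non-zero if the abort failed */
        static int my_hw_abort_command(struct scsi_cmnd *cmd)
        {
                return 0;
        }

        static int my_eh_abort_handler(struct scsi_cmnd *cmd)
        {
                int ret = SUCCESS;

                if (my_hw_abort_command(cmd))
                        ret = FAILED;   /* abort did not happen, let eh escalate */

                return ret;
        }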

drivers/scsi/qla2xxx/qla_sup.c | +7 -12

 static void
 qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
 {
+#define FLASH_BLK_SIZE_4K       0x1000
 #define FLASH_BLK_SIZE_32K      0x8000
 #define FLASH_BLK_SIZE_64K      0x10000
         const char *loc, *locations[] = { "MID", "FDT" };
···
         loc = locations[1];
         mid = le16_to_cpu(fdt->man_id);
         fid = le16_to_cpu(fdt->id);
-        ha->fdt_odd_index = mid == 0x1f;
         ha->fdt_wrt_disable = fdt->wrt_disable_bits;
         ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
         ha->fdt_block_size = le32_to_cpu(fdt->block_size);
···
                 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
                 break;
         case 0x1f: /* Atmel 26DF081A. */
-                ha->fdt_odd_index = 1;
-                ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+                ha->fdt_block_size = FLASH_BLK_SIZE_4K;
                 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320);
                 ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339);
                 ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336);
···
         }
 done:
         DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
-            "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+            "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
             ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
-            ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable,
+            ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable,
             ha->fdt_block_size));
 }
···
         qla24xx_unprotect_flash(ha);
 
         for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
-                if (ha->fdt_odd_index) {
-                        findex = faddr << 2;
-                        fdata = findex & sec_mask;
-                } else {
-                        findex = faddr;
-                        fdata = (findex & sec_mask) << 2;
-                }
+
+                findex = faddr;
+                fdata = (findex & sec_mask) << 2;
 
                 /* Are we at the beginning of a sector? */
                 if ((findex & rest_addr) == 0) {

drivers/scsi/qla2xxx/qla_version.h | +1 -1

 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.02.01-k8"
+#define QLA2XXX_VERSION      "8.02.01-k9"
 
 #define QLA_DRIVER_MAJOR_VER    8
 #define QLA_DRIVER_MINOR_VER    2

drivers/scsi/scsi_error.c | +3 -2

                  * LLD/transport was disrupted during processing of the IO.
                  * The transport class is now blocked/blocking,
                  * and the transport will decide what to do with the IO
-                 * based on its timers and recovery capablilities.
+                 * based on its timers and recovery capablilities if
+                 * there are enough retries.
                  */
-                return ADD_TO_MLQUEUE;
+                goto maybe_retry;
         case DID_TRANSPORT_FAILFAST:
                 /*
                  * The transport decided to failfast the IO (most likely