Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] mpt fusion: removing references to hd->ioc

Cleaning up code by accessing the ioc pointer directly instead of via hd->ioc. In the future, most data members of struct MPT_SCSI_HOST will be either deleted or moved to struct MPT_ADAPTER.

Signed-off-by: Eric Moore <Eric.Moore@lsi.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>

authored by

Eric Moore and committed by
James Bottomley
e80b002b a69de507

+188 -162
+10 -7
drivers/message/fusion/mptfc.c
··· 194 194 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 195 195 unsigned long flags; 196 196 int ready; 197 + MPT_ADAPTER *ioc; 197 198 198 199 hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata; 200 + ioc = hd->ioc; 199 201 spin_lock_irqsave(shost->host_lock, flags); 200 202 while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY) { 201 203 spin_unlock_irqrestore(shost->host_lock, flags); 202 - dfcprintk (hd->ioc, printk(MYIOC_s_DEBUG_FMT 204 + dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT 203 205 "mptfc_block_error_handler.%d: %d:%d, port status is " 204 206 "DID_IMM_RETRY, deferring %s recovery.\n", 205 207 ((MPT_SCSI_HOST *) shost->hostdata)->ioc->name, ··· 213 211 spin_unlock_irqrestore(shost->host_lock, flags); 214 212 215 213 if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata) { 216 - dfcprintk (hd->ioc, printk(MYIOC_s_DEBUG_FMT 214 + dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT 217 215 "%s.%d: %d:%d, failing recovery, " 218 216 "port state %d, vdevice %p.\n", caller, 219 217 ((MPT_SCSI_HOST *) shost->hostdata)->ioc->name, ··· 222 220 SCpnt->device->hostdata)); 223 221 return FAILED; 224 222 } 225 - dfcprintk (hd->ioc, printk(MYIOC_s_DEBUG_FMT 223 + dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT 226 224 "%s.%d: %d:%d, executing recovery.\n", caller, 227 225 ((MPT_SCSI_HOST *) shost->hostdata)->ioc->name, 228 226 ((MPT_SCSI_HOST *) shost->hostdata)->ioc->sh->host_no, ··· 607 605 VirtDevice *vdevice; 608 606 struct scsi_target *starget; 609 607 struct fc_rport *rport; 610 - 608 + MPT_ADAPTER *ioc; 611 609 612 610 starget = scsi_target(sdev); 613 611 rport = starget_to_rport(starget); ··· 616 614 return -ENXIO; 617 615 618 616 hd = (MPT_SCSI_HOST *)sdev->host->hostdata; 617 + ioc = hd->ioc; 619 618 620 619 vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL); 621 620 if (!vdevice) { 622 621 printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n", 623 - hd->ioc->name, sizeof(VirtDevice)); 622 + ioc->name, sizeof(VirtDevice)); 624 623 
return -ENOMEM; 625 624 } 626 625 ··· 630 627 vtarget = starget->hostdata; 631 628 632 629 if (vtarget->num_luns == 0) { 633 - vtarget->ioc_id = hd->ioc->id; 630 + vtarget->ioc_id = ioc->id; 634 631 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES; 635 632 } 636 633 ··· 640 637 vtarget->num_luns++; 641 638 642 639 643 - mptfc_dump_lun_info(hd->ioc, rport, sdev, vtarget); 640 + mptfc_dump_lun_info(ioc, rport, sdev, vtarget); 644 641 645 642 return 0; 646 643 }
+20 -17
drivers/message/fusion/mptsas.c
··· 846 846 struct sas_rphy *rphy; 847 847 struct mptsas_portinfo *p; 848 848 int i; 849 + MPT_ADAPTER *ioc = hd->ioc; 849 850 850 851 vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL); 851 852 if (!vtarget) 852 853 return -ENOMEM; 853 854 854 855 vtarget->starget = starget; 855 - vtarget->ioc_id = hd->ioc->id; 856 + vtarget->ioc_id = ioc->id; 856 857 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES; 857 858 id = starget->id; 858 859 channel = 0; ··· 862 861 * RAID volumes placed beyond the last expected port. 863 862 */ 864 863 if (starget->channel == MPTSAS_RAID_CHANNEL) { 865 - for (i=0; i < hd->ioc->raid_data.pIocPg2->NumActiveVolumes; i++) 866 - if (id == hd->ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID) 867 - channel = hd->ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus; 864 + for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) 865 + if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID) 866 + channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus; 868 867 goto out; 869 868 } 870 869 871 870 rphy = dev_to_rphy(starget->dev.parent); 872 - mutex_lock(&hd->ioc->sas_topology_mutex); 873 - list_for_each_entry(p, &hd->ioc->sas_topology, list) { 871 + mutex_lock(&ioc->sas_topology_mutex); 872 + list_for_each_entry(p, &ioc->sas_topology, list) { 874 873 for (i = 0; i < p->num_phys; i++) { 875 874 if (p->phy_info[i].attached.sas_address != 876 875 rphy->identify.sas_address) ··· 882 881 /* 883 882 * Exposing hidden raid components 884 883 */ 885 - if (mptscsih_is_phys_disk(hd->ioc, channel, id)) { 886 - id = mptscsih_raid_id_to_num(hd->ioc, 884 + if (mptscsih_is_phys_disk(ioc, channel, id)) { 885 + id = mptscsih_raid_id_to_num(ioc, 887 886 channel, id); 888 887 vtarget->tflags |= 889 888 MPT_TARGET_FLAGS_RAID_COMPONENT; 890 889 p->phy_info[i].attached.phys_disk_num = id; 891 890 } 892 - mutex_unlock(&hd->ioc->sas_topology_mutex); 891 + mutex_unlock(&ioc->sas_topology_mutex); 893 892 goto out; 894 893 } 895 894 } 896 - 
mutex_unlock(&hd->ioc->sas_topology_mutex); 895 + mutex_unlock(&ioc->sas_topology_mutex); 897 896 898 897 kfree(vtarget); 899 898 return -ENXIO; ··· 913 912 struct sas_rphy *rphy; 914 913 struct mptsas_portinfo *p; 915 914 int i; 915 + MPT_ADAPTER *ioc = hd->ioc; 916 916 917 917 if (!starget->hostdata) 918 918 return; ··· 922 920 goto out; 923 921 924 922 rphy = dev_to_rphy(starget->dev.parent); 925 - list_for_each_entry(p, &hd->ioc->sas_topology, list) { 923 + list_for_each_entry(p, &ioc->sas_topology, list) { 926 924 for (i = 0; i < p->num_phys; i++) { 927 925 if (p->phy_info[i].attached.sas_address != 928 926 rphy->identify.sas_address) ··· 948 946 VirtDevice *vdevice; 949 947 struct scsi_target *starget; 950 948 int i; 949 + MPT_ADAPTER *ioc = hd->ioc; 951 950 952 951 vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL); 953 952 if (!vdevice) { 954 953 printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zd) FAILED!\n", 955 - hd->ioc->name, sizeof(VirtDevice)); 954 + ioc->name, sizeof(VirtDevice)); 956 955 return -ENOMEM; 957 956 } 958 957 starget = scsi_target(sdev); ··· 963 960 goto out; 964 961 965 962 rphy = dev_to_rphy(sdev->sdev_target->dev.parent); 966 - mutex_lock(&hd->ioc->sas_topology_mutex); 967 - list_for_each_entry(p, &hd->ioc->sas_topology, list) { 963 + mutex_lock(&ioc->sas_topology_mutex); 964 + list_for_each_entry(p, &ioc->sas_topology, list) { 968 965 for (i = 0; i < p->num_phys; i++) { 969 966 if (p->phy_info[i].attached.sas_address != 970 967 rphy->identify.sas_address) ··· 973 970 /* 974 971 * Exposing hidden raid components 975 972 */ 976 - if (mptscsih_is_phys_disk(hd->ioc, 973 + if (mptscsih_is_phys_disk(ioc, 977 974 p->phy_info[i].attached.channel, 978 975 p->phy_info[i].attached.id)) 979 976 sdev->no_uld_attach = 1; 980 - mutex_unlock(&hd->ioc->sas_topology_mutex); 977 + mutex_unlock(&ioc->sas_topology_mutex); 981 978 goto out; 982 979 } 983 980 } 984 - mutex_unlock(&hd->ioc->sas_topology_mutex); 981 + mutex_unlock(&ioc->sas_topology_mutex); 985 
982 986 983 kfree(vdevice); 987 984 return -ENXIO;
+104 -95
drivers/message/fusion/mptscsih.c
··· 1036 1036 int max = hd->ioc->req_depth; 1037 1037 struct scsi_cmnd *sc; 1038 1038 struct scsi_lun lun; 1039 + MPT_ADAPTER *ioc = hd->ioc; 1039 1040 1040 - dsprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT ": search_running channel %d id %d lun %d max %d\n", 1041 - hd->ioc->name, vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun, max)); 1041 + dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": search_running channel %d id %d lun %d max %d\n", 1042 + ioc->name, vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun, max)); 1042 1043 1043 1044 for (ii=0; ii < max; ii++) { 1044 1045 if ((sc = hd->ScsiLookup[ii]) != NULL) { 1045 1046 1046 - mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii); 1047 + mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(ioc, ii); 1047 1048 if (mf == NULL) 1048 1049 continue; 1049 1050 /* If the device is a hidden raid component, then its ··· 1064 1063 /* Cleanup 1065 1064 */ 1066 1065 hd->ScsiLookup[ii] = NULL; 1067 - mptscsih_freeChainBuffers(hd->ioc, ii); 1068 - mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf); 1066 + mptscsih_freeChainBuffers(ioc, ii); 1067 + mpt_free_msg_frame(ioc, (MPT_FRAME_HDR *)mf); 1069 1068 if ((unsigned char *)mf != sc->host_scribble) 1070 1069 continue; 1071 1070 scsi_dma_unmap(sc); 1072 1071 sc->host_scribble = NULL; 1073 1072 sc->result = DID_NO_CONNECT << 16; 1074 1073 sdev_printk(MYIOC_s_INFO_FMT, sc->device, "completing cmds: fw_channel %d," 1075 - "fw_id %d, sc=%p, mf = %p, idx=%x\n", hd->ioc->name, vdevice->vtarget->channel, 1074 + "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, vdevice->vtarget->channel, 1076 1075 vdevice->vtarget->id, sc, mf, ii); 1077 1076 sc->scsi_done(sc); 1078 1077 } ··· 1099 1098 { 1100 1099 long time = jiffies; 1101 1100 MPT_SCSI_HOST *hd; 1101 + MPT_ADAPTER *ioc; 1102 1102 1103 1103 if (sc->device == NULL) 1104 1104 return; ··· 1107 1105 return; 1108 1106 if ((hd = (MPT_SCSI_HOST *)sc->device->host->hostdata) == NULL) 1109 1107 return; 1110 - 1108 + ioc = hd->ioc; 1111 1109 if 
(time - hd->last_queue_full > 10 * HZ) { 1112 - dprintk(hd->ioc, printk(MYIOC_s_WARN_FMT "Device (%d:%d:%d) reported QUEUE_FULL!\n", 1113 - hd->ioc->name, 0, sc->device->id, sc->device->lun)); 1110 + dprintk(ioc, printk(MYIOC_s_WARN_FMT "Device (%d:%d:%d) reported QUEUE_FULL!\n", 1111 + ioc->name, 0, sc->device->id, sc->device->lun)); 1114 1112 hd->last_queue_full = time; 1115 1113 } 1116 1114 } ··· 1145 1143 sz1=0; 1146 1144 1147 1145 if (hd->ScsiLookup != NULL) { 1148 - sz1 = hd->ioc->req_depth * sizeof(void *); 1146 + sz1 = ioc->req_depth * sizeof(void *); 1149 1147 kfree(hd->ScsiLookup); 1150 1148 hd->ScsiLookup = NULL; 1151 1149 } 1152 1150 1153 - dprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT 1151 + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 1154 1152 "Free'd ScsiLookup (%d) memory\n", 1155 - hd->ioc->name, sz1)); 1153 + ioc->name, sz1)); 1156 1154 1157 1155 kfree(hd->info_kbuf); 1158 1156 1159 1157 /* NULL the Scsi_Host pointer 1160 1158 */ 1161 - hd->ioc->sh = NULL; 1159 + ioc->sh = NULL; 1162 1160 1163 1161 scsi_host_put(host); 1164 1162 ··· 1388 1386 /* 1389 1387 * Put together a MPT SCSI request... 
1390 1388 */ 1391 - if ((mf = mpt_get_msg_frame(hd->ioc->DoneCtx, hd->ioc)) == NULL) { 1389 + if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) { 1392 1390 dprintk(ioc, printk(MYIOC_s_WARN_FMT "QueueCmd, no msg frames!!\n", 1393 1391 ioc->name)); 1394 1392 return SCSI_MLQUEUE_HOST_BUSY; ··· 1456 1454 pScsiReq->DataLength = cpu_to_le32(datalen); 1457 1455 1458 1456 /* SenseBuffer low address */ 1459 - pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_low_dma 1457 + pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma 1460 1458 + (my_idx * MPT_SENSE_BUFFER_ALLOC)); 1461 1459 1462 1460 /* Now add the SG list ··· 1468 1466 (dma_addr_t) -1); 1469 1467 } else { 1470 1468 /* Add a 32 or 64 bit SGE */ 1471 - if (mptscsih_AddSGE(hd->ioc, SCpnt, pScsiReq, my_idx) != SUCCESS) 1469 + if (mptscsih_AddSGE(ioc, SCpnt, pScsiReq, my_idx) != SUCCESS) 1472 1470 goto fail; 1473 1471 } 1474 1472 1475 1473 SCpnt->host_scribble = (unsigned char *)mf; 1476 1474 hd->ScsiLookup[my_idx] = SCpnt; 1477 1475 1478 - mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf); 1476 + mpt_put_msg_frame(ioc->DoneCtx, ioc, mf); 1479 1477 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n", 1480 1478 ioc->name, SCpnt, mf, my_idx)); 1481 1479 DBG_DUMP_REQUEST_FRAME(ioc, (u32 *)mf); ··· 1483 1481 1484 1482 fail: 1485 1483 hd->ScsiLookup[my_idx] = NULL; 1486 - mptscsih_freeChainBuffers(hd->ioc, my_idx); 1487 - mpt_free_msg_frame(hd->ioc, mf); 1484 + mptscsih_freeChainBuffers(ioc, my_idx); 1485 + mpt_free_msg_frame(ioc, mf); 1488 1486 return SCSI_MLQUEUE_HOST_BUSY; 1489 1487 } 1490 1488 ··· 1610 1608 return FAILED; 1611 1609 } 1612 1610 } else { 1613 - spin_lock_irqsave(&hd->ioc->FreeQlock, flags); 1611 + spin_lock_irqsave(&ioc->FreeQlock, flags); 1614 1612 hd->tmPending |= (1 << type); 1615 - spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); 1613 + spin_unlock_irqrestore(&ioc->FreeQlock, flags); 1616 1614 } 1617 1615 1618 - ioc_raw_state = 
mpt_GetIocState(hd->ioc, 0); 1616 + ioc_raw_state = mpt_GetIocState(ioc, 0); 1619 1617 1620 1618 if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) { 1621 1619 printk(MYIOC_s_WARN_FMT ··· 1683 1681 SCSITaskMgmt_t *pScsiTm; 1684 1682 int ii; 1685 1683 int retval; 1684 + MPT_ADAPTER *ioc = hd->ioc; 1686 1685 1687 1686 /* Return Fail to calling function if no message frames available. 1688 1687 */ 1689 - if ((mf = mpt_get_msg_frame(hd->ioc->TaskCtx, hd->ioc)) == NULL) { 1690 - dfailprintk(hd->ioc, printk(MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n", 1691 - hd->ioc->name)); 1688 + if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) { 1689 + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n", 1690 + ioc->name)); 1692 1691 return FAILED; 1693 1692 } 1694 - dtmprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", 1695 - hd->ioc->name, mf)); 1693 + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", 1694 + ioc->name, mf)); 1696 1695 1697 1696 /* Format the Request 1698 1697 */ ··· 1716 1713 1717 1714 pScsiTm->TaskMsgContext = ctx2abort; 1718 1715 1719 - dtmprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt: ctx2abort (0x%08x) " 1720 - "type=%d\n", hd->ioc->name, ctx2abort, type)); 1716 + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt: ctx2abort (0x%08x) " 1717 + "type=%d\n", ioc->name, ctx2abort, type)); 1721 1718 1722 1719 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm); 1723 1720 1724 - if ((hd->ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 1725 - (hd->ioc->facts.MsgVersion >= MPI_VERSION_01_05)) 1726 - mpt_put_msg_frame_hi_pri(hd->ioc->TaskCtx, hd->ioc, mf); 1721 + if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 1722 + (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) 1723 + mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); 1727 1724 else { 1728 - retval = mpt_send_handshake_request(hd->ioc->TaskCtx, hd->ioc, 1725 + retval = 
mpt_send_handshake_request(ioc->TaskCtx, ioc, 1729 1726 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); 1730 1727 if (retval) { 1731 - dfailprintk(hd->ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED!" 1732 - " (hd %p, ioc %p, mf %p, rc=%d) \n", hd->ioc->name, hd, 1733 - hd->ioc, mf, retval)); 1728 + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED!" 1729 + " (hd %p, ioc %p, mf %p, rc=%d) \n", ioc->name, hd, 1730 + ioc, mf, retval)); 1734 1731 goto fail_out; 1735 1732 } 1736 1733 } 1737 1734 1738 1735 if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) { 1739 - dfailprintk(hd->ioc, printk(MYIOC_s_ERR_FMT "task management request TIMED OUT!" 1740 - " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd, 1741 - hd->ioc, mf)); 1742 - dtmprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", 1743 - hd->ioc->name)); 1744 - retval = mpt_HardResetHandler(hd->ioc, CAN_SLEEP); 1745 - dtmprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "rc=%d \n", 1746 - hd->ioc->name, retval)); 1736 + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "task management request TIMED OUT!" 1737 + " (hd %p, ioc %p, mf %p) \n", ioc->name, hd, 1738 + ioc, mf)); 1739 + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", 1740 + ioc->name)); 1741 + retval = mpt_HardResetHandler(ioc, CAN_SLEEP); 1742 + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rc=%d \n", 1743 + ioc->name, retval)); 1747 1744 goto fail_out; 1748 1745 } 1749 1746 ··· 1764 1761 /* 1765 1762 * Free task managment mf, and corresponding tm flags 1766 1763 */ 1767 - mpt_free_msg_frame(hd->ioc, mf); 1764 + mpt_free_msg_frame(ioc, mf); 1768 1765 hd->tmPending = 0; 1769 1766 hd->tmState = TM_STATE_NONE; 1770 1767 return FAILED; ··· 1871 1868 * swap it here either. It is an opaque cookie to 1872 1869 * the controller, so it does not matter. 
-DaveM 1873 1870 */ 1874 - mf = MPT_INDEX_2_MFPTR(hd->ioc, scpnt_idx); 1871 + mf = MPT_INDEX_2_MFPTR(ioc, scpnt_idx); 1875 1872 ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext; 1876 1873 1877 1874 hd->abortSCpnt = SCpnt; ··· 2033 2030 /* If our attempts to reset the host failed, then return a failed 2034 2031 * status. The host will be taken off line by the SCSI mid-layer. 2035 2032 */ 2036 - if (mpt_HardResetHandler(hd->ioc, CAN_SLEEP) < 0) { 2033 + if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) { 2037 2034 retval = FAILED; 2038 2035 } else { 2039 2036 /* Make sure TM pending is cleared and TM state is set to ··· 2063 2060 unsigned long flags; 2064 2061 int loop_count = 4 * 10; /* Wait 10 seconds */ 2065 2062 int status = FAILED; 2063 + MPT_ADAPTER *ioc = hd->ioc; 2066 2064 2067 2065 do { 2068 - spin_lock_irqsave(&hd->ioc->FreeQlock, flags); 2066 + spin_lock_irqsave(&ioc->FreeQlock, flags); 2069 2067 if (hd->tmState == TM_STATE_NONE) { 2070 2068 hd->tmState = TM_STATE_IN_PROGRESS; 2071 2069 hd->tmPending = 1; 2072 - spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); 2070 + spin_unlock_irqrestore(&ioc->FreeQlock, flags); 2073 2071 status = SUCCESS; 2074 2072 break; 2075 2073 } 2076 - spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); 2074 + spin_unlock_irqrestore(&ioc->FreeQlock, flags); 2077 2075 msleep(250); 2078 2076 } while (--loop_count); 2079 2077 ··· 2095 2091 unsigned long flags; 2096 2092 int loop_count = 4 * timeout; 2097 2093 int status = FAILED; 2094 + MPT_ADAPTER *ioc = hd->ioc; 2098 2095 2099 2096 do { 2100 - spin_lock_irqsave(&hd->ioc->FreeQlock, flags); 2097 + spin_lock_irqsave(&ioc->FreeQlock, flags); 2101 2098 if(hd->tmPending == 0) { 2102 2099 status = SUCCESS; 2103 - spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); 2100 + spin_unlock_irqrestore(&ioc->FreeQlock, flags); 2104 2101 break; 2105 2102 } 2106 - spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); 2103 + spin_unlock_irqrestore(&ioc->FreeQlock, flags); 2107 2104 msleep(250); 2108 
2105 } while (--loop_count); 2109 2106 ··· 2412 2407 struct scsi_target *starget; 2413 2408 int max_depth; 2414 2409 int tagged; 2410 + MPT_ADAPTER *ioc = hd->ioc; 2415 2411 2416 2412 starget = scsi_target(sdev); 2417 2413 vtarget = starget->hostdata; 2418 2414 2419 - if (hd->ioc->bus_type == SPI) { 2415 + if (ioc->bus_type == SPI) { 2420 2416 if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)) 2421 2417 max_depth = 1; 2422 2418 else if (sdev->type == TYPE_DISK && ··· 2453 2447 VirtDevice *vdevice; 2454 2448 struct scsi_target *starget; 2455 2449 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)sh->hostdata; 2450 + MPT_ADAPTER *ioc = hd->ioc; 2456 2451 2457 2452 starget = scsi_target(sdev); 2458 2453 vtarget = starget->hostdata; 2459 2454 vdevice = sdev->hostdata; 2460 2455 2461 - dsprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT 2456 + dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2462 2457 "device @ %p, channel=%d, id=%d, lun=%d\n", 2463 - hd->ioc->name, sdev, sdev->channel, sdev->id, sdev->lun)); 2464 - if (hd->ioc->bus_type == SPI) 2465 - dsprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT 2458 + ioc->name, sdev, sdev->channel, sdev->id, sdev->lun)); 2459 + if (ioc->bus_type == SPI) 2460 + dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2466 2461 "sdtr %d wdtr %d ppr %d inq length=%d\n", 2467 - hd->ioc->name, sdev->sdtr, sdev->wdtr, 2462 + ioc->name, sdev->sdtr, sdev->wdtr, 2468 2463 sdev->ppr, sdev->inquiry_len)); 2469 2464 2470 2465 if (sdev->id > sh->max_id) { ··· 2477 2470 vdevice->configured_lun = 1; 2478 2471 mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH); 2479 2472 2480 - dsprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT 2473 + dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2481 2474 "Queue depth=%d, tflags=%x\n", 2482 - hd->ioc->name, sdev->queue_depth, vtarget->tflags)); 2475 + ioc->name, sdev->queue_depth, vtarget->tflags)); 2483 2476 2484 - if (hd->ioc->bus_type == SPI) 2485 - dsprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT 2477 + if (ioc->bus_type == SPI) 2478 + dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 
2486 2479 "negoFlags=%x, maxOffset=%x, SyncFactor=%x\n", 2487 - hd->ioc->name, vtarget->negoFlags, vtarget->maxOffset, 2480 + ioc->name, vtarget->negoFlags, vtarget->maxOffset, 2488 2481 vtarget->minSyncFactor)); 2489 2482 2490 2483 slave_configure_exit: 2491 2484 2492 - dsprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT 2485 + dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2493 2486 "tagged %d, simple %d, ordered %d\n", 2494 - hd->ioc->name,sdev->tagged_supported, sdev->simple_tags, 2487 + ioc->name,sdev->tagged_supported, sdev->simple_tags, 2495 2488 sdev->ordered_tags)); 2496 2489 2497 2490 return 0; ··· 2513 2506 VirtDevice *vdevice; 2514 2507 SCSIIORequest_t *pReq; 2515 2508 u32 sense_count = le32_to_cpu(pScsiReply->SenseCount); 2509 + MPT_ADAPTER *ioc = hd->ioc; 2516 2510 2517 2511 /* Get target structure 2518 2512 */ ··· 2526 2518 2527 2519 /* Copy the sense received into the scsi command block. */ 2528 2520 req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); 2529 - sense_data = ((u8 *)hd->ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); 2521 + sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); 2530 2522 memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc)); 2531 2523 2532 2524 /* Log SMART data (asc = 0x5D, non-IM case only) if required. 
2533 2525 */ 2534 - if ((hd->ioc->events) && (hd->ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) { 2526 + if ((ioc->events) && (ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) { 2535 2527 if ((sense_data[12] == 0x5D) && (vdevice->vtarget->raidVolume == 0)) { 2536 2528 int idx; 2537 - MPT_ADAPTER *ioc = hd->ioc; 2538 2529 2539 2530 idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE; 2540 2531 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE; ··· 2546 2539 ioc->events[idx].data[1] = (sense_data[13] << 8) | sense_data[12]; 2547 2540 2548 2541 ioc->eventContext++; 2549 - if (hd->ioc->pcidev->vendor == 2542 + if (ioc->pcidev->vendor == 2550 2543 PCI_VENDOR_ID_IBM) { 2551 - mptscsih_issue_sep_command(hd->ioc, 2544 + mptscsih_issue_sep_command(ioc, 2552 2545 vdevice->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); 2553 2546 vdevice->vtarget->tflags |= 2554 2547 MPT_TARGET_FLAGS_LED_ON; ··· 2556 2549 } 2557 2550 } 2558 2551 } else { 2559 - dprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "Hmmm... SenseData len=0! (?)\n", 2560 - hd->ioc->name)); 2552 + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Hmmm... SenseData len=0! (?)\n", 2553 + ioc->name)); 2561 2554 } 2562 2555 } 2563 2556 ··· 2642 2635 2643 2636 /* ScsiLookup initialization 2644 2637 */ 2645 - for (ii=0; ii < hd->ioc->req_depth; ii++) 2638 + for (ii=0; ii < ioc->req_depth; ii++) 2646 2639 hd->ScsiLookup[ii] = NULL; 2647 2640 2648 2641 /* 2. 
Chain Buffer initialization ··· 2787 2780 2788 2781 if (mf != hd->cmdPtr) { 2789 2782 printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n", 2790 - hd->ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx); 2783 + ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx); 2791 2784 } 2792 2785 hd->cmdPtr = NULL; 2793 2786 2794 2787 ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n", 2795 - hd->ioc->name, mf, mr, req_idx)); 2788 + ioc->name, mf, mr, req_idx)); 2796 2789 2797 2790 hd->pLocal = &hd->localReply; 2798 2791 hd->pLocal->scsiStatus = 0; ··· 2856 2849 */ 2857 2850 completionCode = MPT_SCANDV_SENSE; 2858 2851 hd->pLocal->scsiStatus = scsi_status; 2859 - sense_data = ((u8 *)hd->ioc->sense_buf_pool + 2852 + sense_data = ((u8 *)ioc->sense_buf_pool + 2860 2853 (req_idx * MPT_SENSE_BUFFER_ALLOC)); 2861 2854 2862 2855 sz = min_t(int, pReq->SenseBufferLength, ··· 2923 2916 mptscsih_timer_expired(unsigned long data) 2924 2917 { 2925 2918 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data; 2919 + MPT_ADAPTER *ioc = hd->ioc; 2926 2920 2927 - ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired! Cmd %p\n", hd->ioc->name, hd->cmdPtr)); 2921 + ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired! Cmd %p\n", ioc->name, hd->cmdPtr)); 2928 2922 2929 2923 if (hd->cmdPtr) { 2930 2924 MPIHeader_t *cmd = (MPIHeader_t *)hd->cmdPtr; ··· 2939 2931 */ 2940 2932 } else { 2941 2933 /* Perform a FW reload */ 2942 - if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) { 2943 - printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", hd->ioc->name); 2934 + if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) { 2935 + printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name); 2944 2936 } 2945 2937 } 2946 2938 } else { 2947 2939 /* This should NEVER happen */ 2948 - printk(MYIOC_s_WARN_FMT "Null cmdPtr!!!!\n", hd->ioc->name); 2940 + printk(MYIOC_s_WARN_FMT "Null cmdPtr!!!!\n", ioc->name); 2949 2941 } 2950 2942 2951 2943 /* No more processing. 
··· 2953 2945 * The FW will reply to all outstanding commands, callback will finish cleanup. 2954 2946 * Hard reset clean-up will free all resources. 2955 2947 */ 2956 - ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired Complete!\n", hd->ioc->name)); 2948 + ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired Complete!\n", ioc->name)); 2957 2949 2958 2950 return; 2959 2951 } ··· 2991 2983 char cmdLen; 2992 2984 char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; 2993 2985 char cmd = io->cmd; 2986 + MPT_ADAPTER *ioc = hd->ioc; 2994 2987 2995 2988 in_isr = in_interrupt(); 2996 2989 if (in_isr) { 2997 - dprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "Internal SCSI IO request not allowed in ISR context!\n", 2998 - hd->ioc->name)); 2990 + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Internal SCSI IO request not allowed in ISR context!\n", 2991 + ioc->name)); 2999 2992 return -EPERM; 3000 2993 } 3001 2994 ··· 3097 3088 3098 3089 /* Get and Populate a free Frame 3099 3090 */ 3100 - if ((mf = mpt_get_msg_frame(hd->ioc->InternalCtx, hd->ioc)) == NULL) { 3101 - dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT "No msg frames!\n", 3102 - hd->ioc->name)); 3091 + if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { 3092 + dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "No msg frames!\n", 3093 + ioc->name)); 3103 3094 return -EBUSY; 3104 3095 } 3105 3096 ··· 3138 3129 3139 3130 if (cmd == REQUEST_SENSE) { 3140 3131 pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED); 3141 - ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "Untagged! 0x%2x\n", 3142 - hd->ioc->name, cmd)); 3132 + ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Untagged! 
0x%2x\n", 3133 + ioc->name, cmd)); 3143 3134 } 3144 3135 3145 3136 for (ii=0; ii < 16; ii++) 3146 3137 pScsiReq->CDB[ii] = CDB[ii]; 3147 3138 3148 3139 pScsiReq->DataLength = cpu_to_le32(io->size); 3149 - pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_low_dma 3140 + pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma 3150 3141 + (my_idx * MPT_SENSE_BUFFER_ALLOC)); 3151 3142 3152 - ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "Sending Command 0x%x for (%d:%d:%d)\n", 3153 - hd->ioc->name, cmd, io->channel, io->id, io->lun)); 3143 + ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Command 0x%x for (%d:%d:%d)\n", 3144 + ioc->name, cmd, io->channel, io->id, io->lun)); 3154 3145 3155 3146 if (dir == MPI_SCSIIO_CONTROL_READ) { 3156 3147 mpt_add_sge((char *) &pScsiReq->SGL, ··· 3185 3176 hd->cmdPtr = mf; 3186 3177 3187 3178 add_timer(&hd->timer); 3188 - mpt_put_msg_frame(hd->ioc->InternalCtx, hd->ioc, mf); 3179 + mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); 3189 3180 wait_event(hd->scandv_waitq, hd->scandv_wait_done); 3190 3181 3191 3182 if (hd->pLocal) { ··· 3201 3192 } else { 3202 3193 rc = -EFAULT; 3203 3194 /* This should never happen. */ 3204 - ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "_do_cmd: Null pLocal!!!\n", 3205 - hd->ioc->name)); 3195 + ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "_do_cmd: Null pLocal!!!\n", 3196 + ioc->name)); 3206 3197 } 3207 3198 3208 3199 return rc;
+54 -43
drivers/message/fusion/mptspi.c
··· 107 107 mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target, 108 108 struct scsi_device *sdev) 109 109 { 110 - SpiCfgData *pspi_data = &hd->ioc->spi_data; 110 + MPT_ADAPTER *ioc = hd->ioc; 111 + SpiCfgData *pspi_data = &ioc->spi_data; 111 112 int id = (int) target->id; 112 113 int nvram; 113 114 u8 width = MPT_NARROW; ··· 139 138 else { 140 139 factor = MPT_ULTRA320; 141 140 if (scsi_device_qas(sdev)) { 142 - ddvprintk(hd->ioc, 141 + ddvprintk(ioc, 143 142 printk(MYIOC_s_DEBUG_FMT "Enabling QAS due to " 144 - "byte56=%02x on id=%d!\n", hd->ioc->name, 143 + "byte56=%02x on id=%d!\n", ioc->name, 145 144 scsi_device_qas(sdev), id)); 146 145 noQas = 0; 147 146 } ··· 229 228 /* Disable QAS in a mixed configuration case 230 229 */ 231 230 232 - ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT 233 - "Disabling QAS due to noQas=%02x on id=%d!\n", hd->ioc->name, noQas, id)); 231 + ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT 232 + "Disabling QAS due to noQas=%02x on id=%d!\n", ioc->name, noQas, id)); 234 233 } 235 234 } 236 235 ··· 376 375 mptspi_is_raid(struct _MPT_SCSI_HOST *hd, u32 id) 377 376 { 378 377 int i, rc = 0; 378 + MPT_ADAPTER *ioc = hd->ioc; 379 379 380 - if (!hd->ioc->raid_data.pIocPg2) 380 + if (!ioc->raid_data.pIocPg2) 381 381 goto out; 382 382 383 - if (!hd->ioc->raid_data.pIocPg2->NumActiveVolumes) 383 + if (!ioc->raid_data.pIocPg2->NumActiveVolumes) 384 384 goto out; 385 - for (i=0; i < hd->ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { 386 - if (hd->ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id) { 385 + for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { 386 + if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id) { 387 387 rc = 1; 388 388 goto out; 389 389 } ··· 399 397 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 400 398 struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)shost->hostdata; 401 399 VirtTarget *vtarget; 400 + MPT_ADAPTER *ioc; 402 401 403 402 if (hd == NULL) 404 403 return -ENODEV; 405 404 405 + ioc 
= hd->ioc; 406 406 vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL); 407 407 if (!vtarget) 408 408 return -ENOMEM; 409 409 410 - vtarget->ioc_id = hd->ioc->id; 410 + vtarget->ioc_id = ioc->id; 411 411 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES; 412 412 vtarget->id = (u8)starget->id; 413 413 vtarget->channel = (u8)starget->channel; ··· 417 413 starget->hostdata = vtarget; 418 414 419 415 if (starget->channel == 1) { 420 - if (mptscsih_is_phys_disk(hd->ioc, 0, starget->id) == 0) 416 + if (mptscsih_is_phys_disk(ioc, 0, starget->id) == 0) 421 417 return 0; 422 418 vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT; 423 419 /* The real channel for this device is zero */ 424 420 vtarget->channel = 0; 425 421 /* The actual physdisknum (for RAID passthrough) */ 426 - vtarget->id = mptscsih_raid_id_to_num(hd->ioc, 0, 422 + vtarget->id = mptscsih_raid_id_to_num(ioc, 0, 427 423 starget->id); 428 424 } 429 425 430 426 if (starget->channel == 0 && 431 427 mptspi_is_raid(hd, starget->id)) { 432 428 vtarget->raidVolume = 1; 433 - ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT 434 - "RAID Volume @ channel=%d id=%d\n", hd->ioc->name, starget->channel, 429 + ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT 430 + "RAID Volume @ channel=%d id=%d\n", ioc->name, starget->channel, 435 431 starget->id)); 436 432 } 437 433 438 - if (hd->ioc->spi_data.nvram && 439 - hd->ioc->spi_data.nvram[starget->id] != MPT_HOST_NVRAM_INVALID) { 440 - u32 nvram = hd->ioc->spi_data.nvram[starget->id]; 434 + if (ioc->spi_data.nvram && 435 + ioc->spi_data.nvram[starget->id] != MPT_HOST_NVRAM_INVALID) { 436 + u32 nvram = ioc->spi_data.nvram[starget->id]; 441 437 spi_min_period(starget) = (nvram & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT; 442 438 spi_max_width(starget) = nvram & MPT_NVRAM_WIDE_DISABLE ? 
0 : 1; 443 439 } else { 444 - spi_min_period(starget) = hd->ioc->spi_data.minSyncFactor; 445 - spi_max_width(starget) = hd->ioc->spi_data.maxBusWidth; 440 + spi_min_period(starget) = ioc->spi_data.minSyncFactor; 441 + spi_max_width(starget) = ioc->spi_data.maxBusWidth; 446 442 } 447 - spi_max_offset(starget) = hd->ioc->spi_data.maxSyncOffset; 443 + spi_max_offset(starget) = ioc->spi_data.maxSyncOffset; 448 444 449 445 spi_offset(starget) = 0; 450 446 mptspi_write_width(starget, 0); ··· 618 614 { 619 615 MpiRaidActionRequest_t *pReq; 620 616 MPT_FRAME_HDR *mf; 617 + MPT_ADAPTER *ioc = hd->ioc; 621 618 622 619 /* Get and Populate a free Frame 623 620 */ 624 - if ((mf = mpt_get_msg_frame(hd->ioc->InternalCtx, hd->ioc)) == NULL) { 625 - ddvprintk(hd->ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n", 626 - hd->ioc->name)); 621 + if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { 622 + ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n", 623 + ioc->name)); 627 624 return -EAGAIN; 628 625 } 629 626 pReq = (MpiRaidActionRequest_t *)mf; ··· 645 640 mpt_add_sge((char *)&pReq->ActionDataSGE, 646 641 MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); 647 642 648 - ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n", 649 - hd->ioc->name, pReq->Action, channel, id)); 643 + ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n", 644 + ioc->name, pReq->Action, channel, id)); 650 645 651 646 hd->pLocal = NULL; 652 647 hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */ ··· 658 653 hd->cmdPtr = mf; 659 654 660 655 add_timer(&hd->timer); 661 - mpt_put_msg_frame(hd->ioc->InternalCtx, hd->ioc, mf); 656 + mpt_put_msg_frame(ioc->InternalCtx, ioc, mf); 662 657 wait_event(hd->scandv_waitq, hd->scandv_wait_done); 663 658 664 659 if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0)) ··· 671 666 struct scsi_device *sdev) 672 667 { 673 668 VirtTarget *vtarget = 
scsi_target(sdev)->hostdata; 669 + MPT_ADAPTER *ioc = hd->ioc; 674 670 675 671 /* no DV on RAID devices */ 676 672 if (sdev->channel == 0 && ··· 682 676 if (sdev->channel == 1 && 683 677 mptscsih_quiesce_raid(hd, 1, vtarget->channel, vtarget->id) < 0) { 684 678 starget_printk(MYIOC_s_ERR_FMT, scsi_target(sdev), 685 - "Integrated RAID quiesce failed\n", hd->ioc->name); 679 + "Integrated RAID quiesce failed\n", ioc->name); 686 680 return; 687 681 } 688 682 ··· 693 687 if (sdev->channel == 1 && 694 688 mptscsih_quiesce_raid(hd, 0, vtarget->channel, vtarget->id) < 0) 695 689 starget_printk(MYIOC_s_ERR_FMT, scsi_target(sdev), 696 - "Integrated RAID resume failed\n", hd->ioc->name); 690 + "Integrated RAID resume failed\n", ioc->name); 697 691 698 692 mptspi_read_parameters(sdev->sdev_target); 699 693 spi_display_xfer_agreement(sdev->sdev_target); ··· 706 700 VirtTarget *vtarget; 707 701 VirtDevice *vdevice; 708 702 struct scsi_target *starget; 703 + MPT_ADAPTER *ioc = hd->ioc; 709 704 710 705 if (sdev->channel == 1 && 711 - mptscsih_is_phys_disk(hd->ioc, 0, sdev->id) == 0) 706 + mptscsih_is_phys_disk(ioc, 0, sdev->id) == 0) 712 707 return -ENXIO; 713 708 714 709 vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL); 715 710 if (!vdevice) { 716 711 printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n", 717 - hd->ioc->name, sizeof(VirtDevice)); 712 + ioc->name, sizeof(VirtDevice)); 718 713 return -ENOMEM; 719 714 } 720 715 ··· 766 759 { 767 760 struct _MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata; 768 761 VirtDevice *vdevice = SCpnt->device->hostdata; 762 + MPT_ADAPTER *ioc = hd->ioc; 769 763 770 764 if (!vdevice || !vdevice->vtarget) { 771 765 SCpnt->result = DID_NO_CONNECT << 16; ··· 775 767 } 776 768 777 769 if (SCpnt->device->channel == 1 && 778 - mptscsih_is_phys_disk(hd->ioc, 0, SCpnt->device->id) == 0) { 770 + mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) { 779 771 SCpnt->result = DID_NO_CONNECT << 16; 780 772 done(SCpnt); 781 773 
return 0; 782 774 } 783 775 784 776 if (spi_dv_pending(scsi_target(SCpnt->device))) 785 - ddvprintk(hd->ioc, scsi_print_command(SCpnt)); 777 + ddvprintk(ioc, scsi_print_command(SCpnt)); 786 778 787 779 return mptscsih_qcmd(SCpnt,done); 788 780 } ··· 1079 1071 struct work_queue_wrapper *wqw = 1080 1072 container_of(work, struct work_queue_wrapper, work); 1081 1073 struct _MPT_SCSI_HOST *hd = wqw->hd; 1082 - struct Scsi_Host *shost = hd->ioc->sh; 1074 + MPT_ADAPTER *ioc = hd->ioc; 1075 + struct Scsi_Host *shost = ioc->sh; 1083 1076 struct scsi_device *sdev; 1084 1077 int disk = wqw->disk; 1085 1078 struct _CONFIG_PAGE_IOC_3 *pg3; 1086 1079 1087 1080 kfree(wqw); 1088 1081 1089 - mpt_findImVolumes(hd->ioc); 1090 - pg3 = hd->ioc->raid_data.pIocPg3; 1082 + mpt_findImVolumes(ioc); 1083 + pg3 = ioc->raid_data.pIocPg3; 1091 1084 if (!pg3) 1092 1085 return; 1093 1086 ··· 1106 1097 continue; 1107 1098 1108 1099 starget_printk(MYIOC_s_INFO_FMT, vtarget->starget, 1109 - "Integrated RAID requests DV of new device\n", hd->ioc->name); 1100 + "Integrated RAID requests DV of new device\n", ioc->name); 1110 1101 mptspi_dv_device(hd, sdev); 1111 1102 } 1112 1103 shost_printk(MYIOC_s_INFO_FMT, shost, 1113 - "Integrated RAID detects new device %d\n", hd->ioc->name, disk); 1114 - scsi_scan_target(&hd->ioc->sh->shost_gendev, 1, disk, 0, 1); 1104 + "Integrated RAID detects new device %d\n", ioc->name, disk); 1105 + scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1); 1115 1106 } 1116 1107 1117 1108 1118 1109 static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk) 1119 1110 { 1120 1111 struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC); 1112 + MPT_ADAPTER *ioc = hd->ioc; 1121 1113 1122 1114 if (!wqw) { 1123 - shost_printk(MYIOC_s_ERR_FMT, hd->ioc->sh, 1115 + shost_printk(MYIOC_s_ERR_FMT, ioc->sh, 1124 1116 "Failed to act on RAID event for physical disk %d\n", 1125 - hd->ioc->name, disk); 1117 + ioc->name, disk); 1126 1118 return; 1127 1119 } 1128 1120 
INIT_WORK(&wqw->work, mpt_work_wrapper); ··· 1226 1216 struct scsi_target *starget; 1227 1217 struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; 1228 1218 u32 nego; 1219 + MPT_ADAPTER *ioc = hd->ioc; 1229 1220 1230 1221 kfree(wqw); 1231 1222 1232 1223 if (hd->spi_pending) { 1233 - shost_for_each_device(sdev, hd->ioc->sh) { 1224 + shost_for_each_device(sdev, ioc->sh) { 1234 1225 if (hd->spi_pending & (1 << sdev->id)) 1235 1226 continue; 1236 1227 starget = scsi_target(sdev); ··· 1242 1231 mptspi_write_spi_device_pg1(starget, &pg1); 1243 1232 } 1244 1233 } else { 1245 - shost_for_each_device(sdev, hd->ioc->sh) 1234 + shost_for_each_device(sdev, ioc->sh) 1246 1235 mptspi_dv_device(hd, sdev); 1247 1236 } 1248 1237 } ··· 1494 1483 1495 1484 /* Some versions of the firmware don't support page 0; without 1496 1485 * that we can't get the parameters */ 1497 - if (hd->ioc->spi_data.sdp0length != 0) 1486 + if (ioc->spi_data.sdp0length != 0) 1498 1487 sh->transportt = mptspi_transport_template; 1499 1488 1500 1489 error = scsi_add_host (sh, &ioc->pcidev->dev);