Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI update from James Bottomley:
"This is a much shorter set of patches that were on the go but didn't
make it in to the early pull request for the merge window. It's
really a set of bug fixes plus some final cleanup work on the new tag
queue API"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
storvsc: ring buffer failures may result in I/O freeze
ipr: set scsi_level correctly for disk arrays
ipr: add support for async scanning to speed up boot
scsi_debug: fix missing "break;" in SDEBUG_UA_CAPACITY_CHANGED case
scsi_debug: take sdebug_host_list_lock when changing capacity
scsi_debug: improve driver description in Kconfig
scsi_debug: fix compare and write errors
qla2xxx: fix race in handling rport deletion during recovery causes panic
scsi: blacklist RSOC for Microsoft iSCSI target devices
scsi: fix random memory corruption with scsi-mq + T10 PI
Revert "[SCSI] mpt3sas: Remove phys on topology change"
Revert "[SCSI] mpt2sas: Remove phys on topology change."
esas2r: Correct typos of "validate" in a comment
fc: FCP_PTA_SIMPLE is 0
ibmvfc: remove unused tag variable
scsi: remove MSG_*_TAG defines
scsi: remove scsi_set_tag_type
scsi: remove scsi_get_tag_type
scsi: never drop to untagged mode during queue ramp down
scsi: remove ->change_queue_type method
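
The tag queue API cleanup referenced above leaves very little for an
initiator driver to implement: the MSG_*_TAG defines, scsi_get_tag_type(),
scsi_set_tag_type() and the ->change_queue_type() method are all gone. As
a rough sketch of what remains (the mydrv_* names are hypothetical;
scsi_change_queue_depth() and sdev->simple_tags are the real midlayer
interfaces):

    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    static int mydrv_queuecommand(struct Scsi_Host *shost,
                                  struct scsi_cmnd *scmd)
    {
            if (scmd->device->simple_tags)
                    /* tagged path: the block layer tag is scmd->request->tag */
                    mydrv_send_tagged(shost, scmd, scmd->request->tag);
            else
                    mydrv_send_untagged(shost, scmd);
            return 0;
    }

    static struct scsi_host_template mydrv_template = {
            .name               = "mydrv",
            .queuecommand       = mydrv_queuecommand,
            .change_queue_depth = scsi_change_queue_depth, /* midlayer helper */
            .use_blk_tags       = 1,
            /* no .change_queue_type -- the method no longer exists */
    };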

-1
drivers/infiniband/ulp/srp/ib_srp.c
··· 2740 2740 .info = srp_target_info, 2741 2741 .queuecommand = srp_queuecommand, 2742 2742 .change_queue_depth = srp_change_queue_depth, 2743 - .change_queue_type = scsi_change_queue_type, 2744 2743 .eh_abort_handler = srp_abort, 2745 2744 .eh_device_reset_handler = srp_reset_device, 2746 2745 .eh_host_reset_handler = srp_reset_host,
+5 -5
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 1708 1708 1709 1709 switch (srp_cmd->task_attr) { 1710 1710 case SRP_CMD_SIMPLE_Q: 1711 - cmd->sam_task_attr = MSG_SIMPLE_TAG; 1711 + cmd->sam_task_attr = TCM_SIMPLE_TAG; 1712 1712 break; 1713 1713 case SRP_CMD_ORDERED_Q: 1714 1714 default: 1715 - cmd->sam_task_attr = MSG_ORDERED_TAG; 1715 + cmd->sam_task_attr = TCM_ORDERED_TAG; 1716 1716 break; 1717 1717 case SRP_CMD_HEAD_OF_Q: 1718 - cmd->sam_task_attr = MSG_HEAD_TAG; 1718 + cmd->sam_task_attr = TCM_HEAD_TAG; 1719 1719 break; 1720 1720 case SRP_CMD_ACA: 1721 - cmd->sam_task_attr = MSG_ACA_TAG; 1721 + cmd->sam_task_attr = TCM_ACA_TAG; 1722 1722 break; 1723 1723 } 1724 1724 ··· 1733 1733 sizeof(srp_cmd->lun)); 1734 1734 rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb, 1735 1735 &send_ioctx->sense_data[0], unpacked_lun, data_len, 1736 - MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF); 1736 + TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF); 1737 1737 if (rc != 0) { 1738 1738 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1739 1739 goto send_sense;
+3 -38
drivers/scsi/53c700.c
··· 176 176 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); 177 177 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); 178 178 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); 179 - static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth); 180 179 181 180 STATIC struct device_attribute *NCR_700_dev_attrs[]; 182 181 ··· 325 326 tpnt->slave_destroy = NCR_700_slave_destroy; 326 327 tpnt->slave_alloc = NCR_700_slave_alloc; 327 328 tpnt->change_queue_depth = NCR_700_change_queue_depth; 328 - tpnt->change_queue_type = NCR_700_change_queue_type; 329 329 tpnt->use_blk_tags = 1; 330 330 331 331 if(tpnt->name == NULL) ··· 902 904 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); 903 905 904 906 SCp->device->tagged_supported = 0; 907 + SCp->device->simple_tags = 0; 905 908 scsi_change_queue_depth(SCp->device, host->cmd_per_lun); 906 - scsi_set_tag_type(SCp->device, 0); 907 909 } else { 908 910 shost_printk(KERN_WARNING, host, 909 911 "(%d:%d) Unexpected REJECT Message %s\n", ··· 1816 1818 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); 1817 1819 } 1818 1820 1819 - if((hostdata->tag_negotiated &(1<<scmd_id(SCp))) 1820 - && scsi_get_tag_type(SCp->device)) { 1821 + if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) && 1822 + SCp->device->simple_tags) { 1821 1823 slot->tag = SCp->request->tag; 1822 1824 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n", 1823 1825 slot->tag, slot); ··· 2078 2080 if (depth > NCR_700_MAX_TAGS) 2079 2081 depth = NCR_700_MAX_TAGS; 2080 2082 return scsi_change_queue_depth(SDp, depth); 2081 - } 2082 - 2083 - static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type) 2084 - { 2085 - int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0) 2086 - || (tag_type != 0 && scsi_get_tag_type(SDp) == 0)); 2087 - struct NCR_700_Host_Parameters *hostdata = 2088 - (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; 2089 - 2090 - /* We have a global (per target) flag to track whether TCQ is 2091 - * enabled, so we'll be turning it off for the entire target here. 2092 - * our tag algorithm will fail if we mix tagged and untagged commands, 2093 - * so quiesce the device before doing this */ 2094 - if (change_tag) 2095 - scsi_target_quiesce(SDp->sdev_target); 2096 - 2097 - scsi_set_tag_type(SDp, tag_type); 2098 - if (!tag_type) { 2099 - /* shift back to the default unqueued number of commands 2100 - * (the user can still raise this) */ 2101 - scsi_change_queue_depth(SDp, SDp->host->cmd_per_lun); 2102 - hostdata->tag_negotiated &= ~(1 << sdev_id(SDp)); 2103 - } else { 2104 - /* Here, we cleared the negotiation flag above, so this 2105 - * will force the driver to renegotiate */ 2106 - scsi_change_queue_depth(SDp, SDp->queue_depth); 2107 - if (change_tag) 2108 - NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION); 2109 - } 2110 - if (change_tag) 2111 - scsi_target_resume(SDp->sdev_target); 2112 - 2113 - return tag_type; 2114 2083 } 2115 2084 2116 2085 static ssize_t
+8 -9
drivers/scsi/Kconfig
··· 1462 1462 SCSI controllers (based on WD33C296A chip). 1463 1463 1464 1464 config SCSI_DEBUG 1465 - tristate "SCSI debugging host simulator" 1465 + tristate "SCSI debugging host and device simulator" 1466 1466 depends on SCSI 1467 1467 select CRC_T10DIF 1468 1468 help 1469 - This is a host adapter simulator that can simulate multiple hosts 1470 - each with multiple dummy SCSI devices (disks). It defaults to one 1471 - host adapter with one dummy SCSI disk. Each dummy disk uses kernel 1472 - RAM as storage (i.e. it is a ramdisk). To save space when multiple 1473 - dummy disks are simulated, they share the same kernel RAM for 1474 - their storage. See <http://sg.danny.cz/sg/sdebug26.html> for more 1475 - information. This driver is primarily of use to those testing the 1476 - SCSI and block subsystems. If unsure, say N. 1469 + This pseudo driver simulates one or more hosts (SCSI initiators), 1470 + each with one or more targets, each with one or more logical units. 1471 + Defaults to one of each, creating a small RAM disk device. Many 1472 + parameters found in the /sys/bus/pseudo/drivers/scsi_debug 1473 + directory can be tweaked at run time. 1474 + See <http://sg.danny.cz/sg/sdebug26.html> for more information. 1475 + Mainly used for testing and best as a module. If unsure, say N. 1477 1476 1478 1477 config SCSI_MESH 1479 1478 tristate "MESH (Power Mac internal SCSI) support"
+4 -4
drivers/scsi/advansys.c
··· 7921 7921 */ 7922 7922 if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) && 7923 7923 (boardp->reqcnt[scp->device->id] % 255) == 0) { 7924 - asc_scsi_q->q2.tag_code = MSG_ORDERED_TAG; 7924 + asc_scsi_q->q2.tag_code = ORDERED_QUEUE_TAG; 7925 7925 } else { 7926 - asc_scsi_q->q2.tag_code = MSG_SIMPLE_TAG; 7926 + asc_scsi_q->q2.tag_code = SIMPLE_QUEUE_TAG; 7927 7927 } 7928 7928 7929 7929 /* Build ASC_SCSI_Q */ ··· 8351 8351 } 8352 8352 q_addr = ASC_QNO_TO_QADDR(q_no); 8353 8353 if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) { 8354 - scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG; 8354 + scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG; 8355 8355 } 8356 8356 scsiq->q1.status = QS_FREE; 8357 8357 AscMemWordCopyPtrToLram(iop_base, ··· 8669 8669 } 8670 8670 } 8671 8671 if (disable_syn_offset_one_fix) { 8672 - scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG; 8672 + scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG; 8673 8673 scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX | 8674 8674 ASC_TAG_FLAG_DISABLE_DISCONNECT); 8675 8675 } else {
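
The MSG_*_TAG names dropped by this series duplicated tag message bytes
that include/scsi/scsi.h already defines for the SPI transport, so
parallel-SCSI drivers such as advansys simply switch names; the numeric
values are identical:

    #define SIMPLE_QUEUE_TAG    0x20
    #define HEAD_OF_QUEUE_TAG   0x21
    #define ORDERED_QUEUE_TAG   0x22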
-1
drivers/scsi/aic94xx/aic94xx_init.c
··· 63 63 .scan_finished = asd_scan_finished, 64 64 .scan_start = asd_scan_start, 65 65 .change_queue_depth = sas_change_queue_depth, 66 - .change_queue_type = sas_change_queue_type, 67 66 .bios_param = sas_bios_param, 68 67 .can_queue = 1, 69 68 .cmd_per_lun = 1,
-1
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 2792 2792 .eh_host_reset_handler = fc_eh_host_reset, 2793 2793 .slave_alloc = fc_slave_alloc, 2794 2794 .change_queue_depth = scsi_change_queue_depth, 2795 - .change_queue_type = scsi_change_queue_type, 2796 2795 .this_id = -1, 2797 2796 .cmd_per_lun = 3, 2798 2797 .use_clustering = ENABLE_CLUSTERING,
+1 -5
drivers/scsi/bnx2fc/bnx2fc_io.c
··· 1737 1737 fcp_cmnd->fc_pri_ta = 0; 1738 1738 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; 1739 1739 fcp_cmnd->fc_flags = io_req->io_req_flags; 1740 - 1741 - if (sc_cmd->flags & SCMD_TAGGED) 1742 - fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; 1743 - else 1744 - fcp_cmnd->fc_pri_ta = 0; 1740 + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; 1745 1741 } 1746 1742 1747 1743 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+1 -4
drivers/scsi/csiostor/csio_scsi.c
··· 172 172 fcp_cmnd->fc_cmdref = 0; 173 173 174 174 memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16); 175 - if (scmnd->flags & SCMD_TAGGED) 176 - fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; 177 - else 178 - fcp_cmnd->fc_pri_ta = 0; 175 + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; 179 176 fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd)); 180 177 181 178 if (req->nsge)
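
The bnx2fc and csiostor hunks above both lean on the observation behind
the "fc: FCP_PTA_SIMPLE is 0" patch: assigning FCP_PTA_SIMPLE
unconditionally is equivalent to the old if/else, because the simple task
attribute is encoded as zero. For reference, from
include/scsi/fc/fc_fcp.h:

    #define FCP_PTA_SIMPLE      0   /* simple task attribute */
    #define FCP_PTA_HEADQ       1   /* head of queue task attribute */
    #define FCP_PTA_ORDERED     2   /* ordered task attribute */
    #define FCP_PTA_ACA         4   /* auto. contingent allegiance */
    #define FCP_PTA_MASK        7   /* mask for task attribute field */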
+2 -2
drivers/scsi/esas2r/esas2r_flash.c
··· 684 684 * 1) verify the fi_version is correct 685 685 * 2) verify the checksum of the entire image. 686 686 * 3) validate the adap_typ, action and length fields. 687 - * 4) valdiate each component header. check the img_type and 687 + * 4) validate each component header. check the img_type and 688 688 * length fields 689 - * 5) valdiate each component image. validate signatures and 689 + * 5) validate each component image. validate signatures and 690 690 * local checksums 691 691 */ 692 692 static bool verify_fi(struct esas2r_adapter *a,
-1
drivers/scsi/esas2r/esas2r_main.c
··· 255 255 .emulated = 0, 256 256 .proc_name = ESAS2R_DRVR_NAME, 257 257 .change_queue_depth = scsi_change_queue_depth, 258 - .change_queue_type = scsi_change_queue_type, 259 258 .max_sectors = 0xFFFF, 260 259 .use_blk_tags = 1, 261 260 };
-1
drivers/scsi/fcoe/fcoe.c
··· 281 281 .eh_host_reset_handler = fc_eh_host_reset, 282 282 .slave_alloc = fc_slave_alloc, 283 283 .change_queue_depth = scsi_change_queue_depth, 284 - .change_queue_type = scsi_change_queue_type, 285 284 .this_id = -1, 286 285 .cmd_per_lun = 3, 287 286 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
-1
drivers/scsi/fnic/fnic_main.c
··· 111 111 .eh_host_reset_handler = fnic_host_reset, 112 112 .slave_alloc = fnic_slave_alloc, 113 113 .change_queue_depth = scsi_change_queue_depth, 114 - .change_queue_type = scsi_change_queue_type, 115 114 .this_id = -1, 116 115 .cmd_per_lun = 3, 117 116 .can_queue = FNIC_DFLT_IO_REQ,
-2
drivers/scsi/ibmvscsi/ibmvfc.c
··· 1615 1615 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 1616 1616 struct ibmvfc_cmd *vfc_cmd; 1617 1617 struct ibmvfc_event *evt; 1618 - u8 tag[2]; 1619 1618 int rc; 1620 1619 1621 1620 if (unlikely((rc = fc_remote_port_chkready(rport))) || ··· 3088 3089 .target_alloc = ibmvfc_target_alloc, 3089 3090 .scan_finished = ibmvfc_scan_finished, 3090 3091 .change_queue_depth = ibmvfc_change_queue_depth, 3091 - .change_queue_type = scsi_change_queue_type, 3092 3092 .cmd_per_lun = 16, 3093 3093 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT, 3094 3094 .this_id = -1,
+45 -71
drivers/scsi/ipr.c
··· 1426 1426 if (res->sdev) { 1427 1427 res->del_from_ml = 1; 1428 1428 res->res_handle = IPR_INVALID_RES_HANDLE; 1429 - if (ioa_cfg->allow_ml_add_del) 1430 - schedule_work(&ioa_cfg->work_q); 1429 + schedule_work(&ioa_cfg->work_q); 1431 1430 } else { 1432 1431 ipr_clear_res_target(res); 1433 1432 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 1434 1433 } 1435 1434 } else if (!res->sdev || res->del_from_ml) { 1436 1435 res->add_to_ml = 1; 1437 - if (ioa_cfg->allow_ml_add_del) 1438 - schedule_work(&ioa_cfg->work_q); 1436 + schedule_work(&ioa_cfg->work_q); 1439 1437 } 1440 1438 1441 1439 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); ··· 3271 3273 restart: 3272 3274 do { 3273 3275 did_work = 0; 3274 - if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || 3275 - !ioa_cfg->allow_ml_add_del) { 3276 - if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { 3275 3276 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3276 3277 return; 3277 3278 } ··· 3308 3311 } 3309 3312 } 3310 3313 3314 + ioa_cfg->scan_done = 1; 3311 3315 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3312 3316 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); 3313 3317 LEAVE; ··· 4344 4346 } 4345 4347 4346 4348 /** 4347 - * ipr_change_queue_type - Change the device's queue type 4348 - * @dsev: scsi device struct 4349 - * @tag_type: type of tags to use 4350 - * 4351 - * Return value: 4352 - * actual queue type set 4353 - **/ 4354 - static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type) 4355 - { 4356 - struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; 4357 - struct ipr_resource_entry *res; 4358 - unsigned long lock_flags = 0; 4359 - 4360 - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4361 - res = (struct ipr_resource_entry *)sdev->hostdata; 4362 - if (res && ipr_is_gscsi(res)) 4363 - tag_type = scsi_change_queue_type(sdev, tag_type); 4364 - else 4365 - tag_type = 0; 4366 - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4367 - return tag_type; 4368 - } 4369 - 4370 - /** 4371 4349 * ipr_show_adapter_handle - Show the adapter's resource handle for this device 4372 4350 * @dev: device struct 4373 4351 * @attr: device attribute structure ··· 4713 4739 sdev->no_uld_attach = 1; 4714 4740 } 4715 4741 if (ipr_is_vset_device(res)) { 4742 + sdev->scsi_level = SCSI_SPC_3; 4716 4743 blk_queue_rq_timeout(sdev->request_queue, 4717 4744 IPR_VSET_RW_TIMEOUT); 4718 4745 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); ··· 5203 5228 5204 5229 /** 5205 5230 * ipr_eh_abort - Abort a single op 5231 + * @scsi_cmd: scsi command struct 5232 + * 5233 + * Return value: 5234 + * 0 if scan in progress / 1 if scan is complete 5235 + **/ 5236 + static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time) 5237 + { 5238 + unsigned long lock_flags; 5239 + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 5240 + int rc = 0; 5241 + 5242 + spin_lock_irqsave(shost->host_lock, lock_flags); 5243 + if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) 5244 + rc = 1; 5245 + if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) 5246 + rc = 1; 5247 + spin_unlock_irqrestore(shost->host_lock, lock_flags); 5248 + return rc; 5249 + } 5250 + 5251 + /** 5252 + * ipr_eh_host_reset - Reset the host adapter 5206 5253 * @scsi_cmd: scsi command struct 5207 5254 * 5208 5255 * Return value: ··· 5776 5779 5777 5780 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 5778 5781 5779 - if (!scsi_get_tag_type(scsi_cmd->device)) { 5782 + if (!scsi_cmd->device->simple_tags) { 5780 5783 ipr_erp_request_sense(ipr_cmd); 5781 5784 return; 5782 5785 } ··· 6296 6299 .slave_alloc = ipr_slave_alloc, 6297 6300 .slave_configure = ipr_slave_configure, 6298 6301 .slave_destroy = ipr_slave_destroy, 6302 + .scan_finished = ipr_scan_finished, 6299 6303 .target_alloc = ipr_target_alloc, 6300 6304 .target_destroy = ipr_target_destroy, 6301 6305 .change_queue_depth = ipr_change_queue_depth, 6302 - .change_queue_type = ipr_change_queue_type, 6303 6306 .bios_param = ipr_biosparam, 6304 6307 .can_queue = IPR_MAX_COMMANDS, 6305 6308 .this_id = -1, ··· 6838 6841 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; 6839 6842 6840 6843 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 6841 - if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) { 6844 + if (res->add_to_ml || res->del_from_ml) { 6842 6845 ipr_trace; 6843 6846 break; 6844 6847 } ··· 6867 6870 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) 6868 6871 scsi_block_requests(ioa_cfg->host); 6869 6872 6873 + schedule_work(&ioa_cfg->work_q); 6870 6874 LEAVE; 6871 6875 return IPR_RC_JOB_RETURN; 6872 6876 } ··· 7607 7609 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); 7608 7610 type[4] = '\0'; 7609 7611 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); 7612 + 7613 + if (ipr_invalid_adapter(ioa_cfg)) { 7614 + dev_err(&ioa_cfg->pdev->dev, 7615 + "Adapter not supported in this hardware configuration.\n"); 7616 + 7617 + if (!ipr_testmode) { 7618 + ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; 7619 + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7620 + list_add_tail(&ipr_cmd->queue, 7621 + &ioa_cfg->hrrq->hrrq_free_q); 7622 + return IPR_RC_JOB_RETURN; 7623 + } 7624 + } 7610 7625 7611 7626 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; 7612 7627 ··· 8808 8797 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, 8809 8798 IPR_SHUTDOWN_NONE); 8810 8799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 8811 - wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 8812 - spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 8813 - 8814 - if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { 8815 - rc = -EIO; 8816 - } else if (ipr_invalid_adapter(ioa_cfg)) { 8817 - if (!ipr_testmode) 8818 - rc = -EIO; 8819 - 8820 - dev_err(&ioa_cfg->pdev->dev, 8821 - "Adapter not supported in this hardware configuration.\n"); 8822 - } 8823 - 8824 - spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 8825 8800 8826 8801 LEAVE; 8827 8802 return rc; ··· 9261 9264 * ioa_cfg->max_devs_supported))); 9262 9265 } 9263 9266 9264 - host->max_channel = IPR_MAX_BUS_TO_SCAN; 9267 + host->max_channel = IPR_VSET_BUS; 9265 9268 host->unique_id = host->host_no; 9266 9269 host->max_cmd_len = IPR_MAX_CDB_LEN; 9267 9270 host->can_queue = ioa_cfg->max_cmds; ··· 9761 9764 } 9762 9765 9763 9766 /** 9764 - * ipr_scan_vsets - Scans for VSET devices 9765 - * @ioa_cfg: ioa config struct 9766 - * 9767 - * Description: Since the VSET resources do not follow SAM in that we can have 9768 - * sparse LUNs with no LUN 0, we have to scan for these ourselves. 9769 - * 9770 - * Return value: 9771 - * none 9772 - **/ 9773 - static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg) 9774 - { 9775 - int target, lun; 9776 - 9777 - for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++) 9778 - for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++) 9779 - scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun); 9780 - } 9781 - 9782 - /** 9783 9767 * ipr_initiate_ioa_bringdown - Bring down an adapter 9784 9768 * @ioa_cfg: ioa config struct 9785 9769 * @shutdown_type: shutdown type ··· 9915 9937 } 9916 9938 9917 9939 scsi_scan_host(ioa_cfg->host); 9918 - ipr_scan_vsets(ioa_cfg); 9919 - scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN); 9920 - ioa_cfg->allow_ml_add_del = 1; 9921 - ioa_cfg->host->max_channel = IPR_VSET_BUS; 9922 9940 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; 9923 9941 9924 9942 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+1 -3
drivers/scsi/ipr.h
··· 157 157 158 158 #define IPR_MAX_NUM_TARGETS_PER_BUS 256 159 159 #define IPR_MAX_NUM_LUNS_PER_TARGET 256 160 - #define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8 161 160 #define IPR_VSET_BUS 0xff 162 161 #define IPR_IOA_BUS 0xff 163 162 #define IPR_IOA_TARGET 0xff 164 163 #define IPR_IOA_LUN 0xff 165 164 #define IPR_MAX_NUM_BUSES 16 166 - #define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES 167 165 168 166 #define IPR_NUM_RESET_RELOAD_RETRIES 3 169 167 ··· 1451 1453 u8 in_ioa_bringdown:1; 1452 1454 u8 ioa_unit_checked:1; 1453 1455 u8 dump_taken:1; 1454 - u8 allow_ml_add_del:1; 1456 + u8 scan_done:1; 1455 1457 u8 needs_hard_reset:1; 1456 1458 u8 dual_raid:1; 1457 1459 u8 needs_warm_reset:1;
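
The scan_done flag that replaces allow_ml_add_del hooks ipr into the
midlayer's asynchronous scanning: scsi_scan_host() calls ->scan_start()
(if present) once and then polls ->scan_finished() with the elapsed time
in jiffies until it returns nonzero. A minimal sketch of that contract
(driver names hypothetical):

    static int mydrv_scan_finished(struct Scsi_Host *shost,
                                   unsigned long elapsed_time)
    {
            struct mydrv_host *h = shost_priv(shost);

            /* discovery complete, or taking too long -- stop polling */
            return h->discovery_done || elapsed_time > 60 * HZ;
    }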
-1
drivers/scsi/isci/init.c
··· 158 158 .scan_finished = isci_host_scan_finished, 159 159 .scan_start = isci_host_start, 160 160 .change_queue_depth = sas_change_queue_depth, 161 - .change_queue_type = sas_change_queue_type, 162 161 .bios_param = sas_bios_param, 163 162 .can_queue = ISCI_CAN_QUEUE_VAL, 164 163 .cmd_per_lun = 1,
-8
drivers/scsi/libsas/sas_scsi_host.c
··· 906 906 return scsi_change_queue_depth(sdev, depth); 907 907 } 908 908 909 - int sas_change_queue_type(struct scsi_device *scsi_dev, int type) 910 - { 911 - if (dev_is_sata(sdev_to_domain_dev(scsi_dev))) 912 - return -EINVAL; 913 - return scsi_change_queue_type(scsi_dev, type); 914 - } 915 - 916 909 int sas_bios_param(struct scsi_device *scsi_dev, 917 910 struct block_device *bdev, 918 911 sector_t capacity, int *hsc) ··· 1004 1011 EXPORT_SYMBOL_GPL(sas_target_alloc); 1005 1012 EXPORT_SYMBOL_GPL(sas_slave_configure); 1006 1013 EXPORT_SYMBOL_GPL(sas_change_queue_depth); 1007 - EXPORT_SYMBOL_GPL(sas_change_queue_type); 1008 1014 EXPORT_SYMBOL_GPL(sas_bios_param); 1009 1015 EXPORT_SYMBOL_GPL(sas_task_abort); 1010 1016 EXPORT_SYMBOL_GPL(sas_phy_reset);
-2
drivers/scsi/lpfc/lpfc_scsi.c
··· 5879 5879 .max_sectors = 0xFFFF, 5880 5880 .vendor_id = LPFC_NL_VENDOR_ID, 5881 5881 .change_queue_depth = scsi_change_queue_depth, 5882 - .change_queue_type = scsi_change_queue_type, 5883 5882 .use_blk_tags = 1, 5884 5883 .track_queue_depth = 1, 5885 5884 }; ··· 5903 5904 .shost_attrs = lpfc_vport_attrs, 5904 5905 .max_sectors = 0xFFFF, 5905 5906 .change_queue_depth = scsi_change_queue_depth, 5906 - .change_queue_type = scsi_change_queue_type, 5907 5907 .use_blk_tags = 1, 5908 5908 .track_queue_depth = 1, 5909 5909 };
-1
drivers/scsi/mpt2sas/mpt2sas_scsih.c
··· 7592 7592 .scan_finished = _scsih_scan_finished, 7593 7593 .scan_start = _scsih_scan_start, 7594 7594 .change_queue_depth = _scsih_change_queue_depth, 7595 - .change_queue_type = scsi_change_queue_type, 7596 7595 .eh_abort_handler = _scsih_abort, 7597 7596 .eh_device_reset_handler = _scsih_dev_reset, 7598 7597 .eh_target_reset_handler = _scsih_target_reset,
+1 -4
drivers/scsi/mpt2sas/mpt2sas_transport.c
··· 1006 1006 &mpt2sas_phy->remote_identify); 1007 1007 _transport_add_phy_to_an_existing_port(ioc, sas_node, 1008 1008 mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address); 1009 - } else { 1009 + } else 1010 1010 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct 1011 1011 sas_identify)); 1012 - _transport_del_phy_from_an_existing_port(ioc, sas_node, 1013 - mpt2sas_phy); 1014 - } 1015 1012 1016 1013 if (mpt2sas_phy->phy) 1017 1014 mpt2sas_phy->phy->negotiated_linkrate =
-1
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 7229 7229 .scan_finished = _scsih_scan_finished, 7230 7230 .scan_start = _scsih_scan_start, 7231 7231 .change_queue_depth = _scsih_change_queue_depth, 7232 - .change_queue_type = scsi_change_queue_type, 7233 7232 .eh_abort_handler = _scsih_abort, 7234 7233 .eh_device_reset_handler = _scsih_dev_reset, 7235 7234 .eh_target_reset_handler = _scsih_target_reset,
+1 -4
drivers/scsi/mpt3sas/mpt3sas_transport.c
··· 1003 1003 &mpt3sas_phy->remote_identify); 1004 1004 _transport_add_phy_to_an_existing_port(ioc, sas_node, 1005 1005 mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address); 1006 - } else { 1006 + } else 1007 1007 memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct 1008 1008 sas_identify)); 1009 - _transport_del_phy_from_an_existing_port(ioc, sas_node, 1010 - mpt3sas_phy); 1011 - } 1012 1009 1013 1010 if (mpt3sas_phy->phy) 1014 1011 mpt3sas_phy->phy->negotiated_linkrate =
-1
drivers/scsi/mvsas/mv_init.c
··· 54 54 .scan_finished = mvs_scan_finished, 55 55 .scan_start = mvs_scan_start, 56 56 .change_queue_depth = sas_change_queue_depth, 57 - .change_queue_type = sas_change_queue_type, 58 57 .bios_param = sas_bios_param, 59 58 .can_queue = 1, 60 59 .cmd_per_lun = 1,
-1
drivers/scsi/pm8001/pm8001_init.c
··· 76 76 .scan_finished = pm8001_scan_finished, 77 77 .scan_start = pm8001_scan_start, 78 78 .change_queue_depth = sas_change_queue_depth, 79 - .change_queue_type = sas_change_queue_type, 80 79 .bios_param = sas_bios_param, 81 80 .can_queue = 1, 82 81 .cmd_per_lun = 1,
-1
drivers/scsi/pmcraid.c
··· 4251 4251 .slave_configure = pmcraid_slave_configure, 4252 4252 .slave_destroy = pmcraid_slave_destroy, 4253 4253 .change_queue_depth = pmcraid_change_queue_depth, 4254 - .change_queue_type = scsi_change_queue_type, 4255 4254 .can_queue = PMCRAID_MAX_IO_CMD, 4256 4255 .this_id = -1, 4257 4256 .sg_tablesize = PMCRAID_MAX_IOADLS,
-2
drivers/scsi/qla2xxx/qla_init.c
··· 3237 3237 struct fc_rport *rport; 3238 3238 unsigned long flags; 3239 3239 3240 - qla2x00_rport_del(fcport); 3241 - 3242 3240 rport_ids.node_name = wwn_to_u64(fcport->node_name); 3243 3241 rport_ids.port_name = wwn_to_u64(fcport->port_name); 3244 3242 rport_ids.port_id = fcport->d_id.b.domain << 16 |
-1
drivers/scsi/qla2xxx/qla_os.c
··· 258 258 .scan_finished = qla2xxx_scan_finished, 259 259 .scan_start = qla2xxx_scan_start, 260 260 .change_queue_depth = scsi_change_queue_depth, 261 - .change_queue_type = scsi_change_queue_type, 262 261 .this_id = -1, 263 262 .cmd_per_lun = 3, 264 263 .use_clustering = ENABLE_CLUSTERING,
+6 -6
drivers/scsi/qla2xxx/qla_target.c
··· 3218 3218 3219 3219 switch (task_codes) { 3220 3220 case ATIO_SIMPLE_QUEUE: 3221 - fcp_task_attr = MSG_SIMPLE_TAG; 3221 + fcp_task_attr = TCM_SIMPLE_TAG; 3222 3222 break; 3223 3223 case ATIO_HEAD_OF_QUEUE: 3224 - fcp_task_attr = MSG_HEAD_TAG; 3224 + fcp_task_attr = TCM_HEAD_TAG; 3225 3225 break; 3226 3226 case ATIO_ORDERED_QUEUE: 3227 - fcp_task_attr = MSG_ORDERED_TAG; 3227 + fcp_task_attr = TCM_ORDERED_TAG; 3228 3228 break; 3229 3229 case ATIO_ACA_QUEUE: 3230 - fcp_task_attr = MSG_ACA_TAG; 3230 + fcp_task_attr = TCM_ACA_TAG; 3231 3231 break; 3232 3232 case ATIO_UNTAGGED: 3233 - fcp_task_attr = MSG_SIMPLE_TAG; 3233 + fcp_task_attr = TCM_SIMPLE_TAG; 3234 3234 break; 3235 3235 default: 3236 3236 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 3237 3237 "qla_target: unknown task code %x, use ORDERED instead\n", 3238 3238 task_codes); 3239 - fcp_task_attr = MSG_ORDERED_TAG; 3239 + fcp_task_attr = TCM_ORDERED_TAG; 3240 3240 break; 3241 3241 } 3242 3242
-22
drivers/scsi/scsi.c
··· 739 739 740 740 if (sdev->last_queue_full_count <= 10) 741 741 return 0; 742 - if (sdev->last_queue_full_depth < 8) { 743 - /* Drop back to untagged */ 744 - scsi_set_tag_type(sdev, 0); 745 - scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun); 746 - return -1; 747 - } 748 742 749 743 return scsi_change_queue_depth(sdev, depth); 750 744 } 751 745 EXPORT_SYMBOL(scsi_track_queue_full); 752 - 753 - /** 754 - * scsi_change_queue_type() - Change a device's queue type 755 - * @sdev: The SCSI device whose queue depth is to change 756 - * @tag_type: Identifier for queue type 757 - */ 758 - int scsi_change_queue_type(struct scsi_device *sdev, int tag_type) 759 - { 760 - if (!sdev->tagged_supported) 761 - return 0; 762 - 763 - scsi_set_tag_type(sdev, tag_type); 764 - return tag_type; 765 - 766 - } 767 - EXPORT_SYMBOL(scsi_change_queue_type); 768 746 769 747 /** 770 748 * scsi_vpd_inquiry - Request a device provide us with a VPD page
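
After this change scsi_track_queue_full() only lowers the queue depth on
repeated QUEUE FULL conditions; it never switches a device back to
untagged operation. Callers are unaffected; the usual pattern in a
driver's completion path is still something like (sketch, hypothetical
driver context):

    /* device returned SAM QUEUE FULL / TASK SET FULL status */
    if (status_byte(scmd->result) == QUEUE_FULL)
            scsi_track_queue_full(scmd->device,
                                  scmd->device->queue_depth - 1);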
+19 -43
drivers/scsi/scsi_debug.c
··· 128 128 #define DEF_REMOVABLE false 129 129 #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */ 130 130 #define DEF_SECTOR_SIZE 512 131 - #define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */ 132 131 #define DEF_UNMAP_ALIGNMENT 0 133 132 #define DEF_UNMAP_GRANULARITY 1 134 133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF ··· 816 817 UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ); 817 818 if (debug) 818 819 cp = "capacity data changed"; 820 + break; 819 821 default: 820 822 pr_warn("%s: unexpected unit attention code=%d\n", 821 823 __func__, k); ··· 3045 3045 u8 num; 3046 3046 unsigned long iflags; 3047 3047 int ret; 3048 + int retval = 0; 3048 3049 3049 - lba = get_unaligned_be32(cmd + 2); 3050 + lba = get_unaligned_be64(cmd + 2); 3050 3051 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */ 3051 3052 if (0 == num) 3052 3053 return 0; /* degenerate case, not an error */ 3053 - dnum = 2 * num; 3054 - arr = kzalloc(dnum * lb_size, GFP_ATOMIC); 3055 - if (NULL == arr) { 3056 - mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, 3057 - INSUFF_RES_ASCQ); 3058 - return check_condition_result; 3059 - } 3060 3054 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && 3061 3055 (cmd[1] & 0xe0)) { 3062 3056 mk_sense_invalid_opcode(scp); ··· 3073 3079 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); 3074 3080 return check_condition_result; 3075 3081 } 3082 + dnum = 2 * num; 3083 + arr = kzalloc(dnum * lb_size, GFP_ATOMIC); 3084 + if (NULL == arr) { 3085 + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, 3086 + INSUFF_RES_ASCQ); 3087 + return check_condition_result; 3088 + } 3076 3089 3077 3090 write_lock_irqsave(&atomic_rw, iflags); ··· 3090 3089 ret = do_device_access(scp, 0, dnum, true); 3091 3090 fake_storep = fake_storep_hold; 3092 3091 if (ret == -1) { 3093 - write_unlock_irqrestore(&atomic_rw, iflags); 3094 - kfree(arr); 3095 - return DID_ERROR << 16; 3092 + retval = DID_ERROR << 16; 3093 + goto cleanup; 3096 3094 } else if ((ret < (dnum * lb_size)) && 3097 3095 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 3098 3096 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " 3099 3097 "indicated=%u, IO sent=%d bytes\n", my_name, 3100 3098 dnum * lb_size, ret); 3101 3099 if (!comp_write_worker(lba, num, arr)) { 3102 - write_unlock_irqrestore(&atomic_rw, iflags); 3103 - kfree(arr); 3104 3100 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0); 3105 - return check_condition_result; 3101 + retval = check_condition_result; 3102 + goto cleanup; 3106 3103 } 3107 3104 if (scsi_debug_lbp()) 3108 3105 map_region(lba, num); 3106 + cleanup: 3109 3107 write_unlock_irqrestore(&atomic_rw, iflags); 3108 + kfree(arr); 3109 + return retval; 3110 - return 0; 3111 3110 } 3112 3111 3113 3112 struct unmap_block_desc { ··· 4439 4438 struct sdebug_host_info *sdhp; 4440 4439 struct sdebug_dev_info *dp; 4441 4440 4441 + spin_lock(&sdebug_host_list_lock); 4442 4442 list_for_each_entry(sdhp, &sdebug_host_list, 4443 4443 host_list) { 4444 4444 list_for_each_entry(dp, &sdhp->dev_info_list, ··· 4448 4446 dp->uas_bm); 4449 4447 } 4450 4448 } 4449 + spin_unlock(&sdebug_host_list_lock); 4451 4450 } 4452 4451 return count; 4453 4452 } ··· 4991 4988 } 4992 4989 4993 4990 static int 4994 - sdebug_change_qtype(struct scsi_device *sdev, int qtype) 4995 - { 4996 - qtype = scsi_change_queue_type(sdev, qtype); 4997 - if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) { 4998 - const char *cp; 4999 - 5000 - switch (qtype) { 5001 - case 0: 5002 - cp = "untagged"; 5003 - break; 5004 - case MSG_SIMPLE_TAG: 5005 - cp = "simple tags"; 5006 - break; 5007 - case MSG_ORDERED_TAG: 5008 - cp = "ordered tags"; 5009 - break; 5010 - default: 5011 - cp = "unknown"; 5012 - break; 5013 - } 5014 - sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp); 5015 - } 5016 - return qtype; 5017 - } 5018 - 5019 - static int 5020 4991 check_inject(struct scsi_cmnd *scp) 5021 4992 { 5022 4993 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); ··· 5189 5212 .ioctl = scsi_debug_ioctl, 5190 5213 .queuecommand = sdebug_queuecommand_lock_or_not, 5191 5214 .change_queue_depth = sdebug_change_qdepth, 5192 - .change_queue_type = sdebug_change_qtype, 5193 5215 .eh_abort_handler = scsi_debug_abort, 5194 5216 .eh_device_reset_handler = scsi_debug_device_reset, 5195 5217 .eh_target_reset_handler = scsi_debug_target_reset,
+1
drivers/scsi/scsi_devinfo.c
··· 211 211 {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN}, 212 212 {"MegaRAID", "LD", NULL, BLIST_FORCELUN}, 213 213 {"MICROP", "4110", NULL, BLIST_NOTQ}, 214 + {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC}, 214 215 {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2}, 215 216 {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN}, 216 217 {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+3 -1
drivers/scsi/scsi_lib.c
··· 1918 1918 1919 1919 if (scsi_host_get_prot(shost)) { 1920 1920 cmd->prot_sdb = (void *)sg + 1921 - shost->sg_tablesize * sizeof(struct scatterlist); 1921 + min_t(unsigned int, 1922 + shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) * 1923 + sizeof(struct scatterlist); 1922 1924 memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); 1923 1925 1924 1926 cmd->prot_sdb->table.sgl =
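
The scsi-mq + T10 PI corruption fixed here is an offset bug: the
per-command scatterlist area only ever holds up to SCSI_MAX_SG_SEGMENTS
entries in its first chunk (larger tables are chained), but the old code
stepped over the full shost->sg_tablesize to locate the protection
descriptor, landing past the allocation for large tablesizes. Roughly
(simplified layout, ignoring the host's private command area):

    /* one allocation: scsi_cmnd, then the first data sg chunk, then PI */
    sg = (void *)cmd + sizeof(struct scsi_cmnd);
    cmd->prot_sdb = (void *)sg +
            min_t(unsigned int, shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
            sizeof(struct scatterlist);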
+4 -26
drivers/scsi/scsi_sysfs.c
··· 738 738 const char *buf, size_t count) 739 739 { 740 740 struct scsi_device *sdev = to_scsi_device(dev); 741 - struct scsi_host_template *sht = sdev->host->hostt; 742 - int tag_type = 0, retval; 743 - int prev_tag_type = scsi_get_tag_type(sdev); 744 741 745 - if (!sdev->tagged_supported || !sht->change_queue_type) 742 + if (!sdev->tagged_supported) 746 743 return -EINVAL; 747 - 748 - /* 749 - * We're never issueing order tags these days, but allow the value 750 - * for backwards compatibility. 751 - */ 752 - if (strncmp(buf, "ordered", 7) == 0 || 753 - strncmp(buf, "simple", 6) == 0) 754 - tag_type = MSG_SIMPLE_TAG; 755 - else if (strncmp(buf, "none", 4) != 0) 756 - return -EINVAL; 757 - 758 - if (tag_type == prev_tag_type) 759 - return count; 760 - 761 - retval = sht->change_queue_type(sdev, tag_type); 762 - if (retval < 0) 763 - return retval; 764 - 744 + 745 + sdev_printk(KERN_INFO, sdev, 746 + "ignoring write to deprecated queue_type attribute"); 765 747 return count; 766 748 } 767 749 ··· 919 937 if (attr == &dev_attr_queue_ramp_up_period.attr && 920 938 !sdev->host->hostt->change_queue_depth) 921 939 return 0; 922 - 923 - if (attr == &dev_attr_queue_type.attr && 924 - !sdev->host->hostt->change_queue_type) 925 - return S_IRUGO; 926 940 927 941 return attr->mode; 928 942 }
+1 -1
drivers/scsi/scsi_transport_spi.c
··· 1221 1221 int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd) 1222 1222 { 1223 1223 if (cmd->flags & SCMD_TAGGED) { 1224 - *msg++ = MSG_SIMPLE_TAG; 1224 + *msg++ = SIMPLE_QUEUE_TAG; 1225 1225 *msg++ = cmd->request->tag; 1226 1226 return 2; 1227 1227 }
+3 -4
drivers/scsi/storvsc_drv.c
··· 1688 1688 if (ret == -EAGAIN) { 1689 1689 /* no more space */ 1690 1690 1691 - if (cmd_request->bounce_sgl_count) { 1691 + if (cmd_request->bounce_sgl_count) 1692 1692 destroy_bounce_buffer(cmd_request->bounce_sgl, 1693 1693 cmd_request->bounce_sgl_count); 1694 1694 1695 - ret = SCSI_MLQUEUE_DEVICE_BUSY; 1696 - goto queue_error; 1697 - } 1695 + ret = SCSI_MLQUEUE_DEVICE_BUSY; 1696 + goto queue_error; 1698 1697 } 1699 1698 1700 1699 return 0;
+7 -7
drivers/target/iscsi/iscsi_target.c
··· 944 944 */ 945 945 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || 946 946 (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) 947 - sam_task_attr = MSG_SIMPLE_TAG; 947 + sam_task_attr = TCM_SIMPLE_TAG; 948 948 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) 949 - sam_task_attr = MSG_ORDERED_TAG; 949 + sam_task_attr = TCM_ORDERED_TAG; 950 950 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) 951 - sam_task_attr = MSG_HEAD_TAG; 951 + sam_task_attr = TCM_HEAD_TAG; 952 952 else if (iscsi_task_attr == ISCSI_ATTR_ACA) 953 - sam_task_attr = MSG_ACA_TAG; 953 + sam_task_attr = TCM_ACA_TAG; 954 954 else { 955 955 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" 956 - " MSG_SIMPLE_TAG\n", iscsi_task_attr); 957 - sam_task_attr = MSG_SIMPLE_TAG; 956 + " TCM_SIMPLE_TAG\n", iscsi_task_attr); 957 + sam_task_attr = TCM_SIMPLE_TAG; 958 958 } 959 959 960 960 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD; ··· 1812 1812 transport_init_se_cmd(&cmd->se_cmd, 1813 1813 &lio_target_fabric_configfs->tf_ops, 1814 1814 conn->sess->se_sess, 0, DMA_NONE, 1815 - MSG_SIMPLE_TAG, cmd->sense_buffer + 2); 1815 + TCM_SIMPLE_TAG, cmd->sense_buffer + 2); 1816 1816 1817 1817 target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true); 1818 1818 sess_ref = true;
+2 -3
drivers/target/loopback/tcm_loop.c
··· 168 168 169 169 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, 170 170 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, 171 - transfer_length, MSG_SIMPLE_TAG, 171 + transfer_length, TCM_SIMPLE_TAG, 172 172 sc->sc_data_direction, 0, 173 173 scsi_sglist(sc), scsi_sg_count(sc), 174 174 sgl_bidi, sgl_bidi_count, ··· 258 258 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 259 259 */ 260 260 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, 261 - DMA_NONE, MSG_SIMPLE_TAG, 261 + DMA_NONE, TCM_SIMPLE_TAG, 262 262 &tl_cmd->tl_sense_buf[0]); 263 263 264 264 rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL); ··· 369 369 .name = "TCM_Loopback", 370 370 .queuecommand = tcm_loop_queuecommand, 371 371 .change_queue_depth = scsi_change_queue_depth, 372 - .change_queue_type = scsi_change_queue_type, 373 372 .eh_abort_handler = tcm_loop_abort_task, 374 373 .eh_device_reset_handler = tcm_loop_device_reset, 375 374 .eh_target_reset_handler = tcm_loop_target_reset,
+1 -1
drivers/target/sbp/sbp_target.c
··· 1237 1237 1238 1238 if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, 1239 1239 req->sense_buf, unpacked_lun, data_length, 1240 - MSG_SIMPLE_TAG, data_dir, 0)) 1240 + TCM_SIMPLE_TAG, data_dir, 0)) 1241 1241 goto err; 1242 1242 1243 1243 return;
+1 -1
drivers/target/target_core_pscsi.c
··· 1095 1095 req->retries = PS_RETRY; 1096 1096 1097 1097 blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, 1098 - (cmd->sam_task_attr == MSG_HEAD_TAG), 1098 + (cmd->sam_task_attr == TCM_HEAD_TAG), 1099 1099 pscsi_req_done); 1100 1100 1101 1101 return 0;
+1 -1
drivers/target/target_core_sbc.c
··· 485 485 cmd->t_data_nents_orig = cmd->t_data_nents; 486 486 cmd->t_data_nents = 1; 487 487 488 - cmd->sam_task_attr = MSG_HEAD_TAG; 488 + cmd->sam_task_attr = TCM_HEAD_TAG; 489 489 cmd->transport_complete_callback = compare_and_write_post; 490 490 /* 491 491 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
+2 -2
drivers/target/target_core_spc.c
··· 1357 1357 * Do implicit HEAD_OF_QUEUE processing for INQUIRY. 1358 1358 * See spc4r17 section 5.3 1359 1359 */ 1360 - cmd->sam_task_attr = MSG_HEAD_TAG; 1360 + cmd->sam_task_attr = TCM_HEAD_TAG; 1361 1361 cmd->execute_cmd = spc_emulate_inquiry; 1362 1362 break; 1363 1363 case SECURITY_PROTOCOL_IN: ··· 1391 1391 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS 1392 1392 * See spc4r17 section 5.3 1393 1393 */ 1394 - cmd->sam_task_attr = MSG_HEAD_TAG; 1394 + cmd->sam_task_attr = TCM_HEAD_TAG; 1395 1395 break; 1396 1396 case TEST_UNIT_READY: 1397 1397 cmd->execute_cmd = spc_emulate_testunitready;
+8 -8
drivers/target/target_core_transport.c
··· 1159 1159 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1160 1160 return 0; 1161 1161 1162 - if (cmd->sam_task_attr == MSG_ACA_TAG) { 1162 + if (cmd->sam_task_attr == TCM_ACA_TAG) { 1163 1163 pr_debug("SAM Task Attribute ACA" 1164 1164 " emulation is not supported\n"); 1165 1165 return TCM_INVALID_CDB_FIELD; ··· 1531 1531 BUG_ON(!se_tpg); 1532 1532 1533 1533 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1534 - 0, DMA_NONE, MSG_SIMPLE_TAG, sense); 1534 + 0, DMA_NONE, TCM_SIMPLE_TAG, sense); 1535 1535 /* 1536 1536 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1537 1537 * allocation failure. ··· 1718 1718 * to allow the passed struct se_cmd list of tasks to the front of the list. 1719 1719 */ 1720 1720 switch (cmd->sam_task_attr) { 1721 - case MSG_HEAD_TAG: 1721 + case TCM_HEAD_TAG: 1722 1722 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " 1723 1723 "se_ordered_id: %u\n", 1724 1724 cmd->t_task_cdb[0], cmd->se_ordered_id); 1725 1725 return false; 1726 - case MSG_ORDERED_TAG: 1726 + case TCM_ORDERED_TAG: 1727 1727 atomic_inc_mb(&dev->dev_ordered_sync); 1728 1728 1729 1729 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " ··· 1828 1828 1829 1829 __target_execute_cmd(cmd); 1830 1830 1831 - if (cmd->sam_task_attr == MSG_ORDERED_TAG) 1831 + if (cmd->sam_task_attr == TCM_ORDERED_TAG) 1832 1832 break; 1833 1833 } 1834 1834 } ··· 1844 1844 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1845 1845 return; 1846 1846 1847 - if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 1847 + if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 1848 1848 atomic_dec_mb(&dev->simple_cmds); 1849 1849 dev->dev_cur_ordered_id++; 1850 1850 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1851 1851 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 1852 1852 cmd->se_ordered_id); 1853 - } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 1853 + } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 1854 1854 dev->dev_cur_ordered_id++; 1855 1855 pr_debug("Incremented dev_cur_ordered_id: %u for" 1856 1856 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 1857 1857 cmd->se_ordered_id); 1858 - } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1858 + } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 1859 1859 atomic_dec_mb(&dev->dev_ordered_sync); 1860 1860 1861 1861 dev->dev_cur_ordered_id++;
+4 -4
drivers/target/tcm_fc/tfc_cmd.c
··· 554 554 */ 555 555 switch (fcp->fc_pri_ta & FCP_PTA_MASK) { 556 556 case FCP_PTA_HEADQ: 557 - task_attr = MSG_HEAD_TAG; 557 + task_attr = TCM_HEAD_TAG; 558 558 break; 559 559 case FCP_PTA_ORDERED: 560 - task_attr = MSG_ORDERED_TAG; 560 + task_attr = TCM_ORDERED_TAG; 561 561 break; 562 562 case FCP_PTA_ACA: 563 - task_attr = MSG_ACA_TAG; 563 + task_attr = TCM_ACA_TAG; 564 564 break; 565 565 case FCP_PTA_SIMPLE: /* Fallthrough */ 566 566 default: 567 - task_attr = MSG_SIMPLE_TAG; 567 + task_attr = TCM_SIMPLE_TAG; 568 568 } 569 569 570 570 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+5 -5
drivers/usb/gadget/legacy/tcm_usb_gadget.c
··· 1131 1131 1132 1132 switch (cmd_iu->prio_attr & 0x7) { 1133 1133 case UAS_HEAD_TAG: 1134 - cmd->prio_attr = MSG_HEAD_TAG; 1134 + cmd->prio_attr = TCM_HEAD_TAG; 1135 1135 break; 1136 1136 case UAS_ORDERED_TAG: 1137 - cmd->prio_attr = MSG_ORDERED_TAG; 1137 + cmd->prio_attr = TCM_ORDERED_TAG; 1138 1138 break; 1139 1139 case UAS_ACA: 1140 - cmd->prio_attr = MSG_ACA_TAG; 1140 + cmd->prio_attr = TCM_ACA_TAG; 1141 1141 break; 1142 1142 default: 1143 1143 pr_debug_once("Unsupported prio_attr: %02x.\n", 1144 1144 cmd_iu->prio_attr); 1145 1145 case UAS_SIMPLE_TAG: 1146 - cmd->prio_attr = MSG_SIMPLE_TAG; 1146 + cmd->prio_attr = TCM_SIMPLE_TAG; 1147 1147 break; 1148 1148 } 1149 1149 ··· 1240 1240 goto err; 1241 1241 } 1242 1242 1243 - cmd->prio_attr = MSG_SIMPLE_TAG; 1243 + cmd->prio_attr = TCM_SIMPLE_TAG; 1244 1244 se_cmd = &cmd->se_cmd; 1245 1245 cmd->unpacked_lun = cbw->Lun; 1246 1246 cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
+1 -1
drivers/xen/xen-scsiback.c
··· 606 606 init_waitqueue_head(&tmr->tmr_wait); 607 607 608 608 transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo, 609 - tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG, 609 + tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG, 610 610 &pending_req->sense_buffer[0]); 611 611 612 612 rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);
-1
include/scsi/libsas.h
··· 688 688 extern int sas_target_alloc(struct scsi_target *); 689 689 extern int sas_slave_configure(struct scsi_device *); 690 690 extern int sas_change_queue_depth(struct scsi_device *, int new_depth); 691 - extern int sas_change_queue_type(struct scsi_device *, int qt); 692 691 extern int sas_bios_param(struct scsi_device *, 693 692 struct block_device *, 694 693 sector_t capacity, int *hsc);
-13
include/scsi/scsi_host.h
··· 278 278 int (* change_queue_depth)(struct scsi_device *, int); 279 279 280 280 /* 281 - * Fill in this function to allow the changing of tag types 282 - * (this also allows the enabling/disabling of tag command 283 - * queueing). An error should only be returned if something 284 - * went wrong in the driver while trying to set the tag type. 285 - * If the driver doesn't support the requested tag type, then 286 - * it should set the closest type it does support without 287 - * returning an error. Returns the actual tag type set. 288 - * 289 - * Status: OPTIONAL 290 - */ 291 - int (* change_queue_type)(struct scsi_device *, int); 292 - 293 - /* 294 281 * This function determines the BIOS parameters for a given 295 282 * harddisk. These tend to be numbers that are made up by 296 283 * the host adapter. Parameters:
-36
include/scsi/scsi_tcq.h
··· 6 6 #include <scsi/scsi_device.h> 7 7 #include <scsi/scsi_host.h> 8 8 9 - #define MSG_SIMPLE_TAG 0x20 10 - #define MSG_HEAD_TAG 0x21 11 - #define MSG_ORDERED_TAG 0x22 12 - #define MSG_ACA_TAG 0x24 /* unsupported */ 13 - 14 9 #define SCSI_NO_TAG (-1) /* identify no tag in use */ 15 10 16 11 17 12 #ifdef CONFIG_BLOCK 18 - 19 - int scsi_change_queue_type(struct scsi_device *sdev, int tag_type); 20 - 21 - /** 22 - * scsi_get_tag_type - get the type of tag the device supports 23 - * @sdev: the scsi device 24 - */ 25 - static inline int scsi_get_tag_type(struct scsi_device *sdev) 26 - { 27 - if (!sdev->tagged_supported) 28 - return 0; 29 - if (sdev->simple_tags) 30 - return MSG_SIMPLE_TAG; 31 - return 0; 32 - } 33 - 34 - static inline void scsi_set_tag_type(struct scsi_device *sdev, int tag) 35 - { 36 - switch (tag) { 37 - case MSG_ORDERED_TAG: 38 - case MSG_SIMPLE_TAG: 39 - sdev->simple_tags = 1; 40 - break; 41 - case 0: 42 - /* fall through */ 43 - default: 44 - sdev->simple_tags = 0; 45 - break; 46 - } 47 - } 48 - 49 13 static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost, 50 14 int unique_tag) 51 15 {
+6
include/target/target_core_base.h
··· 476 476 __be32 ref_tag; 477 477 }; 478 478 479 + /* for sam_task_attr */ 480 + #define TCM_SIMPLE_TAG 0x20 481 + #define TCM_HEAD_TAG 0x21 482 + #define TCM_ORDERED_TAG 0x22 483 + #define TCM_ACA_TAG 0x24 484 + 479 485 struct se_cmd { 480 486 /* SAM response code being sent to initiator */ 481 487 u8 scsi_status;
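
Note that the new TCM_* constants keep the numeric values of the
MSG_*_TAG defines they replace (0x20/0x21/0x22/0x24), so the pscsi
head-of-queue check and the target trace events below decode exactly as
before; only the misleading SPI "message code" naming goes away. Fabric
drivers submit I/O as they always did, e.g. (sketch, mirroring the sbp
usage above):

    rc = target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, unpacked_lun,
                           data_length, TCM_SIMPLE_TAG, data_dir, 0);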
+4 -4
include/trace/events/target.h
··· 109 109 110 110 #define show_task_attribute_name(val) \ 111 111 __print_symbolic(val, \ 112 - { MSG_SIMPLE_TAG, "SIMPLE" }, \ 113 - { MSG_HEAD_TAG, "HEAD" }, \ 114 - { MSG_ORDERED_TAG, "ORDERED" }, \ 115 - { MSG_ACA_TAG, "ACA" } ) 112 + { TCM_SIMPLE_TAG, "SIMPLE" }, \ 113 + { TCM_HEAD_TAG, "HEAD" }, \ 114 + { TCM_ORDERED_TAG, "ORDERED" }, \ 115 + { TCM_ACA_TAG, "ACA" } ) 116 116 117 117 #define show_scsi_status_name(val) \ 118 118 __print_symbolic(val, \