Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge patch series "scsi: target: Allow userspace to config cmd submission"

Mike Christie <michael.christie@oracle.com> says:

The following patches were made over Linus's tree but apply over
Martin's branches. They allow userspace to configure how fabric
drivers submit cmds to backend drivers.

Right now loop and vhost use a worker thread, and the other drivers
submit from the contexts they receive/process the cmd from. For
multiple LUN cases where the target can queue more cmds than the
backend can handle, deferring to a worker thread is safest because
the backend driver can block when doing things like waiting for a free
request/tag. Deferring also helps when the target has to handle
transport-level requests from the recv context.

For cases where the backend devices can queue everything the target
sends, there is no need to defer to a workqueue, and you can see a
perf boost of up to 26% for small IO workloads. For an NVMe device and
vhost-scsi I can see the following with 4K IOs:

fio jobs 1 2 4 8 10
--------------------------------------------------
workqueue
submit 94K 190K 394K 770K 890K

direct
submit 128K 252K 488K 950K -

Link: https://lore.kernel.org/r/1b1f7a5c-0988-45f9-b103-dfed2c0405b1@oracle.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

+170 -76
+3
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 3867 3867 .tfc_discovery_attrs = srpt_da_attrs, 3868 3868 .tfc_wwn_attrs = srpt_wwn_attrs, 3869 3869 .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs, 3870 + 3871 + .default_submit_type = TARGET_DIRECT_SUBMIT, 3872 + .direct_submit_supp = 1, 3870 3873 }; 3871 3874 3872 3875 /**
+5
drivers/scsi/elx/efct/efct_lio.c
··· 1611 1611 .sess_get_initiator_sid = NULL, 1612 1612 .tfc_tpg_base_attrs = efct_lio_tpg_attrs, 1613 1613 .tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs, 1614 + .default_submit_type = TARGET_DIRECT_SUBMIT, 1615 + .direct_submit_supp = 1, 1614 1616 }; 1615 1617 1616 1618 static const struct target_core_fabric_ops efct_lio_npiv_ops = { ··· 1648 1646 .sess_get_initiator_sid = NULL, 1649 1647 .tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs, 1650 1648 .tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs, 1649 + 1650 + .default_submit_type = TARGET_DIRECT_SUBMIT, 1651 + .direct_submit_supp = 1, 1651 1652 }; 1652 1653 1653 1654 int efct_scsi_tgt_driver_init(void)
+3
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 3975 3975 .fabric_drop_tpg = ibmvscsis_drop_tpg, 3976 3976 3977 3977 .tfc_wwn_attrs = ibmvscsis_wwn_attrs, 3978 + 3979 + .default_submit_type = TARGET_DIRECT_SUBMIT, 3980 + .direct_submit_supp = 1, 3978 3981 }; 3979 3982 3980 3983 static void ibmvscsis_dev_release(struct device *dev) {};
+6
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 1822 1822 .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, 1823 1823 .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs, 1824 1824 .tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs, 1825 + 1826 + .default_submit_type = TARGET_DIRECT_SUBMIT, 1827 + .direct_submit_supp = 1, 1825 1828 }; 1826 1829 1827 1830 static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { ··· 1862 1859 .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, 1863 1860 1864 1861 .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, 1862 + 1863 + .default_submit_type = TARGET_DIRECT_SUBMIT, 1864 + .direct_submit_supp = 1, 1865 1865 }; 1866 1866 1867 1867 static int tcm_qla2xxx_register_configfs(void)
-6
drivers/target/iscsi/iscsi_target.c
··· 1234 1234 spin_lock_bh(&conn->cmd_lock); 1235 1235 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 1236 1236 spin_unlock_bh(&conn->cmd_lock); 1237 - /* 1238 - * Check if we need to delay processing because of ALUA 1239 - * Active/NonOptimized primary access state.. 1240 - */ 1241 - core_alua_check_nonop_delay(&cmd->se_cmd); 1242 - 1243 1237 return 0; 1244 1238 } 1245 1239 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
+4 -1
drivers/target/iscsi/iscsi_target_configfs.c
··· 1589 1589 .tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs, 1590 1590 .tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs, 1591 1591 1592 - .write_pending_must_be_called = true, 1592 + .write_pending_must_be_called = 1, 1593 + 1594 + .default_submit_type = TARGET_DIRECT_SUBMIT, 1595 + .direct_submit_supp = 1, 1593 1596 };
+1 -1
drivers/target/iscsi/iscsi_target_erl1.c
··· 948 948 949 949 iscsit_set_unsolicited_dataout(cmd); 950 950 } 951 - return transport_handle_cdb_direct(&cmd->se_cmd); 951 + return target_submit(&cmd->se_cmd); 952 952 953 953 case ISCSI_OP_NOOP_OUT: 954 954 case ISCSI_OP_TEXT:
+1 -1
drivers/target/iscsi/iscsi_target_tmr.c
··· 318 318 pr_debug("READ ITT: 0x%08x: t_state: %d never sent to" 319 319 " transport\n", cmd->init_task_tag, 320 320 cmd->se_cmd.t_state); 321 - transport_handle_cdb_direct(se_cmd); 321 + target_submit(se_cmd); 322 322 return 0; 323 323 } 324 324
+3 -1
drivers/target/loopback/tcm_loop.c
··· 154 154 GFP_ATOMIC)) 155 155 return; 156 156 157 - target_queue_submission(se_cmd); 157 + target_submit(se_cmd); 158 158 return; 159 159 160 160 out_done: ··· 1102 1102 .tfc_wwn_attrs = tcm_loop_wwn_attrs, 1103 1103 .tfc_tpg_base_attrs = tcm_loop_tpg_attrs, 1104 1104 .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs, 1105 + .default_submit_type = TARGET_QUEUE_SUBMIT, 1106 + .direct_submit_supp = 0, 1105 1107 }; 1106 1108 1107 1109 static int __init tcm_loop_fabric_init(void)
+3
drivers/target/sbp/sbp_target.c
··· 2278 2278 .tfc_wwn_attrs = sbp_wwn_attrs, 2279 2279 .tfc_tpg_base_attrs = sbp_tpg_base_attrs, 2280 2280 .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs, 2281 + 2282 + .default_submit_type = TARGET_DIRECT_SUBMIT, 2283 + .direct_submit_supp = 1, 2281 2284 }; 2282 2285 2283 2286 static int __init sbp_init(void)
-1
drivers/target/target_core_alua.c
··· 850 850 msleep_interruptible(cmd->alua_nonop_delay); 851 851 return 0; 852 852 } 853 - EXPORT_SYMBOL(core_alua_check_nonop_delay); 854 853 855 854 static int core_alua_write_tpg_metadata( 856 855 const char *path,
+22
drivers/target/target_core_configfs.c
··· 577 577 DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data); 578 578 DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len); 579 579 DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc); 580 + DEF_CONFIGFS_ATTRIB_SHOW(submit_type); 580 581 581 582 #define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \ 582 583 static ssize_t _name##_store(struct config_item *item, const char *page,\ ··· 1232 1231 return count; 1233 1232 } 1234 1233 1234 + static ssize_t submit_type_store(struct config_item *item, const char *page, 1235 + size_t count) 1236 + { 1237 + struct se_dev_attrib *da = to_attrib(item); 1238 + int ret; 1239 + u8 val; 1240 + 1241 + ret = kstrtou8(page, 0, &val); 1242 + if (ret < 0) 1243 + return ret; 1244 + 1245 + if (val > TARGET_QUEUE_SUBMIT) 1246 + return -EINVAL; 1247 + 1248 + da->submit_type = val; 1249 + return count; 1250 + } 1251 + 1235 1252 CONFIGFS_ATTR(, emulate_model_alias); 1236 1253 CONFIGFS_ATTR(, emulate_dpo); 1237 1254 CONFIGFS_ATTR(, emulate_fua_write); ··· 1285 1266 CONFIGFS_ATTR(, max_write_same_len); 1286 1267 CONFIGFS_ATTR(, alua_support); 1287 1268 CONFIGFS_ATTR(, pgr_support); 1269 + CONFIGFS_ATTR(, submit_type); 1288 1270 1289 1271 /* 1290 1272 * dev_attrib attributes for devices using the target core SBC/SPC ··· 1328 1308 &attr_alua_support, 1329 1309 &attr_pgr_support, 1330 1310 &attr_emulate_rsoc, 1311 + &attr_submit_type, 1331 1312 NULL, 1332 1313 }; 1333 1314 EXPORT_SYMBOL(sbc_attrib_attrs); ··· 1346 1325 &attr_emulate_pr, 1347 1326 &attr_alua_support, 1348 1327 &attr_pgr_support, 1328 + &attr_submit_type, 1349 1329 NULL, 1350 1330 }; 1351 1331 EXPORT_SYMBOL(passthrough_attrib_attrs);
+1
drivers/target/target_core_device.c
··· 779 779 dev->dev_attrib.unmap_zeroes_data = 780 780 DA_UNMAP_ZEROES_DATA_DEFAULT; 781 781 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; 782 + dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT; 782 783 783 784 xcopy_lun = &dev->xcopy_lun; 784 785 rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
+24
drivers/target/target_core_fabric_configfs.c
··· 1065 1065 } 1066 1066 CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity); 1067 1067 1068 + static ssize_t 1069 + target_fabric_wwn_default_submit_type_show(struct config_item *item, 1070 + char *page) 1071 + { 1072 + struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn, 1073 + param_group); 1074 + return sysfs_emit(page, "%u\n", 1075 + wwn->wwn_tf->tf_ops->default_submit_type); 1076 + } 1077 + CONFIGFS_ATTR_RO(target_fabric_wwn_, default_submit_type); 1078 + 1079 + static ssize_t 1080 + target_fabric_wwn_direct_submit_supported_show(struct config_item *item, 1081 + char *page) 1082 + { 1083 + struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn, 1084 + param_group); 1085 + return sysfs_emit(page, "%u\n", 1086 + wwn->wwn_tf->tf_ops->direct_submit_supp); 1087 + } 1088 + CONFIGFS_ATTR_RO(target_fabric_wwn_, direct_submit_supported); 1089 + 1068 1090 static struct configfs_attribute *target_fabric_wwn_param_attrs[] = { 1069 1091 &target_fabric_wwn_attr_cmd_completion_affinity, 1092 + &target_fabric_wwn_attr_default_submit_type, 1093 + &target_fabric_wwn_attr_direct_submit_supported, 1070 1094 NULL, 1071 1095 }; 1072 1096
+59 -57
drivers/target/target_core_transport.c
··· 1576 1576 } 1577 1577 EXPORT_SYMBOL(target_cmd_parse_cdb); 1578 1578 1579 - /* 1580 - * Used by fabric module frontends to queue tasks directly. 1581 - * May only be used from process context. 1582 - */ 1583 - int transport_handle_cdb_direct( 1584 - struct se_cmd *cmd) 1579 + static int __target_submit(struct se_cmd *cmd) 1585 1580 { 1586 1581 sense_reason_t ret; 1587 1582 1588 1583 might_sleep(); 1584 + 1585 + /* 1586 + * Check if we need to delay processing because of ALUA 1587 + * Active/NonOptimized primary access state.. 1588 + */ 1589 + core_alua_check_nonop_delay(cmd); 1590 + 1591 + if (cmd->t_data_nents != 0) { 1592 + /* 1593 + * This is primarily a hack for udev and tcm loop which sends 1594 + * INQUIRYs with a single page and expects the data to be 1595 + * cleared. 1596 + */ 1597 + if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1598 + cmd->data_direction == DMA_FROM_DEVICE) { 1599 + struct scatterlist *sgl = cmd->t_data_sg; 1600 + unsigned char *buf = NULL; 1601 + 1602 + BUG_ON(!sgl); 1603 + 1604 + buf = kmap_local_page(sg_page(sgl)); 1605 + if (buf) { 1606 + memset(buf + sgl->offset, 0, sgl->length); 1607 + kunmap_local(buf); 1608 + } 1609 + } 1610 + } 1589 1611 1590 1612 if (!cmd->se_lun) { 1591 1613 dump_stack(); ··· 1636 1614 transport_generic_request_failure(cmd, ret); 1637 1615 return 0; 1638 1616 } 1639 - EXPORT_SYMBOL(transport_handle_cdb_direct); 1640 1617 1641 1618 sense_reason_t 1642 1619 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, ··· 1803 1782 EXPORT_SYMBOL_GPL(target_submit_prep); 1804 1783 1805 1784 /** 1806 - * target_submit - perform final initialization and submit cmd to LIO core 1807 - * @se_cmd: command descriptor to submit 1808 - * 1809 - * target_submit_prep must have been called on the cmd, and this must be 1810 - * called from process context. 
1811 - */ 1812 - void target_submit(struct se_cmd *se_cmd) 1813 - { 1814 - struct scatterlist *sgl = se_cmd->t_data_sg; 1815 - unsigned char *buf = NULL; 1816 - 1817 - might_sleep(); 1818 - 1819 - if (se_cmd->t_data_nents != 0) { 1820 - BUG_ON(!sgl); 1821 - /* 1822 - * A work-around for tcm_loop as some userspace code via 1823 - * scsi-generic do not memset their associated read buffers, 1824 - * so go ahead and do that here for type non-data CDBs. Also 1825 - * note that this is currently guaranteed to be a single SGL 1826 - * for this case by target core in target_setup_cmd_from_cdb() 1827 - * -> transport_generic_cmd_sequencer(). 1828 - */ 1829 - if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1830 - se_cmd->data_direction == DMA_FROM_DEVICE) { 1831 - if (sgl) 1832 - buf = kmap(sg_page(sgl)) + sgl->offset; 1833 - 1834 - if (buf) { 1835 - memset(buf, 0, sgl->length); 1836 - kunmap(sg_page(sgl)); 1837 - } 1838 - } 1839 - 1840 - } 1841 - 1842 - /* 1843 - * Check if we need to delay processing because of ALUA 1844 - * Active/NonOptimized primary access state.. 
1845 - */ 1846 - core_alua_check_nonop_delay(se_cmd); 1847 - 1848 - transport_handle_cdb_direct(se_cmd); 1849 - } 1850 - EXPORT_SYMBOL_GPL(target_submit); 1851 - 1852 - /** 1853 1785 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1854 1786 * 1855 1787 * @se_cmd: command descriptor to submit ··· 1897 1923 se_plug = target_plug_device(se_dev); 1898 1924 } 1899 1925 1900 - target_submit(se_cmd); 1926 + __target_submit(se_cmd); 1901 1927 } 1902 1928 1903 1929 if (se_plug) ··· 1908 1934 * target_queue_submission - queue the cmd to run on the LIO workqueue 1909 1935 * @se_cmd: command descriptor to submit 1910 1936 */ 1911 - void target_queue_submission(struct se_cmd *se_cmd) 1937 + static void target_queue_submission(struct se_cmd *se_cmd) 1912 1938 { 1913 1939 struct se_device *se_dev = se_cmd->se_dev; 1914 1940 int cpu = se_cmd->cpuid; ··· 1918 1944 llist_add(&se_cmd->se_cmd_list, &sq->cmd_list); 1919 1945 queue_work_on(cpu, target_submission_wq, &sq->work); 1920 1946 } 1921 - EXPORT_SYMBOL_GPL(target_queue_submission); 1947 + 1948 + /** 1949 + * target_submit - perform final initialization and submit cmd to LIO core 1950 + * @cmd: command descriptor to submit 1951 + * 1952 + * target_submit_prep or something similar must have been called on the cmd, 1953 + * and this must be called from process context. 
1954 + */ 1955 + int target_submit(struct se_cmd *se_cmd) 1956 + { 1957 + const struct target_core_fabric_ops *tfo = se_cmd->se_sess->se_tpg->se_tpg_tfo; 1958 + struct se_dev_attrib *da = &se_cmd->se_dev->dev_attrib; 1959 + u8 submit_type; 1960 + 1961 + if (da->submit_type == TARGET_FABRIC_DEFAULT_SUBMIT) 1962 + submit_type = tfo->default_submit_type; 1963 + else if (da->submit_type == TARGET_DIRECT_SUBMIT && 1964 + tfo->direct_submit_supp) 1965 + submit_type = TARGET_DIRECT_SUBMIT; 1966 + else 1967 + submit_type = TARGET_QUEUE_SUBMIT; 1968 + 1969 + if (submit_type == TARGET_DIRECT_SUBMIT) 1970 + return __target_submit(se_cmd); 1971 + 1972 + target_queue_submission(se_cmd); 1973 + return 0; 1974 + } 1975 + EXPORT_SYMBOL_GPL(target_submit); 1922 1976 1923 1977 static void target_complete_tmr_failure(struct work_struct *work) 1924 1978 {
+3
drivers/target/tcm_fc/tfc_conf.c
··· 432 432 433 433 .tfc_wwn_attrs = ft_wwn_attrs, 434 434 .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs, 435 + 436 + .default_submit_type = TARGET_DIRECT_SUBMIT, 437 + .direct_submit_supp = 1, 435 438 }; 436 439 437 440 static struct notifier_block ft_notifier = {
+3
drivers/usb/gadget/function/f_tcm.c
··· 1687 1687 1688 1688 .tfc_wwn_attrs = usbg_wwn_attrs, 1689 1689 .tfc_tpg_base_attrs = usbg_base_attrs, 1690 + 1691 + .default_submit_type = TARGET_DIRECT_SUBMIT, 1692 + .direct_submit_supp = 1, 1690 1693 }; 1691 1694 1692 1695 /* Start gadget.c code */
+4 -1
drivers/vhost/scsi.c
··· 909 909 cmd->tvc_prot_sgl_count, GFP_KERNEL)) 910 910 return; 911 911 912 - target_queue_submission(se_cmd); 912 + target_submit(se_cmd); 913 913 } 914 914 915 915 static void ··· 2598 2598 .tfc_wwn_attrs = vhost_scsi_wwn_attrs, 2599 2599 .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs, 2600 2600 .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs, 2601 + 2602 + .default_submit_type = TARGET_QUEUE_SUBMIT, 2603 + .direct_submit_supp = 1, 2601 2604 }; 2602 2605 2603 2606 static int __init vhost_scsi_init(void)
+3
drivers/xen/xen-scsiback.c
··· 1832 1832 .tfc_wwn_attrs = scsiback_wwn_attrs, 1833 1833 .tfc_tpg_base_attrs = scsiback_tpg_attrs, 1834 1834 .tfc_tpg_param_attrs = scsiback_param_attrs, 1835 + 1836 + .default_submit_type = TARGET_DIRECT_SUBMIT, 1837 + .direct_submit_supp = 1, 1835 1838 }; 1836 1839 1837 1840 static const struct xenbus_device_id scsiback_ids[] = {
+10
include/target/target_core_base.h
··· 108 108 #define SE_MODE_PAGE_BUF 512 109 109 #define SE_SENSE_BUF 96 110 110 111 + enum target_submit_type { 112 + /* Use the fabric driver's default submission type */ 113 + TARGET_FABRIC_DEFAULT_SUBMIT, 114 + /* Submit from the calling context */ 115 + TARGET_DIRECT_SUBMIT, 116 + /* Defer submission to the LIO workqueue */ 117 + TARGET_QUEUE_SUBMIT, 118 + }; 119 + 111 120 /* struct se_hba->hba_flags */ 112 121 enum hba_flags_table { 113 122 HBA_FLAGS_INTERNAL_USE = 0x01, ··· 726 717 u32 unmap_granularity; 727 718 u32 unmap_granularity_alignment; 728 719 u32 max_write_same_len; 720 + u8 submit_type; 729 721 struct se_device *da_dev; 730 722 struct config_group da_group; 731 723 };
+12 -7
include/target/target_core_fabric.h
··· 113 113 struct configfs_attribute **tfc_tpg_nacl_param_attrs; 114 114 115 115 /* 116 - * Set this member variable to true if the SCSI transport protocol 116 + * Set this member variable if the SCSI transport protocol 117 117 * (e.g. iSCSI) requires that the Data-Out buffer is transferred in 118 118 * its entirety before a command is aborted. 119 119 */ 120 - bool write_pending_must_be_called; 120 + unsigned int write_pending_must_be_called:1; 121 + /* 122 + * Set this if the driver supports submitting commands to the backend 123 + * from target_submit/target_submit_cmd. 124 + */ 125 + unsigned int direct_submit_supp:1; 126 + /* 127 + * Set this to a target_submit_type value. 128 + */ 129 + u8 default_submit_type; 121 130 }; 122 131 123 132 int target_register_template(const struct target_core_fabric_ops *fo); ··· 175 166 struct scatterlist *sgl, u32 sgl_count, 176 167 struct scatterlist *sgl_bidi, u32 sgl_bidi_count, 177 168 struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp); 178 - void target_submit(struct se_cmd *se_cmd); 169 + int target_submit(struct se_cmd *se_cmd); 179 170 sense_reason_t transport_lookup_cmd_lun(struct se_cmd *); 180 171 sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb, 181 172 gfp_t gfp); 182 173 sense_reason_t target_cmd_parse_cdb(struct se_cmd *); 183 174 void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, 184 175 unsigned char *, u64, u32, int, int, int); 185 - void target_queue_submission(struct se_cmd *se_cmd); 186 176 187 177 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 188 178 unsigned char *sense, u64 unpacked_lun, 189 179 void *fabric_tmr_ptr, unsigned char tm_type, 190 180 gfp_t, u64, int); 191 - int transport_handle_cdb_direct(struct se_cmd *); 192 181 sense_reason_t transport_generic_new_cmd(struct se_cmd *); 193 182 194 183 void target_put_cmd_and_wait(struct se_cmd *cmd); ··· 203 196 void target_stop_session(struct se_session 
*se_sess); 204 197 void target_wait_for_sess_cmds(struct se_session *); 205 198 void target_show_cmd(const char *pfx, struct se_cmd *cmd); 206 - 207 - int core_alua_check_nonop_delay(struct se_cmd *); 208 199 209 200 int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t); 210 201 void core_tmr_release_req(struct se_tmr_req *);