Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull scsi target fixes from Nicholas Bellinger:
"Here is the current set of target-pending fixes headed for v3.6-final

The main parts of this series include bug-fixes from Paolo Bonzini to
address a use-after-free bug in pSCSI sense exception handling, along
with addressing some long-standing bugs wrt the handling of zero-
length SCSI CDB payloads also specific to pSCSI pass-through device
backends."

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
target: go through normal processing for zero-length REQUEST_SENSE
target: support zero allocation length in REQUEST SENSE
target: support zero-size allocation lengths in transport_kmap_data_sg
target: fail REPORT LUNS with less than 16 bytes of payload
target: report too-small parameter lists everywhere
target: go through normal processing for zero-length PSCSI commands
target: fix use-after-free with PSCSI sense data
target: simplify code around transport_get_sense_data
target: move transport_get_sense_data
target: Check idr_get_new return value in iscsi_login_zero_tsih_s1
target: Fix ->data_length re-assignment bug with SCSI overflow

+10 -1
drivers/target/iscsi/iscsi_target_login.c
···
 {
 	struct iscsi_session *sess = NULL;
 	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+	int ret;
 
 	sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
 	if (!sess) {
···
 		return -ENOMEM;
 	}
 	spin_lock(&sess_idr_lock);
-	idr_get_new(&sess_idr, NULL, &sess->session_index);
+	ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
 	spin_unlock(&sess_idr_lock);
+
+	if (ret < 0) {
+		pr_err("idr_get_new() for sess_idr failed\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		kfree(sess);
+		return -ENOMEM;
+	}
 
 	sess->creation_time = get_jiffies_64();
 	spin_lock_init(&sess->session_stats_lock);
+7
drivers/target/target_core_alua.c
···
 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		return -EINVAL;
 	}
+	if (cmd->data_length < 4) {
+		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
+			" small\n", cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		return -EINVAL;
+	}
+
 	buf = transport_kmap_data_sg(cmd);
 
 	/*
+7
drivers/target/target_core_device.c
···
 	unsigned char *buf;
 	u32 lun_count = 0, offset = 8, i;
 
+	if (se_cmd->data_length < 16) {
+		pr_warn("REPORT LUNS allocation length %u too small\n",
+			se_cmd->data_length);
+		se_cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -EINVAL;
+	}
+
 	buf = transport_kmap_data_sg(se_cmd);
 	if (!buf)
 		return -ENOMEM;
+15 -2
drivers/target/target_core_iblock.c
···
 	struct iblock_dev *ibd = dev->dev_ptr;
 	unsigned char *buf, *ptr = NULL;
 	sector_t lba;
-	int size = cmd->data_length;
+	int size;
 	u32 range;
 	int ret = 0;
 	int dl, bd_dl;
+
+	if (cmd->data_length < 8) {
+		pr_warn("UNMAP parameter list length %u too small\n",
+			cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		return -EINVAL;
+	}
 
 	buf = transport_kmap_data_sg(cmd);
 
 	dl = get_unaligned_be16(&buf[0]);
 	bd_dl = get_unaligned_be16(&buf[2]);
 
-	size = min(size - 8, bd_dl);
+	size = cmd->data_length - 8;
+	if (bd_dl > size)
+		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
+			cmd->data_length, bd_dl);
+	else
+		size = bd_dl;
+
 	if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
 		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
 		ret = -EINVAL;
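The clamping logic introduced above can be read in isolation: the 8-byte UNMAP parameter list header is not descriptor data, and the BLOCK DESCRIPTOR DATA LENGTH advertised in that header cannot be trusted beyond what the initiator actually transferred. A minimal userspace sketch of the same calculation (illustrative only, not kernel code; the function name is invented):

#include <stdio.h>

/*
 * Illustrative sketch only -- names here are invented, not the kernel's.
 * Return the number of usable UNMAP block-descriptor bytes: reject
 * parameter lists shorter than the 8-byte header, and cap bd_dl at the
 * number of bytes actually received after that header.
 */
static int unmap_desc_bytes(unsigned int data_length, unsigned int bd_dl)
{
	unsigned int avail;

	if (data_length < 8)
		return -1;			/* parameter list too small */

	avail = data_length - 8;
	return bd_dl > avail ? (int)avail : (int)bd_dl;
}

int main(void)
{
	printf("%d\n", unmap_desc_bytes(24, 16));	/* 16: one 16-byte descriptor */
	printf("%d\n", unmap_desc_bytes(24, 64));	/* 16: bd_dl clamped to transfer */
	printf("%d\n", unmap_desc_bytes(4, 16));	/* -1: header missing */
	return 0;
}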
+8
drivers/target/target_core_pr.c
···
 	tidh_new->dest_local_nexus = 1;
 	list_add_tail(&tidh_new->dest_list, &tid_dest_list);
 
+	if (cmd->data_length < 28) {
+		pr_warn("SPC-PR: Received PR OUT parameter list"
+			" length too small: %u\n", cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+
 	buf = transport_kmap_data_sg(cmd);
 	/*
 	 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
+11 -18
drivers/target/target_core_pscsi.c
···
 	kfree(pdv);
 }
 
-static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
+static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
+	unsigned char *sense_buffer)
 {
 	struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
 	struct scsi_device *sd = pdv->pdv_sd;
···
 	 * not been allocated because TCM is handling the emulation directly.
 	 */
 	if (!pt)
-		return 0;
+		return;
 
 	cdb = &pt->pscsi_cdb[0];
 	result = pt->pscsi_result;
···
 	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
 	 * forced.
 	 */
+	if (!cmd->se_deve || !cmd->data_length)
+		goto after_mode_sense;
+
 	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
 	    (status_byte(result) << 1) == SAM_STAT_GOOD) {
-		if (!cmd->se_deve)
-			goto after_mode_sense;
-
 		if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
 			unsigned char *buf = transport_kmap_data_sg(cmd);
···
 	}
 after_mode_sense:
 
-	if (sd->type != TYPE_TAPE)
+	if (sd->type != TYPE_TAPE || !cmd->data_length)
 		goto after_mode_select;
 
 	/*
···
 	}
 after_mode_select:
 
-	if (status_byte(result) & CHECK_CONDITION)
-		return 1;
-
-	return 0;
+	if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) {
+		memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER);
+		cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+	}
 }
 
 enum {
···
 	return -ENOMEM;
 }
 
-static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd)
-{
-	struct pscsi_plugin_task *pt = cmd->priv;
-
-	return pt->pscsi_sense;
-}
-
 /* pscsi_get_device_rev():
  *
  *
···
 	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
 	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
 	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
-	.get_sense_buffer = pscsi_get_sense_buffer,
 	.get_device_rev = pscsi_get_device_rev,
 	.get_device_type = pscsi_get_device_type,
 	.get_blocks = pscsi_get_blocks,
+18 -17
drivers/target/target_core_spc.c
···
 static int spc_emulate_request_sense(struct se_cmd *cmd)
 {
 	unsigned char *cdb = cmd->t_task_cdb;
-	unsigned char *buf;
+	unsigned char *rbuf;
 	u8 ua_asc = 0, ua_ascq = 0;
-	int err = 0;
+	unsigned char buf[SE_SENSE_BUF];
+
+	memset(buf, 0, SE_SENSE_BUF);
 
 	if (cdb[1] & 0x01) {
 		pr_err("REQUEST_SENSE description emulation not"
···
 		return -ENOSYS;
 	}
 
-	buf = transport_kmap_data_sg(cmd);
-
-	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
+	rbuf = transport_kmap_data_sg(cmd);
+	if (cmd->scsi_sense_reason != 0) {
+		/*
+		 * Out of memory.  We will fail with CHECK CONDITION, so
+		 * we must not clear the unit attention condition.
+		 */
+		target_complete_cmd(cmd, CHECK_CONDITION);
+		return 0;
+	} else if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
 		/*
 		 * CURRENT ERROR, UNIT ATTENTION
 		 */
 		buf[0] = 0x70;
 		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
 
-		if (cmd->data_length < 18) {
-			buf[7] = 0x00;
-			err = -EINVAL;
-			goto end;
-		}
 		/*
 		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
···
 		buf[0] = 0x70;
 		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
 
-		if (cmd->data_length < 18) {
-			buf[7] = 0x00;
-			err = -EINVAL;
-			goto end;
-		}
 		/*
 		 * NO ADDITIONAL SENSE INFORMATION
···
 		buf[7] = 0x0A;
 	}
 
-end:
-	transport_kunmap_data_sg(cmd);
+	if (rbuf) {
+		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+		transport_kunmap_data_sg(cmd);
+	}
+
 	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
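The rework above drops the old "allocation length < 18" special case: sense data is now always built in a local buffer and at most the allocation length is copied back to the initiator, so a zero-length REQUEST SENSE transfers nothing and still completes with GOOD status. A standalone sketch of that truncation rule (illustrative only; the buffer size and helper name are assumptions, not the kernel API):

#include <stdio.h>
#include <string.h>

#define LOCAL_SENSE_BUF	96	/* analogous to the new SE_SENSE_BUF; an assumption here */

/*
 * Illustrative sketch only -- names and constants are invented.
 * Build fixed-format "NO SENSE" data locally, then copy at most
 * alloc_len bytes to the initiator's buffer.  A zero allocation
 * length copies nothing.  Returns the number of bytes copied.
 */
static size_t emulate_request_sense(unsigned char *rbuf, size_t alloc_len)
{
	unsigned char buf[LOCAL_SENSE_BUF];
	size_t len;

	memset(buf, 0, sizeof(buf));
	buf[0] = 0x70;		/* current error, fixed format */
	buf[2] = 0x00;		/* sense key: NO SENSE */
	buf[7] = 0x0a;		/* additional sense length */

	len = alloc_len < sizeof(buf) ? alloc_len : sizeof(buf);
	if (rbuf && len)
		memcpy(rbuf, buf, len);
	return len;
}

int main(void)
{
	unsigned char out[18];

	printf("%zu\n", emulate_request_sense(out, sizeof(out)));	/* 18 */
	printf("%zu\n", emulate_request_sense(NULL, 0));		/* 0  */
	return 0;
}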
+61 -87
drivers/target/target_core_transport.c
···
 	transport_generic_request_failure(cmd);
 }
 
+/*
+ * Used when asking transport to copy Sense Data from the underlying
+ * Linux/SCSI struct scsi_cmnd
+ */
+static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
+{
+	unsigned char *buffer = cmd->sense_buffer;
+	struct se_device *dev = cmd->se_dev;
+	u32 offset = 0;
+
+	WARN_ON(!cmd->se_lun);
+
+	if (!dev)
+		return NULL;
+
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+		return NULL;
+
+	offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);
+
+	/* Automatically padded */
+	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
+
+	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
+		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
+	return &buffer[offset];
+}
+
 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
 	struct se_device *dev = cmd->se_dev;
···
 	cmd->transport_state &= ~CMD_T_BUSY;
 
 	if (dev && dev->transport->transport_complete) {
-		if (dev->transport->transport_complete(cmd,
-				cmd->t_data_sg) != 0) {
-			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+		dev->transport->transport_complete(cmd,
+				cmd->t_data_sg,
+				transport_get_sense_buffer(cmd));
+		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
 			success = 1;
-		}
 	}
 
 	/*
···
 			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
 			goto out_invalid_cdb_field;
 		}
-
+		/*
+		 * For the overflow case keep the existing fabric provided
+		 * ->data_length.  Otherwise for the underflow case, reset
+		 * ->data_length to the smaller SCSI expected data transfer
+		 * length.
+		 */
 		if (size > cmd->data_length) {
 			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
 			cmd->residual_count = (size - cmd->data_length);
 		} else {
 			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
 			cmd->residual_count = (cmd->data_length - size);
+			cmd->data_length = size;
 		}
-		cmd->data_length = size;
 	}
 
 	return 0;
···
 EXPORT_SYMBOL(target_execute_cmd);
 
 /*
- * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
- */
-static int transport_get_sense_data(struct se_cmd *cmd)
-{
-	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
-	struct se_device *dev = cmd->se_dev;
-	unsigned long flags;
-	u32 offset = 0;
-
-	WARN_ON(!cmd->se_lun);
-
-	if (!dev)
-		return 0;
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return 0;
-	}
-
-	if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
-		goto out;
-
-	if (!dev->transport->get_sense_buffer) {
-		pr_err("dev->transport->get_sense_buffer is NULL\n");
-		goto out;
-	}
-
-	sense_buffer = dev->transport->get_sense_buffer(cmd);
-	if (!sense_buffer) {
-		pr_err("ITT 0x%08x cmd %p: Unable to locate"
-			" sense buffer for task with sense\n",
-			cmd->se_tfo->get_task_tag(cmd), cmd);
-		goto out;
-	}
-
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);
-
-	memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);
-
-	/* Automatically padded */
-	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
-
-	pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
-		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
-	return 0;
-
-out:
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-	return -1;
-}
-
-/*
  * Process all commands up to the last received ORDERED task attribute which
  * requires another blocking boundary
  */
···
 static void target_complete_ok_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
-	int reason = 0, ret;
+	int ret;
 
 	/*
 	 * Check if we need to move delayed/dormant tasks from cmds on the
···
 		schedule_work(&cmd->se_dev->qf_work_queue);
 
 	/*
-	 * Check if we need to retrieve a sense buffer from
+	 * Check if we need to send a sense buffer from
 	 * the struct se_cmd in question.
 	 */
 	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-		if (transport_get_sense_data(cmd) < 0)
-			reason = TCM_NON_EXISTENT_LUN;
+		WARN_ON(!cmd->scsi_status);
+		ret = transport_send_check_condition_and_sense(
+				cmd, 0, 1);
+		if (ret == -EAGAIN || ret == -ENOMEM)
+			goto queue_full;
 
-		if (cmd->scsi_status) {
-			ret = transport_send_check_condition_and_sense(
-					cmd, reason, 1);
-			if (ret == -EAGAIN || ret == -ENOMEM)
-				goto queue_full;
-
-			transport_lun_remove_cmd(cmd);
-			transport_cmd_check_stop_to_fabric(cmd);
-			return;
-		}
+		transport_lun_remove_cmd(cmd);
+		transport_cmd_check_stop_to_fabric(cmd);
+		return;
 	}
 	/*
 	 * Check for a callback, used by amongst other things
···
 	struct page **pages;
 	int i;
 
-	BUG_ON(!sg);
 	/*
 	 * We need to take into account a possible offset here for fabrics like
 	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
···
 	 */
 	if (!cmd->t_data_nents)
 		return NULL;
-	else if (cmd->t_data_nents == 1)
+
+	BUG_ON(!sg);
+	if (cmd->t_data_nents == 1)
 		return kmap(sg_page(sg)) + sg->offset;
 
 	/* >1 page. use vmap */
 	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
-	if (!pages)
+	if (!pages) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		return NULL;
+	}
 
 	/* convert sg[] to pages[] */
 	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
···
 	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
 	kfree(pages);
-	if (!cmd->t_data_vmap)
+	if (!cmd->t_data_vmap) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		return NULL;
+	}
 
 	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
 }
···
 	 * into the fabric for data transfers, go ahead and complete it right
 	 * away.
 	 */
-	if (!cmd->data_length) {
+	if (!cmd->data_length &&
+	    cmd->t_task_cdb[0] != REQUEST_SENSE &&
+	    cmd->se_dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
 		spin_lock_irq(&cmd->t_state_lock);
 		cmd->t_state = TRANSPORT_COMPLETE;
 		cmd->transport_state |= CMD_T_ACTIVE;
 		spin_unlock_irq(&cmd->t_state_lock);
-
-		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
-			u8 ua_asc = 0, ua_ascq = 0;
-
-			core_scsi3_ua_clear_for_request_sense(cmd,
-					&ua_asc, &ua_ascq);
-		}
 
 		INIT_WORK(&cmd->work, target_complete_ok_work);
 		queue_work(target_completion_wq, &cmd->work);
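Several of the hunks above hinge on the revised transport_kmap_data_sg() contract: a NULL return now means either a zero-length payload (not an error) or a mapping failure, with the latter reported out-of-band via cmd->scsi_sense_reason. A toy userspace model of that contract (illustrative only; the struct and helper are invented stand-ins, not the kernel types):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel structures -- assumptions only. */
struct fake_cmd {
	size_t	data_length;
	int	sense_reason;	/* 0 = no error recorded */
};

/*
 * Mimics the revised mapping contract: return NULL for a zero-length
 * payload without flagging an error, and record a sense reason only
 * when the mapping itself fails.
 */
static void *fake_kmap_data(struct fake_cmd *cmd)
{
	void *p;

	if (!cmd->data_length)
		return NULL;		/* nothing to map, not an error */

	p = malloc(cmd->data_length);
	if (!p)
		cmd->sense_reason = 1;	/* i.e. LOGICAL UNIT COMMUNICATION FAILURE */
	return p;
}

int main(void)
{
	struct fake_cmd zero = { .data_length = 0 };
	struct fake_cmd some = { .data_length = 64 };
	void *a = fake_kmap_data(&zero);
	void *b = fake_kmap_data(&some);

	printf("zero-length: buf=%p err=%d\n", a, zero.sense_reason);
	printf("64 bytes:    buf=%p err=%d\n", b, some.sense_reason);
	free(b);
	return 0;
}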
+3 -1
include/target/target_core_backend.h
···
 	struct se_device *(*create_virtdevice)(struct se_hba *,
 				struct se_subsystem_dev *, void *);
 	void (*free_device)(void *);
-	int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *);
+	void (*transport_complete)(struct se_cmd *cmd,
+				   struct scatterlist *,
+				   unsigned char *);
 
 	int (*parse_cdb)(struct se_cmd *cmd);
 	ssize_t (*check_configfs_dev_params)(struct se_hba *,
+1
include/target/target_core_base.h
···
 
 #define SE_INQUIRY_BUF		512
 #define SE_MODE_PAGE_BUF	512
+#define SE_SENSE_BUF		96
 
 /* struct se_hba->hba_flags */
 enum hba_flags_table {