Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
"This is nine fixes, seven of which are for the qedi driver (new as of
4.10); the other two are a use-after-free in the cxgbi drivers and a
potential NULL dereference in the rdac device handler"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
scsi: libcxgbi: fix skb use after free
scsi: qedi: Fix endpoint NULL panic during recovery.
scsi: qedi: set max_fin_rt default value
scsi: qedi: Set firmware tcp msl timer value.
scsi: qedi: Fix endpoint NULL panic in qedi_set_path.
scsi: qedi: Set dma_boundary to 0xfff.
scsi: qedi: Correctly set firmware max supported BDs.
scsi: qedi: Fix bad pte call trace when iscsiuio is stopped.
scsi: scsi_dh_rdac: Use ctlr directly in rdac_failover_get()

Changed files
+53 -35
drivers
+18 -7
drivers/scsi/cxgbi/libcxgbi.c
··· 1873 1873 tcp_task->dd_data = tdata; 1874 1874 task->hdr = NULL; 1875 1875 1876 + if (tdata->skb) { 1877 + kfree_skb(tdata->skb); 1878 + tdata->skb = NULL; 1879 + } 1880 + 1876 1881 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && 1877 1882 (opcode == ISCSI_OP_SCSI_DATA_OUT || 1878 1883 (opcode == ISCSI_OP_SCSI_CMD && ··· 1895 1890 return -ENOMEM; 1896 1891 } 1897 1892 1893 + skb_get(tdata->skb); 1898 1894 skb_reserve(tdata->skb, cdev->skb_tx_rsvd); 1899 1895 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 1900 1896 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ ··· 2041 2035 unsigned int datalen; 2042 2036 int err; 2043 2037 2044 - if (!skb) { 2038 + if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) { 2045 2039 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2046 - "task 0x%p, skb NULL.\n", task); 2040 + "task 0x%p, skb 0x%p\n", task, skb); 2047 2041 return 0; 2048 2042 } 2049 2043 ··· 2056 2050 } 2057 2051 2058 2052 datalen = skb->data_len; 2059 - tdata->skb = NULL; 2060 2053 2061 2054 /* write ppod first if using ofldq to write ppod */ 2062 2055 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { ··· 2083 2078 pdulen += ISCSI_DIGEST_SIZE; 2084 2079 2085 2080 task->conn->txdata_octets += pdulen; 2081 + cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE); 2086 2082 return 0; 2087 2083 } 2088 2084 ··· 2092 2086 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", 2093 2087 task, skb, skb->len, skb->data_len, err); 2094 2088 /* reset skb to send when we are called again */ 2095 - tdata->skb = skb; 2096 2089 return err; 2097 2090 } 2098 2091 ··· 2099 2094 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", 2100 2095 task->itt, skb, skb->len, skb->data_len, err); 2101 2096 2102 - kfree_skb(skb); 2097 + __kfree_skb(tdata->skb); 2098 + tdata->skb = NULL; 2103 2099 2104 2100 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); 2105 2101 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); ··· 2119 2113 2120 2114 tcp_task->dd_data = 
NULL; 2121 2115 /* never reached the xmit task callout */ 2122 - if (tdata->skb) 2123 - __kfree_skb(tdata->skb); 2116 + if (tdata->skb) { 2117 + kfree_skb(tdata->skb); 2118 + tdata->skb = NULL; 2119 + } 2124 2120 2125 2121 task_release_itt(task, task->hdr_itt); 2126 2122 memset(tdata, 0, sizeof(*tdata)); ··· 2722 2714 static int __init libcxgbi_init_module(void) 2723 2715 { 2724 2716 pr_info("%s", version); 2717 + 2718 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < 2719 + sizeof(struct cxgbi_skb_cb)); 2725 2720 return 0; 2726 2721 } 2727 2722
+8 -8
drivers/scsi/cxgbi/libcxgbi.h
··· 195 195 }; 196 196 197 197 struct cxgbi_skb_tx_cb { 198 - void *l2t; 198 + void *handle; 199 + void *arp_err_handler; 199 200 struct sk_buff *wr_next; 200 201 }; 201 202 ··· 204 203 SKCBF_TX_NEED_HDR, /* packet needs a header */ 205 204 SKCBF_TX_MEM_WRITE, /* memory write */ 206 205 SKCBF_TX_FLAG_COMPL, /* wr completion flag */ 206 + SKCBF_TX_DONE, /* skb tx done */ 207 207 SKCBF_RX_COALESCED, /* received whole pdu */ 208 208 SKCBF_RX_HDR, /* received pdu header */ 209 209 SKCBF_RX_DATA, /* received pdu payload */ ··· 217 215 }; 218 216 219 217 struct cxgbi_skb_cb { 220 - unsigned char ulp_mode; 221 - unsigned long flags; 222 - unsigned int seq; 223 218 union { 224 219 struct cxgbi_skb_rx_cb rx; 225 220 struct cxgbi_skb_tx_cb tx; 226 221 }; 222 + unsigned char ulp_mode; 223 + unsigned long flags; 224 + unsigned int seq; 227 225 }; 228 226 229 227 #define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) ··· 376 374 cxgbi_skcb_tx_wr_next(skb) = NULL; 377 375 /* 378 376 * We want to take an extra reference since both us and the driver 379 - * need to free the packet before it's really freed. We know there's 380 - * just one user currently so we use atomic_set rather than skb_get 381 - * to avoid the atomic op. 377 + * need to free the packet before it's really freed. 382 378 */ 383 - atomic_set(&skb->users, 2); 379 + skb_get(skb); 384 380 385 381 if (!csk->wr_pending_head) 386 382 csk->wr_pending_head = skb;
+4 -6
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 265 265 struct list_head *list, 266 266 unsigned char *cdb) 267 267 { 268 - struct scsi_device *sdev = ctlr->ms_sdev; 269 - struct rdac_dh_data *h = sdev->handler_data; 270 268 struct rdac_mode_common *common; 271 269 unsigned data_size; 272 270 struct rdac_queue_data *qdata; 273 271 u8 *lun_table; 274 272 275 - if (h->ctlr->use_ms10) { 273 + if (ctlr->use_ms10) { 276 274 struct rdac_pg_expanded *rdac_pg; 277 275 278 276 data_size = sizeof(struct rdac_pg_expanded); 279 - rdac_pg = &h->ctlr->mode_select.expanded; 277 + rdac_pg = &ctlr->mode_select.expanded; 280 278 memset(rdac_pg, 0, data_size); 281 279 common = &rdac_pg->common; 282 280 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; ··· 286 288 struct rdac_pg_legacy *rdac_pg; 287 289 288 290 data_size = sizeof(struct rdac_pg_legacy); 289 - rdac_pg = &h->ctlr->mode_select.legacy; 291 + rdac_pg = &ctlr->mode_select.legacy; 290 292 memset(rdac_pg, 0, data_size); 291 293 common = &rdac_pg->common; 292 294 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; ··· 302 304 } 303 305 304 306 /* Prepare the command. */ 305 - if (h->ctlr->use_ms10) { 307 + if (ctlr->use_ms10) { 306 308 cdb[0] = MODE_SELECT_10; 307 309 cdb[7] = data_size >> 8; 308 310 cdb[8] = data_size & 0xff;
+2 -1
drivers/scsi/qedi/qedi.h
··· 38 38 #define QEDI_MAX_ISCSI_TASK 4096 39 39 #define QEDI_MAX_TASK_NUM 0x0FFF 40 40 #define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 41 - #define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */ 41 + #define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */ 42 42 #define MAX_OUSTANDING_TASKS_PER_CON 1024 43 43 44 44 #define QEDI_MAX_BD_LEN 0xffff ··· 63 63 #define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) 64 64 65 65 #define QEDI_PAGE_SIZE 4096 66 + #define QEDI_HW_DMA_BOUNDARY 0xfff 66 67 #define QEDI_PATH_HANDLE 0xFE0000000UL 67 68 68 69 struct qedi_uio_ctrl {
+2
drivers/scsi/qedi/qedi_fw.c
··· 1494 1494 tmf_hdr = (struct iscsi_tm *)mtask->hdr; 1495 1495 qedi_cmd = (struct qedi_cmd *)mtask->dd_data; 1496 1496 ep = qedi_conn->ep; 1497 + if (!ep) 1498 + return -ENODEV; 1497 1499 1498 1500 tid = qedi_get_task_idx(qedi); 1499 1501 if (tid == -1)
+6 -1
drivers/scsi/qedi/qedi_iscsi.c
··· 59 59 .this_id = -1, 60 60 .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, 61 61 .max_sectors = 0xffff, 62 + .dma_boundary = QEDI_HW_DMA_BOUNDARY, 62 63 .cmd_per_lun = 128, 63 64 .use_clustering = ENABLE_CLUSTERING, 64 65 .shost_attrs = qedi_shost_attrs, ··· 1224 1223 1225 1224 iscsi_cid = (u32)path_data->handle; 1226 1225 qedi_ep = qedi->ep_tbl[iscsi_cid]; 1227 - QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 1226 + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 1228 1227 "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); 1228 + if (!qedi_ep) { 1229 + ret = -EINVAL; 1230 + goto set_path_exit; 1231 + } 1229 1232 1230 1233 if (!is_valid_ether_addr(&path_data->mac_addr[0])) { 1231 1234 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+13 -12
drivers/scsi/qedi/qedi_main.c
··· 151 151 152 152 static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) 153 153 { 154 + if (udev->uctrl) { 155 + free_page((unsigned long)udev->uctrl); 156 + udev->uctrl = NULL; 157 + } 158 + 154 159 if (udev->ll2_ring) { 155 160 free_page((unsigned long)udev->ll2_ring); 156 161 udev->ll2_ring = NULL; ··· 174 169 __qedi_free_uio_rings(udev); 175 170 176 171 pci_dev_put(udev->pdev); 177 - kfree(udev->uctrl); 178 172 kfree(udev); 179 173 } 180 174 ··· 212 208 if (udev->ll2_ring || udev->ll2_buf) 213 209 return rc; 214 210 211 + /* Memory for control area. */ 212 + udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL); 213 + if (!udev->uctrl) 214 + return -ENOMEM; 215 + 215 216 /* Allocating memory for LL2 ring */ 216 217 udev->ll2_ring_size = QEDI_PAGE_SIZE; 217 218 udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); ··· 246 237 static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) 247 238 { 248 239 struct qedi_uio_dev *udev = NULL; 249 - struct qedi_uio_ctrl *uctrl = NULL; 250 240 int rc = 0; 251 241 252 242 list_for_each_entry(udev, &qedi_udev_list, list) { ··· 266 258 goto err_udev; 267 259 } 268 260 269 - uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL); 270 - if (!uctrl) { 271 - rc = -ENOMEM; 272 - goto err_uctrl; 273 - } 274 - 275 261 udev->uio_dev = -1; 276 262 277 263 udev->qedi = qedi; 278 264 udev->pdev = qedi->pdev; 279 - udev->uctrl = uctrl; 280 265 281 266 rc = __qedi_alloc_uio_rings(udev); 282 267 if (rc) 283 - goto err_uio_rings; 268 + goto err_uctrl; 284 269 285 270 list_add(&udev->list, &qedi_udev_list); 286 271 ··· 284 283 udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; 285 284 return 0; 286 285 287 - err_uio_rings: 288 - kfree(uctrl); 289 286 err_uctrl: 290 287 kfree(udev); 291 288 err_udev: ··· 827 828 qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; 828 829 qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; 829 830 qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; 831 + 
qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000; 832 + qedi->pf_params.iscsi_pf_params.max_fin_rt = 2; 830 833 831 834 for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { 832 835 if ((1 << log_page_size) == PAGE_SIZE)