Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mthca: Don't execute QUERY_QP firmware command for QP in RESET state
  IB/ehca: Use proper GFP_ flags for get_zeroed_page()
  IB/mthca: Fix PRM compliance problem in atomic-send completions
  RDMA/ucma: Don't report events with invalid user context
  RDMA/ucma: Fix struct ucma_event leak when backlog is full
  RDMA/iwcm: iWARP connection timeouts shouldn't be reported as rejects
  IB/iser: Return error code when PDUs may not be sent
  IB/mthca: Fix off-by-one in FMR handling on memfree

 13 files changed, 79 insertions(+), 47 deletions(-)

 drivers/infiniband/core/cma.c | +14 -3

 		*sin = iw_event->local_addr;
 		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
 		*sin = iw_event->remote_addr;
-		if (iw_event->status)
-			event.event = RDMA_CM_EVENT_REJECTED;
-		else
+		switch (iw_event->status) {
+		case 0:
 			event.event = RDMA_CM_EVENT_ESTABLISHED;
+			break;
+		case -ECONNRESET:
+		case -ECONNREFUSED:
+			event.event = RDMA_CM_EVENT_REJECTED;
+			break;
+		case -ETIMEDOUT:
+			event.event = RDMA_CM_EVENT_UNREACHABLE;
+			break;
+		default:
+			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
+			break;
+		}
 		break;
 	case IW_CM_EVENT_ESTABLISHED:
 		event.event = RDMA_CM_EVENT_ESTABLISHED;

 drivers/infiniband/core/ucma.c | +11

 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 		if (!ctx->backlog) {
 			ret = -EDQUOT;
+			kfree(uevent);
 			goto out;
 		}
 		ctx->backlog--;
+	} else if (!ctx->uid) {
+		/*
+		 * We ignore events for new connections until userspace has set
+		 * their context. This can only happen if an error occurs on a
+		 * new connection before the user accepts it. This is okay,
+		 * since the accept will just fail later.
+		 */
+		kfree(uevent);
+		goto out;
 	}
+
 	list_add_tail(&uevent->list, &ctx->file->event_list);
 	wake_up_interruptible(&ctx->file->poll_wait);
 out:

 drivers/infiniband/hw/ehca/ehca_hca.c | +4 -4

 					      ib_device);
 	struct hipz_query_hca *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
···
 					      ib_device);
 	struct hipz_query_port *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
···
 		return -EINVAL;
 	}
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
···
 		return -EINVAL;
 	}
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;

 drivers/infiniband/hw/ehca/ehca_irq.c | +1 -1

 	u64 *rblock;
 	unsigned long block_count;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
 		ret = -ENOMEM;

 drivers/infiniband/hw/ehca/ehca_iverbs.h | +2 -2

 int ehca_munmap(unsigned long addr, size_t len);
 
 #ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(void);
+void *ehca_alloc_fw_ctrlblock(gfp_t flags);
 void ehca_free_fw_ctrlblock(void *ptr);
 #else
-#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL))
+#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
 #endif

 drivers/infiniband/hw/ehca/ehca_main.c | +5 -5

 #ifdef CONFIG_PPC_64K_PAGES
 static struct kmem_cache *ctblk_cache = NULL;
 
-void *ehca_alloc_fw_ctrlblock(void)
+void *ehca_alloc_fw_ctrlblock(gfp_t flags)
 {
-	void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
+	void *ret = kmem_cache_zalloc(ctblk_cache, flags);
 	if (!ret)
 		ehca_gen_err("Out of memory for ctblk");
 	return ret;
···
 	u64 h_ret;
 	struct hipz_query_hca *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_gen_err("Cannot allocate rblock memory.");
 		return -ENOMEM;
···
 	int ret = 0;
 	struct hipz_query_hca *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
···
 								\
 	shca = dev->driver_data;				\
 								\
-	rblock = ehca_alloc_fw_ctrlblock();			\
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);		\
 	if (!rblock) {						\
 		dev_err(dev, "Can't allocate rblock memory.");	\
 		return 0;					\

 drivers/infiniband/hw/ehca/ehca_mrmw.c | +2 -2

 	u32 i;
 	u64 *kpage;
 
-	kpage = ehca_alloc_fw_ctrlblock();
+	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!kpage) {
 		ehca_err(&shca->ib_device, "kpage alloc failed");
 		ret = -ENOMEM;
···
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 
-	kpage = ehca_alloc_fw_ctrlblock();
+	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!kpage) {
 		ehca_err(&shca->ib_device, "kpage alloc failed");
 		ret = -ENOMEM;

 drivers/infiniband/hw/ehca/ehca_qp.c | +2 -2

 	unsigned long spl_flags = 0;
 
 	/* do query_qp to obtain current attr values */
-	mqpcb = ehca_alloc_fw_ctrlblock();
+	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!mqpcb) {
 		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
 			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
···
 		return -EINVAL;
 	}
 
-	qpcb = ehca_alloc_fw_ctrlblock();
+	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!qpcb) {
 		ehca_err(qp->device,"Out of memory for qpcb "
 			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);

 drivers/infiniband/hw/mthca/mthca_cq.c | +6 -2

 	MTHCA_CQ_ENTRY_SIZE = 0x20
 };
 
+enum {
+	MTHCA_ATOMIC_BYTE_LEN = 8
+};
+
 /*
  * Must be packed because start is 64 bits but only aligned to 32 bits.
  */
···
 			break;
 		case MTHCA_OPCODE_ATOMIC_CS:
 			entry->opcode = IB_WC_COMP_SWAP;
-			entry->byte_len = be32_to_cpu(cqe->byte_cnt);
+			entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
 			break;
 		case MTHCA_OPCODE_ATOMIC_FA:
 			entry->opcode = IB_WC_FETCH_ADD;
-			entry->byte_len = be32_to_cpu(cqe->byte_cnt);
+			entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
 			break;
 		case MTHCA_OPCODE_BIND_MW:
 			entry->opcode = IB_WC_BIND_MW;

 drivers/infiniband/hw/mthca/mthca_memfree.c | +1 -1

 
 	list_for_each_entry(chunk, &icm->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i) {
-			if (chunk->mem[i].length >= offset) {
+			if (chunk->mem[i].length > offset) {
 				page = chunk->mem[i].page;
 				goto out;
 			}

 drivers/infiniband/hw/mthca/mthca_qp.c | +17 -9

 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
-	int err;
-	struct mthca_mailbox *mailbox;
+	int err = 0;
+	struct mthca_mailbox *mailbox = NULL;
 	struct mthca_qp_param *qp_param;
 	struct mthca_qp_context *context;
 	int mthca_state;
 	u8 status;
+
+	if (qp->state == IB_QPS_RESET) {
+		qp_attr->qp_state = IB_QPS_RESET;
+		goto done;
+	}
 
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
 	if (IS_ERR(mailbox))
···
 	mthca_state = be32_to_cpu(context->flags) >> 28;
 
 	qp_attr->qp_state = to_ib_qp_state(mthca_state);
-	qp_attr->cur_qp_state = qp_attr->qp_state;
 	qp_attr->path_mtu = context->mtu_msgmax >> 5;
 	qp_attr->path_mig_state =
 		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
···
 	qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff;
 	qp_attr->qp_access_flags =
 		to_ib_qp_access_flags(be32_to_cpu(context->params2));
-	qp_attr->cap.max_send_wr = qp->sq.max;
-	qp_attr->cap.max_recv_wr = qp->rq.max;
-	qp_attr->cap.max_send_sge = qp->sq.max_gs;
-	qp_attr->cap.max_recv_sge = qp->rq.max_gs;
-	qp_attr->cap.max_inline_data = qp->max_inline_data;
 
 	if (qp->transport == RC || qp->transport == UC) {
 		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
···
 	qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
 	qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5;
 	qp_attr->alt_timeout = context->alt_path.ackto >> 3;
-	qp_init_attr->cap = qp_attr->cap;
+
+done:
+	qp_attr->cur_qp_state = qp_attr->qp_state;
+	qp_attr->cap.max_send_wr = qp->sq.max;
+	qp_attr->cap.max_recv_wr = qp->rq.max;
+	qp_attr->cap.max_send_sge = qp->sq.max_gs;
+	qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+	qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+	qp_init_attr->cap = qp_attr->cap;
 
 out:
 	mthca_free_mailbox(dev, mailbox);

 drivers/infiniband/ulp/iser/iscsi_iser.c | +2 -2

 	 * - if yes, the mtask is recycled at iscsi_complete_pdu
 	 * - if no, the mtask is recycled at iser_snd_completion
 	 */
-	if (error && error != -EAGAIN)
+	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 
 	return error;
···
 		error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
 
 iscsi_iser_ctask_xmit_exit:
-	if (error && error != -EAGAIN)
+	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 	return error;
 }

 drivers/infiniband/ulp/iser/iser_initiator.c | +12 -14

 static int
 iser_check_xmit(struct iscsi_conn *conn, void *task)
 {
-	int rc = 0;
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
-	write_lock_bh(conn->recv_lock);
 	if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
 	    ISER_QP_MAX_REQ_DTOS) {
-		iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task);
-		set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-		rc = -EAGAIN;
+		iser_dbg("%ld can't xmit task %p\n",jiffies,task);
+		return -ENOBUFS;
 	}
-	write_unlock_bh(conn->recv_lock);
-	return rc;
+	return 0;
 }
 
···
 		return -EPERM;
 	}
 	if (iser_check_xmit(conn, ctask))
-		return -EAGAIN;
+		return -ENOBUFS;
 
 	edtl = ntohl(hdr->data_length);
 
···
 	}
 
 	if (iser_check_xmit(conn, ctask))
-		return -EAGAIN;
+		return -ENOBUFS;
 
 	itt = ntohl(hdr->itt);
 	data_seg_len = ntoh24(hdr->dlength);
···
 	}
 
 	if (iser_check_xmit(conn,mtask))
-		return -EAGAIN;
+		return -ENOBUFS;
 
 	/* build the tx desc regd header and add it to the tx desc dto */
 	mdesc->type = ISCSI_TX_CONTROL;
···
 	struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
 	struct iscsi_conn *conn = iser_conn->iscsi_conn;
 	struct iscsi_mgmt_task *mtask;
+	int resume_tx = 0;
 
 	iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
 
···
 	if (tx_desc->type == ISCSI_TX_DATAOUT)
 		kmem_cache_free(ig.desc_cache, tx_desc);
 
+	if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
+	    ISER_QP_MAX_REQ_DTOS)
+		resume_tx = 1;
+
 	atomic_dec(&ib_conn->post_send_buf_count);
 
-	write_lock(conn->recv_lock);
-	if (conn->suspend_tx) {
+	if (resume_tx) {
 		iser_dbg("%ld resuming tx\n",jiffies);
-		clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 		scsi_queue_work(conn->session->host, &conn->xmitwork);
 	}
-	write_unlock(conn->recv_lock);
 
 	if (tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */