Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IB/ehca: SRQ fixes to enable IPoIB CM
IB/ehca: Fix Small QP regressions

+46 -26
+7 -3
drivers/infiniband/hw/ehca/ehca_hca.c
···
 93  93 	props->max_pd          = min_t(int, rblock->max_pd, INT_MAX);
 94  94 	props->max_ah          = min_t(int, rblock->max_ah, INT_MAX);
 95  95 	props->max_fmr         = min_t(int, rblock->max_mr, INT_MAX);
 96      - 	props->max_srq         = 0;
 97      - 	props->max_srq_wr      = 0;
 98      - 	props->max_srq_sge     = 0;
     96  +
     97  + 	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
     98  + 		props->max_srq         = props->max_qp;
     99  + 		props->max_srq_wr      = props->max_qp_wr;
    100  + 		props->max_srq_sge     = 3;
    101  + 	}
    102  +
 99 103 	props->max_pkeys       = 16;
100 104 	props->local_ca_ack_delay
101 105 		= rblock->local_ca_ack_delay;
+32 -18
drivers/infiniband/hw/ehca/ehca_irq.c
···
175 175
176 176 }
177 177
    178 + static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
    179 +                               enum ib_event_type event_type)
    180 + {
    181 + 	struct ib_event event;
    182 +
    183 + 	event.device = &shca->ib_device;
    184 + 	event.event = event_type;
    185 +
    186 + 	if (qp->ext_type == EQPT_SRQ) {
    187 + 		if (!qp->ib_srq.event_handler)
    188 + 			return;
    189 +
    190 + 		event.element.srq = &qp->ib_srq;
    191 + 		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
    192 + 	} else {
    193 + 		if (!qp->ib_qp.event_handler)
    194 + 			return;
    195 +
    196 + 		event.element.qp = &qp->ib_qp;
    197 + 		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
    198 + 	}
    199 + }
    200 +
178 201 static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
179 202                               enum ib_event_type event_type, int fatal)
180 203 {
181     - 	struct ib_event event;
182 204 	struct ehca_qp *qp;
183 205 	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
184 206
···
208 186 	qp = idr_find(&ehca_qp_idr, token);
209 187 	read_unlock(&ehca_qp_idr_lock);
210 188
211     -
212 189 	if (!qp)
213 190 		return;
214 191
215 192 	if (fatal)
216 193 		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
217 194
218     - 	event.device = &shca->ib_device;
    195 + 	dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
    196 + 			  IB_EVENT_SRQ_ERR : event_type);
219     -
220     - 	if (qp->ext_type == EQPT_SRQ) {
221     - 		if (!qp->ib_srq.event_handler)
222     - 			return;
223     -
224     - 		event.event = fatal ? IB_EVENT_SRQ_ERR : event_type;
225     - 		event.element.srq = &qp->ib_srq;
226     - 		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
227     - 	} else {
228     - 		if (!qp->ib_qp.event_handler)
229     - 			return;
230     -
231     - 		event.event = event_type;
232     - 		event.element.qp = &qp->ib_qp;
233     - 		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
234     - 	}
    198 + 	/*
    199 + 	 * eHCA only processes one WQE at a time for SRQ base QPs,
    200 + 	 * so the last WQE has been processed as soon as the QP enters
    201 + 	 * error state.
    202 + 	 */
    203 + 	if (fatal && qp->ext_type == EQPT_SRQBASE)
    204 + 		dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
235 205
236 206 	return;
237 207 }
+6 -4
drivers/infiniband/hw/ehca/ehca_qp.c
···
600 600
601 601 	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)
602 602 	    && !(context && udata)) { /* no small QP support in userspace ATM */
603     - 		ehca_determine_small_queue(
604     - 			&parms.squeue, max_send_sge, is_llqp);
605     - 		ehca_determine_small_queue(
606     - 			&parms.rqueue, max_recv_sge, is_llqp);
    603 + 		if (HAS_SQ(my_qp))
    604 + 			ehca_determine_small_queue(
    605 + 				&parms.squeue, max_send_sge, is_llqp);
    606 + 		if (HAS_RQ(my_qp))
    607 + 			ehca_determine_small_queue(
    608 + 				&parms.rqueue, max_recv_sge, is_llqp);
607 609 		parms.qp_storage =
608 610 			(parms.squeue.is_small || parms.rqueue.is_small);
609 611 	}
+1 -1
drivers/infiniband/hw/ehca/ipz_pt_fn.c
···
172 172 	unsigned long bit;
173 173 	int free_page = 0;
174 174
175     - 	bit = ((unsigned long)queue->queue_pages[0] & PAGE_MASK)
    175 + 	bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
176 176 		>> (order + 9);
177 177
178 178 	mutex_lock(&pd->lock);