IB/ehca: Add PMA support

This patch enables ehca to redirect any PMA (Performance Management Agent)
queries to the actual PMA QP, whose number the firmware reports when AQP1
is defined.
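
For context: the redirect handshake is carried entirely in the MAD status
word. A client whose PerfMgmt reply arrives with the redirect bit (0x0002)
set is expected to decode the ClassPortInfo payload and re-issue its query
to the LID/QP/Q_Key named there. The standalone sketch below shows that
client-side decision in isolation; it is not part of the patch, and the
struct and helper names are illustrative stand-ins, not a real API.

#include <stdint.h>
#include <arpa/inet.h>

#define MAD_STATUS_REDIRECT 0x0002      /* bit 1 of the MAD status word */

struct redirect_target {                /* illustrative ClassPortInfo subset */
        uint16_t lid;                   /* RedirectLID  */
        uint32_t qpn;                   /* RedirectQP   */
        uint32_t qkey;                  /* RedirectQKey */
};

/* Pick the destination of the next PMA request based on the last reply. */
static void next_pma_dest(uint16_t status_be, const struct redirect_target *t,
                          uint16_t *lid, uint32_t *qpn, uint32_t *qkey)
{
        if (ntohs(status_be) & MAD_STATUS_REDIRECT) {
                *lid = t->lid;          /* follow the redirect...        */
                *qpn = t->qpn;          /* ...to the device's real PMA QP */
                *qkey = t->qkey;
        }
        /* otherwise keep sending to QP1 with the GSI Q_Key */
}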

Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Reviewed-by: Joachim Fenkes <fenkes@de.ibm.com>
Reviewed-by: Christoph Raisch <raisch@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Hoang-Nam Nguyen, committed by Roland Dreier
2b5e6b12 528b03f7

+98 -1

drivers/infiniband/hw/ehca/ehca_classes.h (+1)
···
         spinlock_t mod_sqp_lock;
         enum ib_port_state port_state;
         struct ehca_sma_attr saved_attr;
+        u32 pma_qp_nr;
 };
 
 #define HCA_CAP_MR_PGSIZE_4K 0x80000000
drivers/infiniband/hw/ehca/ehca_iverbs.h (+5)
···
 
 int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
+int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+                     struct ib_wc *in_wc, struct ib_grh *in_grh,
+                     struct ib_mad *in_mad,
+                     struct ib_mad *out_mad);
+
 void ehca_poll_eqs(unsigned long data);
 
 int ehca_calc_ipd(struct ehca_shca *shca, int port,
drivers/infiniband/hw/ehca/ehca_main.c (+1 -1)
···
         shca->ib_device.dealloc_fmr     = ehca_dealloc_fmr;
         shca->ib_device.attach_mcast    = ehca_attach_mcast;
         shca->ib_device.detach_mcast    = ehca_detach_mcast;
-        /* shca->ib_device.process_mad  = ehca_process_mad; */
+        shca->ib_device.process_mad     = ehca_process_mad;
         shca->ib_device.mmap            = ehca_mmap;
 
         if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
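Wiring up process_mad is what activates the code below: the core MAD layer
calls this hook for every MAD received on the device and interprets the
returned flag bits. The following is a compact, hedged paraphrase of that
dispatch, not verbatim kernel code; dispatch_recv_mad() and send_reply()
are stand-ins for the core's receive and agent reply paths.

#include <rdma/ib_mad.h>

extern void send_reply(struct ib_mad *mad);     /* stand-in helper */

static void dispatch_recv_mad(struct ib_device *ibdev, u8 port_num,
                              struct ib_wc *wc, struct ib_grh *grh,
                              struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        int ret;

        if (!ibdev->process_mad)
                return;                 /* no driver filter registered */

        ret = ibdev->process_mad(ibdev, 0, port_num, wc, grh,
                                 in_mad, out_mad);
        if (!(ret & IB_MAD_RESULT_SUCCESS))
                return;                 /* failure: the MAD is dropped */
        if (ret & IB_MAD_RESULT_CONSUMED)
                return;                 /* driver consumed the MAD */
        if (ret & IB_MAD_RESULT_REPLY)
                send_reply(out_mad);    /* e.g. ehca's redirect reply */
        /* otherwise the MAD continues to registered agents as usual */
}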
drivers/infiniband/hw/ehca/ehca_sqp.c (+91)
···
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <rdma/ib_mad.h>
 
 #include "ehca_classes.h"
 #include "ehca_tools.h"
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
 
+#define IB_MAD_STATUS_REDIRECT          __constant_htons(0x0002)
+#define IB_MAD_STATUS_UNSUP_VERSION     __constant_htons(0x0004)
+#define IB_MAD_STATUS_UNSUP_METHOD      __constant_htons(0x0008)
+
+#define IB_PMA_CLASS_PORT_INFO          __constant_htons(0x0001)
 
 /**
  * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
···
                          port, ret);
                 return ret;
         }
+        shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
+        ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
+                 port, pma_qp_nr);
         break;
 default:
         ehca_err(&shca->ib_device, "invalid qp_type=%x",
···
         }
 
         return H_SUCCESS;
+}
+
+struct ib_perf {
+        struct ib_mad_hdr mad_hdr;
+        u8 reserved[40];
+        u8 data[192];
+} __attribute__ ((packed));
+
+
+static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+                             struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+        struct ib_perf *in_perf = (struct ib_perf *)in_mad;
+        struct ib_perf *out_perf = (struct ib_perf *)out_mad;
+        struct ib_class_port_info *poi =
+                (struct ib_class_port_info *)out_perf->data;
+        struct ehca_shca *shca =
+                container_of(ibdev, struct ehca_shca, ib_device);
+        struct ehca_sport *sport = &shca->sport[port_num - 1];
+
+        ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
+
+        *out_mad = *in_mad;
+
+        if (in_perf->mad_hdr.class_version != 1) {
+                ehca_warn(ibdev, "Unsupported class_version=%x",
+                          in_perf->mad_hdr.class_version);
+                out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
+                goto perf_reply;
+        }
+
+        switch (in_perf->mad_hdr.method) {
+        case IB_MGMT_METHOD_GET:
+        case IB_MGMT_METHOD_SET:
+                /* set class port info for redirection */
+                out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
+                out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
+                memset(poi, 0, sizeof(*poi));
+                poi->base_version = 1;
+                poi->class_version = 1;
+                poi->resp_time_value = 18;
+                poi->redirect_lid = sport->saved_attr.lid;
+                poi->redirect_qp = sport->pma_qp_nr;
+                poi->redirect_qkey = IB_QP1_QKEY;
+                poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+                ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
+                         sport->saved_attr.lid, sport->pma_qp_nr);
+                break;
+
+        case IB_MGMT_METHOD_GET_RESP:
+                return IB_MAD_RESULT_FAILURE;
+
+        default:
+                out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
+                break;
+        }
+
+perf_reply:
+        out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+
+        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+                     struct ib_wc *in_wc, struct ib_grh *in_grh,
+                     struct ib_mad *in_mad,
+                     struct ib_mad *out_mad)
+{
+        int ret;
+
+        if (!port_num || port_num > ibdev->phys_port_cnt)
+                return IB_MAD_RESULT_FAILURE;
+
+        /* accept only pma request */
+        if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+                return IB_MAD_RESULT_SUCCESS;
+
+        ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
+        ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+
+        return ret;
 }
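
One detail worth making explicit: struct ib_perf above relies on the IBA rule
that every MAD is exactly 256 bytes, with the PMA data block at offset 64
(24-byte common header plus 40 reserved bytes), which is where the redirect
ClassPortInfo is written. The standalone sketch below checks this layout
arithmetic using local copies of the structs; it is illustrative only and
does not reference the kernel types themselves.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct mad_hdr {                /* local copy of the 24-byte common header */
        uint8_t  base_version;
        uint8_t  mgmt_class;
        uint8_t  class_version;
        uint8_t  method;
        uint16_t status;
        uint16_t class_specific;
        uint64_t tid;
        uint16_t attr_id;
        uint16_t resv;
        uint32_t attr_mod;
} __attribute__ ((packed));

struct perf_mad {               /* local copy of struct ib_perf */
        struct mad_hdr mad_hdr;
        uint8_t reserved[40];
        uint8_t data[192];
} __attribute__ ((packed));

int main(void)
{
        assert(sizeof(struct mad_hdr) == 24);           /* common MAD header */
        assert(offsetof(struct perf_mad, data) == 64);  /* PMA data block */
        assert(sizeof(struct perf_mad) == 256);         /* one full MAD */
        return 0;
}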