Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA: Change MAD processing function to remove extra casting and parameter

All users of process_mad() convert input pointers from ib_mad_hdr to
ib_mad; update the function declaration to use ib_mad directly.

Also remove the unused input MAD size parameter.

Link: https://lore.kernel.org/r/20191029062745.7932-17-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Tested-By: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

authored by

Leon Romanovsky and committed by
Jason Gunthorpe
e26e7b88 333ee7e2

+104 -154
+6 -6
drivers/infiniband/core/mad.c
··· 913 913 914 914 /* No GRH for DR SMP */ 915 915 ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL, 916 - (const struct ib_mad_hdr *)smp, mad_size, 917 - (struct ib_mad_hdr *)mad_priv->mad, 918 - &mad_size, &out_mad_pkey_index); 916 + (const struct ib_mad *)smp, 917 + (struct ib_mad *)mad_priv->mad, &mad_size, 918 + &out_mad_pkey_index); 919 919 switch (ret) 920 920 { 921 921 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: ··· 2321 2321 if (port_priv->device->ops.process_mad) { 2322 2322 ret = port_priv->device->ops.process_mad( 2323 2323 port_priv->device, 0, port_priv->port_num, wc, 2324 - &recv->grh, (const struct ib_mad_hdr *)recv->mad, 2325 - recv->mad_size, (struct ib_mad_hdr *)response->mad, 2326 - &mad_size, &resp_mad_pkey_index); 2324 + &recv->grh, (const struct ib_mad *)recv->mad, 2325 + (struct ib_mad *)response->mad, &mad_size, 2326 + &resp_mad_pkey_index); 2327 2327 2328 2328 if (opa) 2329 2329 wc->pkey_index = resp_mad_pkey_index;
+2 -4
drivers/infiniband/core/sysfs.c
··· 497 497 if (attr != IB_PMA_CLASS_PORT_INFO) 498 498 in_mad->data[41] = port_num; /* PortSelect field */ 499 499 500 - if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY, 501 - port_num, NULL, NULL, 502 - (const struct ib_mad_hdr *)in_mad, mad_size, 503 - (struct ib_mad_hdr *)out_mad, &mad_size, 500 + if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY, port_num, NULL, NULL, 501 + in_mad, out_mad, &mad_size, 504 502 &out_mad_pkey_index) & 505 503 (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) != 506 504 (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
+5 -8
drivers/infiniband/hw/hfi1/mad.c
··· 4915 4915 */ 4916 4916 int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, 4917 4917 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 4918 - const struct ib_mad_hdr *in_mad, size_t in_mad_size, 4919 - struct ib_mad_hdr *out_mad, size_t *out_mad_size, 4920 - u16 *out_mad_pkey_index) 4918 + const struct ib_mad *in_mad, struct ib_mad *out_mad, 4919 + size_t *out_mad_size, u16 *out_mad_pkey_index) 4921 4920 { 4922 - switch (in_mad->base_version) { 4921 + switch (in_mad->mad_hdr.base_version) { 4923 4922 case OPA_MGMT_BASE_VERSION: 4924 4923 return hfi1_process_opa_mad(ibdev, mad_flags, port, 4925 4924 in_wc, in_grh, ··· 4927 4928 out_mad_size, 4928 4929 out_mad_pkey_index); 4929 4930 case IB_MGMT_BASE_VERSION: 4930 - return hfi1_process_ib_mad(ibdev, mad_flags, port, 4931 - in_wc, in_grh, 4932 - (const struct ib_mad *)in_mad, 4933 - (struct ib_mad *)out_mad); 4931 + return hfi1_process_ib_mad(ibdev, mad_flags, port, in_wc, 4932 + in_grh, in_mad, out_mad); 4934 4933 default: 4935 4934 break; 4936 4935 }
+2 -3
drivers/infiniband/hw/hfi1/verbs.h
··· 330 330 void hfi1_node_desc_chg(struct hfi1_ibport *ibp); 331 331 int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, 332 332 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 333 - const struct ib_mad_hdr *in_mad, size_t in_mad_size, 334 - struct ib_mad_hdr *out_mad, size_t *out_mad_size, 335 - u16 *out_mad_pkey_index); 333 + const struct ib_mad *in_mad, struct ib_mad *out_mad, 334 + size_t *out_mad_size, u16 *out_mad_pkey_index); 336 335 337 336 /* 338 337 * The PSN_MASK and PSN_SHIFT allow for
+11 -14
drivers/infiniband/hw/mlx4/mad.c
··· 983 983 984 984 int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 985 985 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 986 - const struct ib_mad_hdr *in, size_t in_mad_size, 987 - struct ib_mad_hdr *out, size_t *out_mad_size, 988 - u16 *out_mad_pkey_index) 986 + const struct ib_mad *in, struct ib_mad *out, 987 + size_t *out_mad_size, u16 *out_mad_pkey_index) 989 988 { 990 989 struct mlx4_ib_dev *dev = to_mdev(ibdev); 991 - const struct ib_mad *in_mad = (const struct ib_mad *)in; 992 - struct ib_mad *out_mad = (struct ib_mad *)out; 993 990 enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num); 994 991 995 992 /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA ··· 994 997 */ 995 998 if (link == IB_LINK_LAYER_INFINIBAND) { 996 999 if (mlx4_is_slave(dev->dev) && 997 - (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && 998 - (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS || 999 - in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT || 1000 - in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO))) 1001 - return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, 1002 - in_grh, in_mad, out_mad); 1000 + (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && 1001 + (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS || 1002 + in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT || 1003 + in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO))) 1004 + return iboe_process_mad(ibdev, mad_flags, port_num, 1005 + in_wc, in_grh, in, out); 1003 1006 1004 - return ib_process_mad(ibdev, mad_flags, port_num, in_wc, 1005 - in_grh, in_mad, out_mad); 1007 + return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, 1008 + in, out); 1006 1009 } 1007 1010 1008 1011 if (link == IB_LINK_LAYER_ETHERNET) 1009 1012 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, 1010 - in_grh, in_mad, out_mad); 1013 + in_grh, in, out); 1011 1014 1012 1015 return -EINVAL; 1013 1016 }
+3 -4
drivers/infiniband/hw/mlx4/mlx4_ib.h
··· 786 786 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags, 787 787 int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, 788 788 const void *in_mad, void *response_mad); 789 - int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 789 + int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 790 790 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 791 - const struct ib_mad_hdr *in, size_t in_mad_size, 792 - struct ib_mad_hdr *out, size_t *out_mad_size, 793 - u16 *out_mad_pkey_index); 791 + const struct ib_mad *in, struct ib_mad *out, 792 + size_t *out_mad_size, u16 *out_mad_pkey_index); 794 793 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev); 795 794 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev); 796 795
+10 -14
drivers/infiniband/hw/mlx5/mad.c
··· 219 219 220 220 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 221 221 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 222 - const struct ib_mad_hdr *in, size_t in_mad_size, 223 - struct ib_mad_hdr *out, size_t *out_mad_size, 224 - u16 *out_mad_pkey_index) 222 + const struct ib_mad *in, struct ib_mad *out, 223 + size_t *out_mad_size, u16 *out_mad_pkey_index) 225 224 { 226 225 struct mlx5_ib_dev *dev = to_mdev(ibdev); 227 - const struct ib_mad *in_mad = (const struct ib_mad *)in; 228 - struct ib_mad *out_mad = (struct ib_mad *)out; 229 - u8 mgmt_class = in_mad->mad_hdr.mgmt_class; 230 - u8 method = in_mad->mad_hdr.method; 226 + u8 mgmt_class = in->mad_hdr.mgmt_class; 227 + u8 method = in->mad_hdr.method; 231 228 u16 slid; 232 229 int err; 233 230 ··· 244 247 245 248 /* Don't process SMInfo queries -- the SMA can't handle them. 246 249 */ 247 - if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) 250 + if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) 248 251 return IB_MAD_RESULT_SUCCESS; 249 252 } break; 250 253 case IB_MGMT_CLASS_PERF_MGMT: 251 254 if (MLX5_CAP_GEN(dev->mdev, vport_counters) && 252 255 method == IB_MGMT_METHOD_GET) 253 - return process_pma_cmd(dev, port_num, in_mad, out_mad); 256 + return process_pma_cmd(dev, port_num, in, out); 254 257 /* fallthrough */ 255 258 case MLX5_IB_VENDOR_CLASS1: 256 259 /* fallthrough */ ··· 264 267 return IB_MAD_RESULT_SUCCESS; 265 268 } 266 269 267 - err = mlx5_MAD_IFC(to_mdev(ibdev), 268 - mad_flags & IB_MAD_IGNORE_MKEY, 269 - mad_flags & IB_MAD_IGNORE_BKEY, 270 - port_num, in_wc, in_grh, in_mad, out_mad); 270 + err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY, 271 + mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc, 272 + in_grh, in, out); 271 273 if (err) 272 274 return IB_MAD_RESULT_FAILURE; 273 275 274 276 /* set return bit in status of directed route responses */ 275 277 if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 276 - out_mad->mad_hdr.status |= 
cpu_to_be16(1 << 15); 278 + out->mad_hdr.status |= cpu_to_be16(1 << 15); 277 279 278 280 if (method == IB_MGMT_METHOD_TRAP_REPRESS) 279 281 /* no response for trap repress */
+2 -3
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 1192 1192 unsigned int *meta_sg_offset); 1193 1193 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 1194 1194 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 1195 - const struct ib_mad_hdr *in, size_t in_mad_size, 1196 - struct ib_mad_hdr *out, size_t *out_mad_size, 1197 - u16 *out_mad_pkey_index); 1195 + const struct ib_mad *in, struct ib_mad *out, 1196 + size_t *out_mad_size, u16 *out_mad_pkey_index); 1198 1197 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, 1199 1198 struct ib_udata *udata); 1200 1199 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+4 -8
drivers/infiniband/hw/mthca/mthca_dev.h
··· 576 576 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); 577 577 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); 578 578 579 - int mthca_process_mad(struct ib_device *ibdev, 580 - int mad_flags, 581 - u8 port_num, 582 - const struct ib_wc *in_wc, 583 - const struct ib_grh *in_grh, 584 - const struct ib_mad_hdr *in, size_t in_mad_size, 585 - struct ib_mad_hdr *out, size_t *out_mad_size, 586 - u16 *out_mad_pkey_index); 579 + int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 580 + const struct ib_wc *in_wc, const struct ib_grh *in_grh, 581 + const struct ib_mad *in, struct ib_mad *out, 582 + size_t *out_mad_size, u16 *out_mad_pkey_index); 587 583 int mthca_create_agents(struct mthca_dev *dev); 588 584 void mthca_free_agents(struct mthca_dev *dev); 589 585
+31 -39
drivers/infiniband/hw/mthca/mthca_mad.c
··· 196 196 } 197 197 } 198 198 199 - int mthca_process_mad(struct ib_device *ibdev, 200 - int mad_flags, 201 - u8 port_num, 202 - const struct ib_wc *in_wc, 203 - const struct ib_grh *in_grh, 204 - const struct ib_mad_hdr *in, size_t in_mad_size, 205 - struct ib_mad_hdr *out, size_t *out_mad_size, 206 - u16 *out_mad_pkey_index) 199 + int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 200 + const struct ib_wc *in_wc, const struct ib_grh *in_grh, 201 + const struct ib_mad *in, struct ib_mad *out, 202 + size_t *out_mad_size, u16 *out_mad_pkey_index) 207 203 { 208 204 int err; 209 205 u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE); 210 206 u16 prev_lid = 0; 211 207 struct ib_port_attr pattr; 212 - const struct ib_mad *in_mad = (const struct ib_mad *)in; 213 - struct ib_mad *out_mad = (struct ib_mad *)out; 214 208 215 209 /* Forward locally generated traps to the SM */ 216 - if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && 217 - slid == 0) { 218 - forward_trap(to_mdev(ibdev), port_num, in_mad); 210 + if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP && !slid) { 211 + forward_trap(to_mdev(ibdev), port_num, in); 219 212 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; 220 213 } 221 214 ··· 218 225 * Only handle PMA and Mellanox vendor-specific class gets and 219 226 * sets for other classes. 
220 227 */ 221 - if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || 222 - in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 223 - if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && 224 - in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && 225 - in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) 228 + if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || 229 + in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 230 + if (in->mad_hdr.method != IB_MGMT_METHOD_GET && 231 + in->mad_hdr.method != IB_MGMT_METHOD_SET && 232 + in->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) 226 233 return IB_MAD_RESULT_SUCCESS; 227 234 228 235 /* 229 236 * Don't process SMInfo queries or vendor-specific 230 237 * MADs -- the SMA can't handle them. 231 238 */ 232 - if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || 233 - ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == 239 + if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || 240 + ((in->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == 234 241 IB_SMP_ATTR_VENDOR_MASK)) 235 242 return IB_MAD_RESULT_SUCCESS; 236 - } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || 237 - in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 || 238 - in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) { 239 - if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && 240 - in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) 243 + } else if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || 244 + in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 || 245 + in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) { 246 + if (in->mad_hdr.method != IB_MGMT_METHOD_GET && 247 + in->mad_hdr.method != IB_MGMT_METHOD_SET) 241 248 return IB_MAD_RESULT_SUCCESS; 242 249 } else 243 250 return IB_MAD_RESULT_SUCCESS; 244 - if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || 245 - in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && 246 - in_mad->mad_hdr.method == IB_MGMT_METHOD_SET && 247 - 
in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && 251 + if ((in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || 252 + in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && 253 + in->mad_hdr.method == IB_MGMT_METHOD_SET && 254 + in->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && 248 255 !ib_query_port(ibdev, port_num, &pattr)) 249 256 prev_lid = ib_lid_cpu16(pattr.lid); 250 257 251 - err = mthca_MAD_IFC(to_mdev(ibdev), 252 - mad_flags & IB_MAD_IGNORE_MKEY, 253 - mad_flags & IB_MAD_IGNORE_BKEY, 254 - port_num, in_wc, in_grh, in_mad, out_mad); 258 + err = mthca_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY, 259 + mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc, 260 + in_grh, in, out); 255 261 if (err == -EBADMSG) 256 262 return IB_MAD_RESULT_SUCCESS; 257 263 else if (err) { ··· 258 266 return IB_MAD_RESULT_FAILURE; 259 267 } 260 268 261 - if (!out_mad->mad_hdr.status) { 262 - smp_snoop(ibdev, port_num, in_mad, prev_lid); 263 - node_desc_override(ibdev, out_mad); 269 + if (!out->mad_hdr.status) { 270 + smp_snoop(ibdev, port_num, in, prev_lid); 271 + node_desc_override(ibdev, out); 264 272 } 265 273 266 274 /* set return bit in status of directed route responses */ 267 - if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 268 - out_mad->mad_hdr.status |= cpu_to_be16(1 << 15); 275 + if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 276 + out->mad_hdr.status |= cpu_to_be16(1 << 15); 269 277 270 - if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) 278 + if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) 271 279 /* no response for trap repress */ 272 280 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; 273 281
+6 -11
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
··· 247 247 return 0; 248 248 } 249 249 250 - int ocrdma_process_mad(struct ib_device *ibdev, 251 - int process_mad_flags, 252 - u8 port_num, 253 - const struct ib_wc *in_wc, 254 - const struct ib_grh *in_grh, 255 - const struct ib_mad_hdr *in, size_t in_mad_size, 256 - struct ib_mad_hdr *out, size_t *out_mad_size, 250 + int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags, 251 + u8 port_num, const struct ib_wc *in_wc, 252 + const struct ib_grh *in_grh, const struct ib_mad *in, 253 + struct ib_mad *out, size_t *out_mad_size, 257 254 u16 *out_mad_pkey_index) 258 255 { 259 256 int status = IB_MAD_RESULT_SUCCESS; 260 257 struct ocrdma_dev *dev; 261 - const struct ib_mad *in_mad = (const struct ib_mad *)in; 262 - struct ib_mad *out_mad = (struct ib_mad *)out; 263 258 264 - if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { 259 + if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { 265 260 dev = get_ocrdma_dev(ibdev); 266 - ocrdma_pma_counters(dev, out_mad); 261 + ocrdma_pma_counters(dev, out); 267 262 status |= IB_MAD_RESULT_REPLY; 268 263 } 269 264
+4 -7
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
··· 56 56 void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags); 57 57 int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 58 58 59 - int ocrdma_process_mad(struct ib_device *, 60 - int process_mad_flags, 61 - u8 port_num, 62 - const struct ib_wc *in_wc, 63 - const struct ib_grh *in_grh, 64 - const struct ib_mad_hdr *in, size_t in_mad_size, 65 - struct ib_mad_hdr *out, size_t *out_mad_size, 59 + int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags, 60 + u8 port_num, const struct ib_wc *in_wc, 61 + const struct ib_grh *in_grh, const struct ib_mad *in, 62 + struct ib_mad *out, size_t *out_mad_size, 66 63 u16 *out_mad_pkey_index); 67 64 #endif /* __OCRDMA_AH_H__ */
+4 -13
drivers/infiniband/hw/qedr/verbs.c
··· 4346 4346 } 4347 4347 4348 4348 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags, 4349 - u8 port_num, 4350 - const struct ib_wc *in_wc, 4351 - const struct ib_grh *in_grh, 4352 - const struct ib_mad_hdr *mad_hdr, 4353 - size_t in_mad_size, struct ib_mad_hdr *out_mad, 4354 - size_t *out_mad_size, u16 *out_mad_pkey_index) 4349 + u8 port_num, const struct ib_wc *in_wc, 4350 + const struct ib_grh *in_grh, const struct ib_mad *in, 4351 + struct ib_mad *out_mad, size_t *out_mad_size, 4352 + u16 *out_mad_pkey_index) 4355 4353 { 4356 - struct qedr_dev *dev = get_qedr_dev(ibdev); 4357 - 4358 - DP_DEBUG(dev, QEDR_MSG_GSI, 4359 - "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n", 4360 - mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod, 4361 - mad_hdr->class_specific, mad_hdr->class_version, 4362 - mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status); 4363 4354 return IB_MAD_RESULT_SUCCESS; 4364 4355 }
+3 -4
drivers/infiniband/hw/qedr/verbs.h
··· 92 92 const struct ib_recv_wr **bad_wr); 93 93 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags, 94 94 u8 port_num, const struct ib_wc *in_wc, 95 - const struct ib_grh *in_grh, 96 - const struct ib_mad_hdr *in_mad, 97 - size_t in_mad_size, struct ib_mad_hdr *out_mad, 98 - size_t *out_mad_size, u16 *out_mad_pkey_index); 95 + const struct ib_grh *in_grh, const struct ib_mad *in_mad, 96 + struct ib_mad *out_mad, size_t *out_mad_size, 97 + u16 *out_mad_pkey_index); 99 98 100 99 int qedr_port_immutable(struct ib_device *ibdev, u8 port_num, 101 100 struct ib_port_immutable *immutable);
+6 -9
drivers/infiniband/hw/qib/qib_mad.c
··· 2386 2386 */ 2387 2387 int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, 2388 2388 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 2389 - const struct ib_mad_hdr *in, size_t in_mad_size, 2390 - struct ib_mad_hdr *out, size_t *out_mad_size, 2391 - u16 *out_mad_pkey_index) 2389 + const struct ib_mad *in, struct ib_mad *out, 2390 + size_t *out_mad_size, u16 *out_mad_pkey_index) 2392 2391 { 2393 2392 int ret; 2394 2393 struct qib_ibport *ibp = to_iport(ibdev, port); 2395 2394 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 2396 - const struct ib_mad *in_mad = (const struct ib_mad *)in; 2397 - struct ib_mad *out_mad = (struct ib_mad *)out; 2398 2395 2399 - switch (in_mad->mad_hdr.mgmt_class) { 2396 + switch (in->mad_hdr.mgmt_class) { 2400 2397 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: 2401 2398 case IB_MGMT_CLASS_SUBN_LID_ROUTED: 2402 - ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad); 2399 + ret = process_subn(ibdev, mad_flags, port, in, out); 2403 2400 goto bail; 2404 2401 2405 2402 case IB_MGMT_CLASS_PERF_MGMT: 2406 - ret = process_perf(ibdev, port, in_mad, out_mad); 2403 + ret = process_perf(ibdev, port, in, out); 2407 2404 goto bail; 2408 2405 2409 2406 case IB_MGMT_CLASS_CONG_MGMT: ··· 2409 2412 ret = IB_MAD_RESULT_SUCCESS; 2410 2413 goto bail; 2411 2414 } 2412 - ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad); 2415 + ret = process_cc(ibdev, mad_flags, port, in, out); 2413 2416 goto bail; 2414 2417 2415 2418 default:
+2 -3
drivers/infiniband/hw/qib/qib_verbs.h
··· 245 245 void qib_node_desc_chg(struct qib_ibport *ibp); 246 246 int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 247 247 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 248 - const struct ib_mad_hdr *in, size_t in_mad_size, 249 - struct ib_mad_hdr *out, size_t *out_mad_size, 250 - u16 *out_mad_pkey_index); 248 + const struct ib_mad *in, struct ib_mad *out, 249 + size_t *out_mad_size, u16 *out_mad_pkey_index); 251 250 void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx); 252 251 void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx); 253 252
+3 -4
include/rdma/ib_verbs.h
··· 2123 2123 atomic_t usecnt; 2124 2124 }; 2125 2125 2126 - struct ib_mad_hdr; 2126 + struct ib_mad; 2127 2127 struct ib_grh; 2128 2128 2129 2129 enum ib_process_mad_flags { ··· 2301 2301 int (*process_mad)(struct ib_device *device, int process_mad_flags, 2302 2302 u8 port_num, const struct ib_wc *in_wc, 2303 2303 const struct ib_grh *in_grh, 2304 - const struct ib_mad_hdr *in_mad, size_t in_mad_size, 2305 - struct ib_mad_hdr *out_mad, size_t *out_mad_size, 2306 - u16 *out_mad_pkey_index); 2304 + const struct ib_mad *in_mad, struct ib_mad *out_mad, 2305 + size_t *out_mad_size, u16 *out_mad_pkey_index); 2307 2306 int (*query_device)(struct ib_device *device, 2308 2307 struct ib_device_attr *device_attr, 2309 2308 struct ib_udata *udata);