Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mlx4: generalization of multicast steering.

The same packet steering mechanism would be used for both IB and Ethernet,
and for both multicasts and unicasts.
This commit prepares the general infrastructure for this.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Yevgeny Petrilin and committed by
David S. Miller
0345584e 725c8999

+102 -43
+5 -5
drivers/infiniband/hw/mlx4/main.c
··· 625 625 626 626 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 627 627 !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 628 - MLX4_PROTOCOL_IB); 628 + MLX4_PROT_IB_IPV6); 629 629 if (err) 630 630 return err; 631 631 ··· 636 636 return 0; 637 637 638 638 err_add: 639 - mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB); 639 + mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); 640 640 return err; 641 641 } 642 642 ··· 666 666 struct mlx4_ib_gid_entry *ge; 667 667 668 668 err = mlx4_multicast_detach(mdev->dev, 669 - &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB); 669 + &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); 670 670 if (err) 671 671 return err; 672 672 ··· 953 953 mlx4_foreach_ib_transport_port(port, ibdev->dev) { 954 954 oldnd = iboe->netdevs[port - 1]; 955 955 iboe->netdevs[port - 1] = 956 - mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port); 956 + mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); 957 957 if (oldnd != iboe->netdevs[port - 1]) { 958 958 if (iboe->netdevs[port - 1]) 959 959 netdev_added(ibdev, port); ··· 1206 1206 .add = mlx4_ib_add, 1207 1207 .remove = mlx4_ib_remove, 1208 1208 .event = mlx4_ib_event, 1209 - .protocol = MLX4_PROTOCOL_IB 1209 + .protocol = MLX4_PROT_IB_IPV6 1210 1210 }; 1211 1211 1212 1212 static int __init mlx4_ib_init(void)
+1 -1
drivers/net/mlx4/en_main.c
··· 296 296 .remove = mlx4_en_remove, 297 297 .event = mlx4_en_event, 298 298 .get_dev = mlx4_en_get_netdev, 299 - .protocol = MLX4_PROTOCOL_EN, 299 + .protocol = MLX4_PROT_ETH, 300 300 }; 301 301 302 302 static int __init mlx4_en_init(void)
+2
drivers/net/mlx4/fw.c
··· 274 274 dev_cap->stat_rate_support = stat_rate; 275 275 MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET); 276 276 dev_cap->udp_rss = field & 0x1; 277 + dev_cap->vep_uc_steering = field & 0x2; 278 + dev_cap->vep_mc_steering = field & 0x4; 277 279 MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET); 278 280 dev_cap->loopback_support = field & 0x1; 279 281 dev_cap->wol = field & 0x40;
+2
drivers/net/mlx4/fw.h
··· 80 80 u16 stat_rate_support; 81 81 int udp_rss; 82 82 int loopback_support; 83 + int vep_uc_steering; 84 + int vep_mc_steering; 83 85 int wol; 84 86 u32 flags; 85 87 int reserved_uars;
+2
drivers/net/mlx4/main.c
··· 227 227 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 228 228 dev->caps.udp_rss = dev_cap->udp_rss; 229 229 dev->caps.loopback_support = dev_cap->loopback_support; 230 + dev->caps.vep_uc_steering = dev_cap->vep_uc_steering; 231 + dev->caps.vep_mc_steering = dev_cap->vep_mc_steering; 230 232 dev->caps.wol = dev_cap->wol; 231 233 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 232 234
+78 -35
drivers/net/mlx4/mcg.c
··· 32 32 */ 33 33 34 34 #include <linux/string.h> 35 + #include <linux/etherdevice.h> 35 36 36 37 #include <linux/mlx4/cmd.h> 37 38 ··· 51 50 52 51 static const u8 zero_gid[16]; /* automatically initialized to 0 */ 53 52 54 - static int mlx4_READ_MCG(struct mlx4_dev *dev, int index, 55 - struct mlx4_cmd_mailbox *mailbox) 53 + static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 54 + struct mlx4_cmd_mailbox *mailbox) 56 55 { 57 56 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 58 57 MLX4_CMD_TIME_CLASS_A); 59 58 } 60 59 61 - static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index, 62 - struct mlx4_cmd_mailbox *mailbox) 60 + static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, 61 + struct mlx4_cmd_mailbox *mailbox) 63 62 { 64 63 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 65 64 MLX4_CMD_TIME_CLASS_A); 66 65 } 67 66 68 - static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 69 - u16 *hash) 67 + static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 68 + u16 *hash, u8 op_mod) 70 69 { 71 70 u64 imm; 72 71 int err; 73 72 74 - err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH, 75 - MLX4_CMD_TIME_CLASS_A); 73 + err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, 74 + MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A); 76 75 77 76 if (!err) 78 77 *hash = imm; ··· 95 94 * If no AMGM exists for given gid, *index = -1, *prev = index of last 96 95 * entry in hash chain and *mgm holds end of hash chain. 
97 96 */ 98 - static int find_mgm(struct mlx4_dev *dev, 99 - u8 *gid, enum mlx4_protocol protocol, 100 - struct mlx4_cmd_mailbox *mgm_mailbox, 101 - u16 *hash, int *prev, int *index) 97 + static int find_entry(struct mlx4_dev *dev, u8 port, 98 + u8 *gid, enum mlx4_protocol prot, 99 + enum mlx4_steer_type steer, 100 + struct mlx4_cmd_mailbox *mgm_mailbox, 101 + u16 *hash, int *prev, int *index) 102 102 { 103 103 struct mlx4_cmd_mailbox *mailbox; 104 104 struct mlx4_mgm *mgm = mgm_mailbox->buf; 105 105 u8 *mgid; 106 106 int err; 107 + u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0; 107 108 108 109 mailbox = mlx4_alloc_cmd_mailbox(dev); 109 110 if (IS_ERR(mailbox)) ··· 114 111 115 112 memcpy(mgid, gid, 16); 116 113 117 - err = mlx4_MGID_HASH(dev, mailbox, hash); 114 + err = mlx4_GID_HASH(dev, mailbox, hash, op_mod); 118 115 mlx4_free_cmd_mailbox(dev, mailbox); 119 116 if (err) 120 117 return err; ··· 126 123 *prev = -1; 127 124 128 125 do { 129 - err = mlx4_READ_MCG(dev, *index, mgm_mailbox); 126 + err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); 130 127 if (err) 131 128 return err; 132 129 133 - if (!memcmp(mgm->gid, zero_gid, 16)) { 130 + if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 134 131 if (*index != *hash) { 135 132 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 136 133 err = -EINVAL; ··· 139 136 } 140 137 141 138 if (!memcmp(mgm->gid, gid, 16) && 142 - be32_to_cpu(mgm->members_count) >> 30 == protocol) 139 + be32_to_cpu(mgm->members_count) >> 30 == prot) 143 140 return err; 144 141 145 142 *prev = *index; ··· 150 147 return err; 151 148 } 152 149 153 - int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 154 - int block_mcast_loopback, enum mlx4_protocol protocol) 150 + int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 151 + int block_mcast_loopback, enum mlx4_protocol prot, 152 + enum mlx4_steer_type steer) 155 153 { 156 154 struct mlx4_priv *priv = mlx4_priv(dev); 157 155 
struct mlx4_cmd_mailbox *mailbox; ··· 163 159 int link = 0; 164 160 int i; 165 161 int err; 162 + u8 port = gid[5]; 166 163 167 164 mailbox = mlx4_alloc_cmd_mailbox(dev); 168 165 if (IS_ERR(mailbox)) ··· 171 166 mgm = mailbox->buf; 172 167 173 168 mutex_lock(&priv->mcg_table.mutex); 174 - 175 - err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); 169 + err = find_entry(dev, port, gid, prot, steer, 170 + mailbox, &hash, &prev, &index); 176 171 if (err) 177 172 goto out; 178 173 179 174 if (index != -1) { 180 - if (!memcmp(mgm->gid, zero_gid, 16)) 175 + if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) 181 176 memcpy(mgm->gid, gid, 16); 182 177 } else { 183 178 link = 1; ··· 214 209 else 215 210 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 216 211 217 - mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30); 212 + mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); 218 213 219 - err = mlx4_WRITE_MCG(dev, index, mailbox); 214 + err = mlx4_WRITE_ENTRY(dev, index, mailbox); 220 215 if (err) 221 216 goto out; 222 217 223 218 if (!link) 224 219 goto out; 225 220 226 - err = mlx4_READ_MCG(dev, prev, mailbox); 221 + err = mlx4_READ_ENTRY(dev, prev, mailbox); 227 222 if (err) 228 223 goto out; 229 224 230 225 mgm->next_gid_index = cpu_to_be32(index << 6); 231 226 232 - err = mlx4_WRITE_MCG(dev, prev, mailbox); 227 + err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 233 228 if (err) 234 229 goto out; 235 230 ··· 247 242 mlx4_free_cmd_mailbox(dev, mailbox); 248 243 return err; 249 244 } 250 - EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 251 245 252 - int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 253 - enum mlx4_protocol protocol) 246 + int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 247 + enum mlx4_protocol prot, enum mlx4_steer_type steer) 254 248 { 255 249 struct mlx4_priv *priv = mlx4_priv(dev); 256 250 struct mlx4_cmd_mailbox *mailbox; ··· 259 255 int 
prev, index; 260 256 int i, loc; 261 257 int err; 258 + u8 port = gid[5]; 262 259 263 260 mailbox = mlx4_alloc_cmd_mailbox(dev); 264 261 if (IS_ERR(mailbox)) ··· 268 263 269 264 mutex_lock(&priv->mcg_table.mutex); 270 265 271 - err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); 266 + err = find_entry(dev, port, gid, prot, steer, 267 + mailbox, &hash, &prev, &index); 272 268 if (err) 273 269 goto out; 274 270 ··· 291 285 } 292 286 293 287 294 - mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30); 288 + mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); 295 289 mgm->qp[loc] = mgm->qp[i - 1]; 296 290 mgm->qp[i - 1] = 0; 297 291 298 292 if (i != 1) { 299 - err = mlx4_WRITE_MCG(dev, index, mailbox); 293 + err = mlx4_WRITE_ENTRY(dev, index, mailbox); 300 294 goto out; 301 295 } 302 296 ··· 304 298 /* Remove entry from MGM */ 305 299 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 306 300 if (amgm_index) { 307 - err = mlx4_READ_MCG(dev, amgm_index, mailbox); 301 + err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); 308 302 if (err) 309 303 goto out; 310 304 } else 311 305 memset(mgm->gid, 0, 16); 312 306 313 - err = mlx4_WRITE_MCG(dev, index, mailbox); 307 + err = mlx4_WRITE_ENTRY(dev, index, mailbox); 314 308 if (err) 315 309 goto out; 316 310 ··· 325 319 } else { 326 320 /* Remove entry from AMGM */ 327 321 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 328 - err = mlx4_READ_MCG(dev, prev, mailbox); 322 + err = mlx4_READ_ENTRY(dev, prev, mailbox); 329 323 if (err) 330 324 goto out; 331 325 332 326 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 333 327 334 - err = mlx4_WRITE_MCG(dev, prev, mailbox); 328 + err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 335 329 if (err) 336 330 goto out; 337 331 ··· 348 342 349 343 mlx4_free_cmd_mailbox(dev, mailbox); 350 344 return err; 345 + } 346 + 347 + 348 + int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 349 + int 
block_mcast_loopback, enum mlx4_protocol prot) 350 + { 351 + enum mlx4_steer_type steer; 352 + 353 + steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; 354 + 355 + if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering) 356 + return 0; 357 + 358 + if (prot == MLX4_PROT_ETH) 359 + gid[7] |= (steer << 1); 360 + 361 + return mlx4_qp_attach_common(dev, qp, gid, 362 + block_mcast_loopback, prot, 363 + steer); 364 + } 365 + EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 366 + 367 + int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 368 + enum mlx4_protocol prot) 369 + { 370 + enum mlx4_steer_type steer; 371 + 372 + steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; 373 + 374 + if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering) 375 + return 0; 376 + 377 + if (prot == MLX4_PROT_ETH) { 378 + gid[7] |= (steer << 1); 379 + } 380 + 381 + return mlx4_qp_detach_common(dev, qp, gid, prot, steer); 351 382 } 352 383 EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 353 384
+12 -2
include/linux/mlx4/device.h
··· 150 150 }; 151 151 152 152 enum mlx4_protocol { 153 - MLX4_PROTOCOL_IB, 154 - MLX4_PROTOCOL_EN, 153 + MLX4_PROT_IB_IPV6 = 0, 154 + MLX4_PROT_ETH, 155 + MLX4_PROT_IB_IPV4, 156 + MLX4_PROT_FCOE 155 157 }; 156 158 157 159 enum { ··· 178 176 MLX4_NO_VLAN_IDX = 0, 179 177 MLX4_VLAN_MISS_IDX, 180 178 MLX4_VLAN_REGULAR 179 + }; 180 + 181 + enum mlx4_steer_type { 182 + MLX4_MC_STEER = 0, 183 + MLX4_UC_STEER, 184 + MLX4_NUM_STEERS 181 185 }; 182 186 183 187 enum { ··· 259 251 u16 stat_rate_support; 260 252 int udp_rss; 261 253 int loopback_support; 254 + int vep_uc_steering; 255 + int vep_mc_steering; 262 256 int wol; 263 257 u8 port_width_cap[MLX4_MAX_PORTS + 1]; 264 258 int max_gso_sz;