Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/mlx4: Use IBoE (RoCE) IP based GIDs in the port GID table

Currently, the mlx4 driver sets IBoE (RoCE) GIDs to encode the related
Ethernet netdevice interface's MAC address and possibly the VLAN id.

Change this scheme such that GIDs encode interface IP addresses (both
IPv4 and IPv6).

This requires learning the IP addresses which are in use by a
netdevice associated with the HCA port, formatting them to GIDs and
adding them to the port GID table. Furthermore, address add and
delete events are caught to maintain the GID table accordingly.

Associated IP addresses may belong to a master of an Ethernet
netdevice on top of that port so this should be considered when
building and maintaining the gid table.

Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>

authored by

Moni Shoua and committed by
Roland Dreier
d487ee77 7b85627b

+349 -158
+346 -158
drivers/infiniband/hw/mlx4/main.c
··· 39 39 #include <linux/inetdevice.h> 40 40 #include <linux/rtnetlink.h> 41 41 #include <linux/if_vlan.h> 42 + #include <net/ipv6.h> 43 + #include <net/addrconf.h> 42 44 43 45 #include <rdma/ib_smi.h> 44 46 #include <rdma/ib_user_verbs.h> ··· 789 787 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, 790 788 union ib_gid *gid) 791 789 { 792 - u8 mac[6]; 793 790 struct net_device *ndev; 794 791 int ret = 0; 795 792 ··· 802 801 spin_unlock(&mdev->iboe.lock); 803 802 804 803 if (ndev) { 805 - rdma_get_mcast_mac((struct in6_addr *)gid, mac); 806 - rtnl_lock(); 807 - dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac); 808 804 ret = 1; 809 - rtnl_unlock(); 810 805 dev_put(ndev); 811 806 } 812 807 ··· 1022 1025 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 1023 1026 u64 reg_id; 1024 1027 struct mlx4_ib_steering *ib_steering = NULL; 1028 + enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 1029 + MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; 1025 1030 1026 1031 if (mdev->dev->caps.steering_mode == 1027 1032 MLX4_STEERING_MODE_DEVICE_MANAGED) { ··· 1035 1036 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port, 1036 1037 !!(mqp->flags & 1037 1038 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 1038 - MLX4_PROT_IB_IPV6, &reg_id); 1039 + prot, &reg_id); 1039 1040 if (err) 1040 1041 goto err_malloc; 1041 1042 ··· 1054 1055 1055 1056 err_add: 1056 1057 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, 1057 - MLX4_PROT_IB_IPV6, reg_id); 1058 + prot, reg_id); 1058 1059 err_malloc: 1059 1060 kfree(ib_steering); 1060 1061 ··· 1082 1083 int err; 1083 1084 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); 1084 1085 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 1085 - u8 mac[6]; 1086 1086 struct net_device *ndev; 1087 1087 struct mlx4_ib_gid_entry *ge; 1088 1088 u64 reg_id = 0; 1089 + enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 
1090 + MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; 1089 1091 1090 1092 if (mdev->dev->caps.steering_mode == 1091 1093 MLX4_STEERING_MODE_DEVICE_MANAGED) { ··· 1109 1109 } 1110 1110 1111 1111 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, 1112 - MLX4_PROT_IB_IPV6, reg_id); 1112 + prot, reg_id); 1113 1113 if (err) 1114 1114 return err; 1115 1115 ··· 1121 1121 if (ndev) 1122 1122 dev_hold(ndev); 1123 1123 spin_unlock(&mdev->iboe.lock); 1124 - rdma_get_mcast_mac((struct in6_addr *)gid, mac); 1125 - if (ndev) { 1126 - rtnl_lock(); 1127 - dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac); 1128 - rtnl_unlock(); 1124 + if (ndev) 1129 1125 dev_put(ndev); 1130 - } 1131 1126 list_del(&ge->list); 1132 1127 kfree(ge); 1133 1128 } else ··· 1218 1223 &dev_attr_board_id 1219 1224 }; 1220 1225 1221 - static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev) 1222 - { 1223 - memcpy(eui, dev->dev_addr, 3); 1224 - memcpy(eui + 5, dev->dev_addr + 3, 3); 1225 - if (vlan_id < 0x1000) { 1226 - eui[3] = vlan_id >> 8; 1227 - eui[4] = vlan_id & 0xff; 1228 - } else { 1229 - eui[3] = 0xff; 1230 - eui[4] = 0xfe; 1231 - } 1232 - eui[0] ^= 2; 1233 - } 1234 - 1235 1226 static void update_gids_task(struct work_struct *work) 1236 1227 { 1237 1228 struct update_gid_work *gw = container_of(work, struct update_gid_work, work); ··· 1240 1259 MLX4_CMD_WRAPPED); 1241 1260 if (err) 1242 1261 pr_warn("set port command failed\n"); 1243 - else { 1244 - memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids); 1262 + else 1245 1263 mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE); 1246 - } 1247 1264 1248 1265 mlx4_free_cmd_mailbox(dev, mailbox); 1249 1266 kfree(gw); 1250 1267 } 1251 1268 1252 - static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear) 1269 + static void reset_gids_task(struct work_struct *work) 1253 1270 { 1254 - struct net_device *ndev = dev->iboe.netdevs[port - 1]; 1255 - struct update_gid_work *work; 1256 - struct 
net_device *tmp; 1271 + struct update_gid_work *gw = 1272 + container_of(work, struct update_gid_work, work); 1273 + struct mlx4_cmd_mailbox *mailbox; 1274 + union ib_gid *gids; 1275 + int err; 1257 1276 int i; 1258 - u8 *hits; 1259 - int ret; 1260 - union ib_gid gid; 1261 - int free; 1262 - int found; 1263 - int need_update = 0; 1264 - u16 vid; 1277 + struct mlx4_dev *dev = gw->dev->dev; 1265 1278 1266 - work = kzalloc(sizeof *work, GFP_ATOMIC); 1279 + mailbox = mlx4_alloc_cmd_mailbox(dev); 1280 + if (IS_ERR(mailbox)) { 1281 + pr_warn("reset gid table failed\n"); 1282 + goto free; 1283 + } 1284 + 1285 + gids = mailbox->buf; 1286 + memcpy(gids, gw->gids, sizeof(gw->gids)); 1287 + 1288 + for (i = 1; i < gw->dev->num_ports + 1; i++) { 1289 + if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) == 1290 + IB_LINK_LAYER_ETHERNET) { 1291 + err = mlx4_cmd(dev, mailbox->dma, 1292 + MLX4_SET_PORT_GID_TABLE << 8 | i, 1293 + 1, MLX4_CMD_SET_PORT, 1294 + MLX4_CMD_TIME_CLASS_B, 1295 + MLX4_CMD_WRAPPED); 1296 + if (err) 1297 + pr_warn(KERN_WARNING 1298 + "set port %d command failed\n", i); 1299 + } 1300 + } 1301 + 1302 + mlx4_free_cmd_mailbox(dev, mailbox); 1303 + free: 1304 + kfree(gw); 1305 + } 1306 + 1307 + static int update_gid_table(struct mlx4_ib_dev *dev, int port, 1308 + union ib_gid *gid, int clear) 1309 + { 1310 + struct update_gid_work *work; 1311 + int i; 1312 + int need_update = 0; 1313 + int free = -1; 1314 + int found = -1; 1315 + int max_gids; 1316 + 1317 + max_gids = dev->dev->caps.gid_table_len[port]; 1318 + for (i = 0; i < max_gids; ++i) { 1319 + if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid, 1320 + sizeof(*gid))) 1321 + found = i; 1322 + 1323 + if (clear) { 1324 + if (found >= 0) { 1325 + need_update = 1; 1326 + dev->iboe.gid_table[port - 1][found] = zgid; 1327 + break; 1328 + } 1329 + } else { 1330 + if (found >= 0) 1331 + break; 1332 + 1333 + if (free < 0 && 1334 + !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, 1335 + sizeof(*gid))) 1336 + free = i; 
1337 + } 1338 + } 1339 + 1340 + if (found == -1 && !clear && free >= 0) { 1341 + dev->iboe.gid_table[port - 1][free] = *gid; 1342 + need_update = 1; 1343 + } 1344 + 1345 + if (!need_update) 1346 + return 0; 1347 + 1348 + work = kzalloc(sizeof(*work), GFP_ATOMIC); 1267 1349 if (!work) 1268 1350 return -ENOMEM; 1269 1351 1270 - hits = kzalloc(128, GFP_ATOMIC); 1271 - if (!hits) { 1272 - ret = -ENOMEM; 1273 - goto out; 1274 - } 1352 + memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids)); 1353 + INIT_WORK(&work->work, update_gids_task); 1354 + work->port = port; 1355 + work->dev = dev; 1356 + queue_work(wq, &work->work); 1275 1357 1276 - rcu_read_lock(); 1277 - for_each_netdev_rcu(&init_net, tmp) { 1278 - if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) { 1279 - gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); 1280 - vid = rdma_vlan_dev_vlan_id(tmp); 1281 - mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev); 1282 - found = 0; 1283 - free = -1; 1284 - for (i = 0; i < 128; ++i) { 1285 - if (free < 0 && 1286 - !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid)) 1287 - free = i; 1288 - if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) { 1289 - hits[i] = 1; 1290 - found = 1; 1291 - break; 1292 - } 1293 - } 1358 + return 0; 1359 + } 1294 1360 1295 - if (!found) { 1296 - if (tmp == ndev && 1297 - (memcmp(&dev->iboe.gid_table[port - 1][0], 1298 - &gid, sizeof gid) || 1299 - !memcmp(&dev->iboe.gid_table[port - 1][0], 1300 - &zgid, sizeof gid))) { 1301 - dev->iboe.gid_table[port - 1][0] = gid; 1302 - ++need_update; 1303 - hits[0] = 1; 1304 - } else if (free >= 0) { 1305 - dev->iboe.gid_table[port - 1][free] = gid; 1306 - hits[free] = 1; 1307 - ++need_update; 1308 - } 1309 - } 1310 - } 1311 - } 1312 - rcu_read_unlock(); 1361 + static int reset_gid_table(struct mlx4_ib_dev *dev) 1362 + { 1363 + struct update_gid_work *work; 1313 1364 1314 - for (i = 0; i < 128; ++i) 1315 - if (!hits[i]) { 1316 - if 
(memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid)) 1317 - ++need_update; 1318 - dev->iboe.gid_table[port - 1][i] = zgid; 1319 - } 1320 1365 1321 - if (need_update) { 1322 - memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids); 1323 - INIT_WORK(&work->work, update_gids_task); 1324 - work->port = port; 1325 - work->dev = dev; 1326 - queue_work(wq, &work->work); 1327 - } else 1328 - kfree(work); 1366 + work = kzalloc(sizeof(*work), GFP_ATOMIC); 1367 + if (!work) 1368 + return -ENOMEM; 1369 + memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table)); 1370 + memset(work->gids, 0, sizeof(work->gids)); 1371 + INIT_WORK(&work->work, reset_gids_task); 1372 + work->dev = dev; 1373 + queue_work(wq, &work->work); 1374 + return 0; 1375 + } 1329 1376 1330 - kfree(hits); 1377 + static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, 1378 + struct mlx4_ib_dev *ibdev, union ib_gid *gid) 1379 + { 1380 + struct mlx4_ib_iboe *iboe; 1381 + int port = 0; 1382 + struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ? 
1383 + rdma_vlan_dev_real_dev(event_netdev) : 1384 + event_netdev; 1385 + 1386 + if (event != NETDEV_DOWN && event != NETDEV_UP) 1387 + return 0; 1388 + 1389 + if ((real_dev != event_netdev) && 1390 + (event == NETDEV_DOWN) && 1391 + rdma_link_local_addr((struct in6_addr *)gid)) 1392 + return 0; 1393 + 1394 + iboe = &ibdev->iboe; 1395 + spin_lock(&iboe->lock); 1396 + 1397 + for (port = 1; port <= MLX4_MAX_PORTS; ++port) 1398 + if ((netif_is_bond_master(real_dev) && 1399 + (real_dev == iboe->masters[port - 1])) || 1400 + (!netif_is_bond_master(real_dev) && 1401 + (real_dev == iboe->netdevs[port - 1]))) 1402 + update_gid_table(ibdev, port, gid, 1403 + event == NETDEV_DOWN); 1404 + 1405 + spin_unlock(&iboe->lock); 1331 1406 return 0; 1332 1407 1333 - out: 1334 - kfree(work); 1335 - return ret; 1336 1408 } 1337 1409 1338 - static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event) 1410 + static u8 mlx4_ib_get_dev_port(struct net_device *dev, 1411 + struct mlx4_ib_dev *ibdev) 1339 1412 { 1340 - switch (event) { 1341 - case NETDEV_UP: 1342 - case NETDEV_CHANGEADDR: 1343 - update_ipv6_gids(dev, port, 0); 1344 - break; 1413 + u8 port = 0; 1414 + struct mlx4_ib_iboe *iboe; 1415 + struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ? 
1416 + rdma_vlan_dev_real_dev(dev) : dev; 1345 1417 1346 - case NETDEV_DOWN: 1347 - update_ipv6_gids(dev, port, 1); 1348 - dev->iboe.netdevs[port - 1] = NULL; 1349 - } 1418 + iboe = &ibdev->iboe; 1419 + spin_lock(&iboe->lock); 1420 + 1421 + for (port = 1; port <= MLX4_MAX_PORTS; ++port) 1422 + if ((netif_is_bond_master(real_dev) && 1423 + (real_dev == iboe->masters[port - 1])) || 1424 + (!netif_is_bond_master(real_dev) && 1425 + (real_dev == iboe->netdevs[port - 1]))) 1426 + break; 1427 + 1428 + spin_unlock(&iboe->lock); 1429 + 1430 + if ((port == 0) || (port > MLX4_MAX_PORTS)) 1431 + return 0; 1432 + else 1433 + return port; 1350 1434 } 1351 1435 1352 - static void netdev_added(struct mlx4_ib_dev *dev, int port) 1353 - { 1354 - update_ipv6_gids(dev, port, 0); 1355 - } 1356 - 1357 - static void netdev_removed(struct mlx4_ib_dev *dev, int port) 1358 - { 1359 - update_ipv6_gids(dev, port, 1); 1360 - } 1361 - 1362 - static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event, 1436 + static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event, 1363 1437 void *ptr) 1438 + { 1439 + struct mlx4_ib_dev *ibdev; 1440 + struct in_ifaddr *ifa = ptr; 1441 + union ib_gid gid; 1442 + struct net_device *event_netdev = ifa->ifa_dev->dev; 1443 + 1444 + ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid); 1445 + 1446 + ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet); 1447 + 1448 + mlx4_ib_addr_event(event, event_netdev, ibdev, &gid); 1449 + return NOTIFY_DONE; 1450 + } 1451 + 1452 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1453 + static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event, 1454 + void *ptr) 1455 + { 1456 + struct mlx4_ib_dev *ibdev; 1457 + struct inet6_ifaddr *ifa = ptr; 1458 + union ib_gid *gid = (union ib_gid *)&ifa->addr; 1459 + struct net_device *event_netdev = ifa->idev->dev; 1460 + 1461 + ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6); 1462 + 
1463 + mlx4_ib_addr_event(event, event_netdev, ibdev, gid); 1464 + return NOTIFY_DONE; 1465 + } 1466 + #endif 1467 + 1468 + static void mlx4_ib_get_dev_addr(struct net_device *dev, 1469 + struct mlx4_ib_dev *ibdev, u8 port) 1470 + { 1471 + struct in_device *in_dev; 1472 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1473 + struct inet6_dev *in6_dev; 1474 + union ib_gid *pgid; 1475 + struct inet6_ifaddr *ifp; 1476 + #endif 1477 + union ib_gid gid; 1478 + 1479 + 1480 + if ((port == 0) || (port > MLX4_MAX_PORTS)) 1481 + return; 1482 + 1483 + /* IPv4 gids */ 1484 + in_dev = in_dev_get(dev); 1485 + if (in_dev) { 1486 + for_ifa(in_dev) { 1487 + /*ifa->ifa_address;*/ 1488 + ipv6_addr_set_v4mapped(ifa->ifa_address, 1489 + (struct in6_addr *)&gid); 1490 + update_gid_table(ibdev, port, &gid, 0); 1491 + } 1492 + endfor_ifa(in_dev); 1493 + in_dev_put(in_dev); 1494 + } 1495 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1496 + /* IPv6 gids */ 1497 + in6_dev = in6_dev_get(dev); 1498 + if (in6_dev) { 1499 + read_lock_bh(&in6_dev->lock); 1500 + list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { 1501 + pgid = (union ib_gid *)&ifp->addr; 1502 + update_gid_table(ibdev, port, pgid, 0); 1503 + } 1504 + read_unlock_bh(&in6_dev->lock); 1505 + in6_dev_put(in6_dev); 1506 + } 1507 + #endif 1508 + } 1509 + 1510 + static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) 1511 + { 1512 + struct net_device *dev; 1513 + 1514 + if (reset_gid_table(ibdev)) 1515 + return -1; 1516 + 1517 + read_lock(&dev_base_lock); 1518 + 1519 + for_each_netdev(&init_net, dev) { 1520 + u8 port = mlx4_ib_get_dev_port(dev, ibdev); 1521 + if (port) 1522 + mlx4_ib_get_dev_addr(dev, ibdev, port); 1523 + } 1524 + 1525 + read_unlock(&dev_base_lock); 1526 + 1527 + return 0; 1528 + } 1529 + 1530 + static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) 1531 + { 1532 + struct mlx4_ib_iboe *iboe; 1533 + int port; 1534 + 1535 + iboe = &ibdev->iboe; 1536 + 1537 + spin_lock(&iboe->lock); 1538 + 
mlx4_foreach_ib_transport_port(port, ibdev->dev) { 1539 + struct net_device *old_master = iboe->masters[port - 1]; 1540 + struct net_device *curr_master; 1541 + iboe->netdevs[port - 1] = 1542 + mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); 1543 + 1544 + if (iboe->netdevs[port - 1] && 1545 + netif_is_bond_slave(iboe->netdevs[port - 1])) { 1546 + rtnl_lock(); 1547 + iboe->masters[port - 1] = netdev_master_upper_dev_get( 1548 + iboe->netdevs[port - 1]); 1549 + rtnl_unlock(); 1550 + } 1551 + curr_master = iboe->masters[port - 1]; 1552 + 1553 + /* if bonding is used it is possible that we add it to masters 1554 + only after IP address is assigned to the net bonding 1555 + interface */ 1556 + if (curr_master && (old_master != curr_master)) 1557 + mlx4_ib_get_dev_addr(curr_master, ibdev, port); 1558 + } 1559 + 1560 + spin_unlock(&iboe->lock); 1561 + } 1562 + 1563 + static int mlx4_ib_netdev_event(struct notifier_block *this, 1564 + unsigned long event, void *ptr) 1364 1565 { 1365 1566 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1366 1567 struct mlx4_ib_dev *ibdev; 1367 - struct net_device *oldnd; 1368 - struct mlx4_ib_iboe *iboe; 1369 - int port; 1370 1568 1371 1569 if (!net_eq(dev_net(dev), &init_net)) 1372 1570 return NOTIFY_DONE; 1373 1571 1374 1572 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); 1375 - iboe = &ibdev->iboe; 1376 - 1377 - spin_lock(&iboe->lock); 1378 - mlx4_foreach_ib_transport_port(port, ibdev->dev) { 1379 - oldnd = iboe->netdevs[port - 1]; 1380 - iboe->netdevs[port - 1] = 1381 - mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); 1382 - if (oldnd != iboe->netdevs[port - 1]) { 1383 - if (iboe->netdevs[port - 1]) 1384 - netdev_added(ibdev, port); 1385 - else 1386 - netdev_removed(ibdev, port); 1387 - } 1388 - } 1389 - 1390 - if (dev == iboe->netdevs[0] || 1391 - (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0])) 1392 - handle_en_event(ibdev, 1, event); 1393 - else if (dev == iboe->netdevs[1] 1394 
- || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1])) 1395 - handle_en_event(ibdev, 2, event); 1396 - 1397 - spin_unlock(&iboe->lock); 1573 + mlx4_ib_scan_netdevs(ibdev); 1398 1574 1399 1575 return NOTIFY_DONE; 1400 1576 } ··· 1857 1719 if (mlx4_ib_init_sriov(ibdev)) 1858 1720 goto err_mad; 1859 1721 1860 - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) { 1861 - iboe->nb.notifier_call = mlx4_ib_netdev_event; 1862 - err = register_netdevice_notifier(&iboe->nb); 1863 - if (err) 1864 - goto err_sriov; 1722 + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) { 1723 + if (!iboe->nb.notifier_call) { 1724 + iboe->nb.notifier_call = mlx4_ib_netdev_event; 1725 + err = register_netdevice_notifier(&iboe->nb); 1726 + if (err) { 1727 + iboe->nb.notifier_call = NULL; 1728 + goto err_notif; 1729 + } 1730 + } 1731 + if (!iboe->nb_inet.notifier_call) { 1732 + iboe->nb_inet.notifier_call = mlx4_ib_inet_event; 1733 + err = register_inetaddr_notifier(&iboe->nb_inet); 1734 + if (err) { 1735 + iboe->nb_inet.notifier_call = NULL; 1736 + goto err_notif; 1737 + } 1738 + } 1739 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1740 + if (!iboe->nb_inet6.notifier_call) { 1741 + iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event; 1742 + err = register_inet6addr_notifier(&iboe->nb_inet6); 1743 + if (err) { 1744 + iboe->nb_inet6.notifier_call = NULL; 1745 + goto err_notif; 1746 + } 1747 + } 1748 + #endif 1749 + mlx4_ib_scan_netdevs(ibdev); 1750 + mlx4_ib_init_gid_table(ibdev); 1865 1751 } 1866 1752 1867 1753 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { ··· 1911 1749 return ibdev; 1912 1750 1913 1751 err_notif: 1914 - if (unregister_netdevice_notifier(&ibdev->iboe.nb)) 1915 - pr_warn("failure unregistering notifier\n"); 1752 + if (ibdev->iboe.nb.notifier_call) { 1753 + if (unregister_netdevice_notifier(&ibdev->iboe.nb)) 1754 + pr_warn("failure unregistering notifier\n"); 1755 + ibdev->iboe.nb.notifier_call = NULL; 1756 + } 1757 + 
if (ibdev->iboe.nb_inet.notifier_call) { 1758 + if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet)) 1759 + pr_warn("failure unregistering notifier\n"); 1760 + ibdev->iboe.nb_inet.notifier_call = NULL; 1761 + } 1762 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1763 + if (ibdev->iboe.nb_inet6.notifier_call) { 1764 + if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6)) 1765 + pr_warn("failure unregistering notifier\n"); 1766 + ibdev->iboe.nb_inet6.notifier_call = NULL; 1767 + } 1768 + #endif 1916 1769 flush_workqueue(wq); 1917 1770 1918 - err_sriov: 1919 1771 mlx4_ib_close_sriov(ibdev); 1920 1772 1921 1773 err_mad: ··· 1971 1795 pr_warn("failure unregistering notifier\n"); 1972 1796 ibdev->iboe.nb.notifier_call = NULL; 1973 1797 } 1798 + if (ibdev->iboe.nb_inet.notifier_call) { 1799 + if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet)) 1800 + pr_warn("failure unregistering notifier\n"); 1801 + ibdev->iboe.nb_inet.notifier_call = NULL; 1802 + } 1803 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1804 + if (ibdev->iboe.nb_inet6.notifier_call) { 1805 + if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6)) 1806 + pr_warn("failure unregistering notifier\n"); 1807 + ibdev->iboe.nb_inet6.notifier_call = NULL; 1808 + } 1809 + #endif 1974 1810 iounmap(ibdev->uar_map); 1975 1811 for (p = 0; p < ibdev->num_ports; ++p) 1976 1812 if (ibdev->counters[p] != -1)
+3
drivers/infiniband/hw/mlx4/mlx4_ib.h
··· 428 428 struct mlx4_ib_iboe { 429 429 spinlock_t lock; 430 430 struct net_device *netdevs[MLX4_MAX_PORTS]; 431 + struct net_device *masters[MLX4_MAX_PORTS]; 431 432 struct notifier_block nb; 433 + struct notifier_block nb_inet; 434 + struct notifier_block nb_inet6; 432 435 union ib_gid gid_table[MLX4_MAX_PORTS][128]; 433 436 }; 434 437