RDMA/cma: Remove padding arrays by using struct sockaddr_storage

There are a few places where the RDMA CM code handles IPv6 by doing

	struct sockaddr addr;
	u8 pad[sizeof(struct sockaddr_in6) -
	       sizeof(struct sockaddr)];

This is fragile and ugly: the size of the pad array has to be kept in
sync with the sockaddr definitions by hand, and every struct that
embeds an address has to repeat the trick.  Handle this in a better
way with just

	struct sockaddr_storage addr;
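
For illustration only, not part of the patch: struct sockaddr_storage
is defined to be large enough and suitably aligned for any supported
socket address, so a single field replaces the sockaddr-plus-pad pair
and call sites simply cast.  A minimal sketch with hypothetical names
(example_id, example_sa, example_is_ipv4):

	#include <linux/socket.h>	/* struct sockaddr_storage, AF_INET */

	struct example_id {
		struct sockaddr_storage addr;	/* holds IPv4 or IPv6, no pad[] */
	};

	/* ss_family overlays sa_family, so family checks keep working */
	static int example_is_ipv4(const struct example_id *id)
	{
		return id->addr.ss_family == AF_INET;
	}

	/* the cast used throughout this patch */
	static struct sockaddr *example_sa(struct example_id *id)
	{
		return (struct sockaddr *) &id->addr;
	}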

[ Also roll in patch from Aleksey Senin <alekseys@voltaire.com> to
switch to struct sockaddr_storage and get rid of padding arrays in
struct rdma_addr. ]

Signed-off-by: Roland Dreier <rolandd@cisco.com>

3 files changed, 26 insertions(+), 33 deletions(-)

drivers/infiniband/core/cma.c (+18 -19)
···
 	} multicast;
 	struct list_head	list;
 	void			*context;
-	struct sockaddr		addr;
-	u8			pad[sizeof(struct sockaddr_in6) -
-				    sizeof(struct sockaddr)];
+	struct sockaddr_storage	addr;
 };

 struct cma_work {
···
 		cma_cancel_route(id_priv);
 		break;
 	case CMA_LISTEN:
-		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
-				!id_priv->cma_dev)
+		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
+				&& !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
 		break;
 	default:
···
 	rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

 	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto destroy_id;
···
 	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
 			  ip_ver, port, src, dst);

-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto err;
···
 	if (IS_ERR(id_priv->cm_id.ib))
 		return PTR_ERR(id_priv->cm_id.ib);

-	addr = &id_priv->id.route.addr.src_addr;
+	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
 	svc_id = cma_get_service_id(id_priv->id.ps, addr);
 	if (cma_any_addr(addr))
 		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
···
 	dev_id_priv->state = CMA_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
-	       ip_addr_size(&id_priv->id.route.addr.src_addr));
+	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

 	cma_attach_to_dev(dev_id_priv, cma_dev);
 	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
···
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
-	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
+	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
+						 (struct sockaddr *) &addr->dst_addr);

 	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
 		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

-	if (addr->src_addr.sa_family == AF_INET) {
+	if (addr->src_addr.ss_family == AF_INET) {
 		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
 		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
 	} else {
···
 	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
 	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

-	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
+	if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
 		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
 		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
 		src_in->sin_family = dst_in->sin_family;
···
 	if (cma_any_addr(dst_addr))
 		ret = cma_resolve_loopback(id_priv);
 	else
-		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
+		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
 				      dst_addr, &id->route.addr.dev_addr,
 				      timeout_ms, addr_handler, id_priv);
 	if (ret)
···
 	 * We don't support binding to any address if anyone is bound to
 	 * a specific address on the same port.
 	 */
-	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
+	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		return -EADDRNOTAVAIL;

 	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
-		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
+		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
 			return -EADDRNOTAVAIL;

 		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
···
 	}

 	mutex_lock(&lock);
-	if (cma_any_port(&id_priv->id.route.addr.src_addr))
+	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		ret = cma_alloc_any_port(ps, id_priv);
 	else
 		ret = cma_use_port(ps, id_priv);
···
 	req.path = route->path_rec;
 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
···
 		req.alternate_path = &route->path_rec[1];

 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
 	req.qp_type = IB_QPT_RC;
 	req.starting_psn = id_priv->seq_num;
···
 	if (ret)
 		return ret;

-	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
+	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
 	if (id_priv->id.ps == RDMA_PS_UDP)
 		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
 	ib_addr_get_sgid(dev_addr, &rec.port_gid);

drivers/infiniband/core/ucma.c (+6 -8)
···
 	u64			uid;
 	struct list_head	list;
-	struct sockaddr		addr;
-	u8			pad[sizeof(struct sockaddr_in6) -
-				    sizeof(struct sockaddr)];
+	struct sockaddr_storage	addr;
 };

 struct ucma_event {
···
 		return PTR_ERR(ctx);

 	memset(&resp, 0, sizeof resp);
-	addr = &ctx->cm_id->route.addr.src_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
-	addr = &ctx->cm_id->route.addr.dst_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
 	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
···
 	mc->uid = cmd.uid;
 	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
-	ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
+	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
 	if (ret)
 		goto err2;
···
 	return 0;

 err3:
-	rdma_leave_multicast(ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
 	ucma_cleanup_mc_events(mc);
 err2:
 	mutex_lock(&mut);
···
 		goto out;
 	}

-	rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
 	mutex_lock(&mc->ctx->file->mut);
 	ucma_cleanup_mc_events(mc);
 	list_del(&mc->list);

include/rdma/rdma_cm.h (+2 -6)
···
 };

 struct rdma_addr {
-	struct sockaddr src_addr;
-	u8		src_pad[sizeof(struct sockaddr_in6) -
-			sizeof(struct sockaddr)];
-	struct sockaddr dst_addr;
-	u8		dst_pad[sizeof(struct sockaddr_in6) -
-			sizeof(struct sockaddr)];
+	struct sockaddr_storage src_addr;
+	struct sockaddr_storage dst_addr;
 	struct rdma_dev_addr dev_addr;
 };