Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA: Support more than 255 rdma ports

Current code uses many different types when dealing with a port of a RDMA
device: u8, unsigned int and u32. Switch to u32 to clean up the logic.

This allows us to make (at least) the core view consistent and use the
same type. Unfortunately not all places can be converted. Many uverbs
functions expect port to be u8 so keep those places in order not to break
UAPIs. HW/Spec defined values must also not be changed.

With the switch to u32 we now can support devices with more than 255
ports. U32_MAX is reserved to make control logic a bit easier to deal
with. As a device with U32_MAX ports probably isn't going to happen any
time soon, this seems like a non-issue.

When a device with more than 255 ports is created uverbs will report the
RDMA device as having 255 ports as this is the max currently supported.

The verbs interface is not changed yet because the IBTA spec limits the
port size in too many places to be u8, and all applications that rely on
verbs won't be able to cope with this change. At this stage, we are
extending only the interfaces that use the vendor channel.

Once the limitation is lifted mlx5 in switchdev mode will be able to have
thousands of SFs created by the device. As the only instance of an RDMA
device that reports more than 255 ports will be a representor device and
it exposes itself as a RAW Ethernet only device, CM/MAD/IPoIB and other
ULPs aren't affected by this change, and their sysfs/interfaces that are
exposed to userspace can remain unchanged.

While here, clean up some alignment issues and remove unneeded sanity
checks (mainly in rdmavt).

Link: https://lore.kernel.org/r/20210301070420.439400-1-leon@kernel.org
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

authored by

Mark Bloch and committed by
Jason Gunthorpe
1fb7f897 847d19a4

+776 -776
+38 -46
drivers/infiniband/core/cache.c
··· 121 121 u32 default_gid_indices; 122 122 }; 123 123 124 - static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port) 124 + static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port) 125 125 { 126 126 struct ib_event event; 127 127 ··· 197 197 } 198 198 EXPORT_SYMBOL(ib_cache_gid_parse_type_str); 199 199 200 - static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port) 200 + static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port) 201 201 { 202 202 return device->port_data[port].cache.gid; 203 203 } ··· 237 237 static void free_gid_entry_locked(struct ib_gid_table_entry *entry) 238 238 { 239 239 struct ib_device *device = entry->attr.device; 240 - u8 port_num = entry->attr.port_num; 240 + u32 port_num = entry->attr.port_num; 241 241 struct ib_gid_table *table = rdma_gid_table(device, port_num); 242 242 243 - dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__, 243 + dev_dbg(&device->dev, "%s port=%u index=%d gid %pI6\n", __func__, 244 244 port_num, entry->attr.index, entry->attr.gid.raw); 245 245 246 246 write_lock_irq(&table->rwlock); ··· 282 282 struct ib_gid_table_entry *entry = 283 283 container_of(work, struct ib_gid_table_entry, del_work); 284 284 struct ib_device *device = entry->attr.device; 285 - u8 port_num = entry->attr.port_num; 285 + u32 port_num = entry->attr.port_num; 286 286 struct ib_gid_table *table = rdma_gid_table(device, port_num); 287 287 288 288 mutex_lock(&table->lock); ··· 379 379 * @ix: GID entry index to delete 380 380 * 381 381 */ 382 - static void del_gid(struct ib_device *ib_dev, u8 port, 382 + static void del_gid(struct ib_device *ib_dev, u32 port, 383 383 struct ib_gid_table *table, int ix) 384 384 { 385 385 struct roce_gid_ndev_storage *ndev_storage; ··· 387 387 388 388 lockdep_assert_held(&table->lock); 389 389 390 - dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port, 390 + dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", 
__func__, port, 391 391 ix, table->data_vec[ix]->attr.gid.raw); 392 392 393 393 write_lock_irq(&table->rwlock); ··· 543 543 addrconf_ifid_eui48(&gid->raw[8], dev); 544 544 } 545 545 546 - static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port, 546 + static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port, 547 547 union ib_gid *gid, struct ib_gid_attr *attr, 548 548 unsigned long mask, bool default_gid) 549 549 { ··· 587 587 return ret; 588 588 } 589 589 590 - int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, 590 + int ib_cache_gid_add(struct ib_device *ib_dev, u32 port, 591 591 union ib_gid *gid, struct ib_gid_attr *attr) 592 592 { 593 593 unsigned long mask = GID_ATTR_FIND_MASK_GID | ··· 598 598 } 599 599 600 600 static int 601 - _ib_cache_gid_del(struct ib_device *ib_dev, u8 port, 601 + _ib_cache_gid_del(struct ib_device *ib_dev, u32 port, 602 602 union ib_gid *gid, struct ib_gid_attr *attr, 603 603 unsigned long mask, bool default_gid) 604 604 { ··· 627 627 return ret; 628 628 } 629 629 630 - int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, 630 + int ib_cache_gid_del(struct ib_device *ib_dev, u32 port, 631 631 union ib_gid *gid, struct ib_gid_attr *attr) 632 632 { 633 633 unsigned long mask = GID_ATTR_FIND_MASK_GID | ··· 638 638 return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false); 639 639 } 640 640 641 - int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, 641 + int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, 642 642 struct net_device *ndev) 643 643 { 644 644 struct ib_gid_table *table; ··· 683 683 rdma_find_gid_by_port(struct ib_device *ib_dev, 684 684 const union ib_gid *gid, 685 685 enum ib_gid_type gid_type, 686 - u8 port, struct net_device *ndev) 686 + u32 port, struct net_device *ndev) 687 687 { 688 688 int local_index; 689 689 struct ib_gid_table *table; ··· 734 734 * 735 735 */ 736 736 const struct ib_gid_attr *rdma_find_gid_by_filter( 737 - struct ib_device *ib_dev, 
const union ib_gid *gid, u8 port, 737 + struct ib_device *ib_dev, const union ib_gid *gid, u32 port, 738 738 bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *, 739 739 void *), 740 740 void *context) ··· 818 818 kfree(table); 819 819 } 820 820 821 - static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port, 821 + static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port, 822 822 struct ib_gid_table *table) 823 823 { 824 824 int i; ··· 834 834 mutex_unlock(&table->lock); 835 835 } 836 836 837 - void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, 837 + void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port, 838 838 struct net_device *ndev, 839 839 unsigned long gid_type_mask, 840 840 enum ib_cache_gid_default_mode mode) ··· 867 867 } 868 868 } 869 869 870 - static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port, 870 + static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port, 871 871 struct ib_gid_table *table) 872 872 { 873 873 unsigned int i; ··· 884 884 885 885 static void gid_table_release_one(struct ib_device *ib_dev) 886 886 { 887 - unsigned int p; 887 + u32 p; 888 888 889 889 rdma_for_each_port (ib_dev, p) { 890 890 release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid); ··· 895 895 static int _gid_table_setup_one(struct ib_device *ib_dev) 896 896 { 897 897 struct ib_gid_table *table; 898 - unsigned int rdma_port; 898 + u32 rdma_port; 899 899 900 900 rdma_for_each_port (ib_dev, rdma_port) { 901 901 table = alloc_gid_table( ··· 915 915 916 916 static void gid_table_cleanup_one(struct ib_device *ib_dev) 917 917 { 918 - unsigned int p; 918 + u32 p; 919 919 920 920 rdma_for_each_port (ib_dev, p) 921 921 cleanup_gid_table_port(ib_dev, p, ··· 950 950 * Returns 0 on success or appropriate error code. 
951 951 * 952 952 */ 953 - int rdma_query_gid(struct ib_device *device, u8 port_num, 953 + int rdma_query_gid(struct ib_device *device, u32 port_num, 954 954 int index, union ib_gid *gid) 955 955 { 956 956 struct ib_gid_table *table; ··· 1014 1014 unsigned long mask = GID_ATTR_FIND_MASK_GID | 1015 1015 GID_ATTR_FIND_MASK_GID_TYPE; 1016 1016 struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type}; 1017 - unsigned int p; 1017 + u32 p; 1018 1018 1019 1019 if (ndev) 1020 1020 mask |= GID_ATTR_FIND_MASK_NETDEV; ··· 1043 1043 EXPORT_SYMBOL(rdma_find_gid); 1044 1044 1045 1045 int ib_get_cached_pkey(struct ib_device *device, 1046 - u8 port_num, 1046 + u32 port_num, 1047 1047 int index, 1048 1048 u16 *pkey) 1049 1049 { ··· 1069 1069 } 1070 1070 EXPORT_SYMBOL(ib_get_cached_pkey); 1071 1071 1072 - int ib_get_cached_subnet_prefix(struct ib_device *device, 1073 - u8 port_num, 1074 - u64 *sn_pfx) 1072 + int ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num, 1073 + u64 *sn_pfx) 1075 1074 { 1076 1075 unsigned long flags; 1077 1076 ··· 1085 1086 } 1086 1087 EXPORT_SYMBOL(ib_get_cached_subnet_prefix); 1087 1088 1088 - int ib_find_cached_pkey(struct ib_device *device, 1089 - u8 port_num, 1090 - u16 pkey, 1091 - u16 *index) 1089 + int ib_find_cached_pkey(struct ib_device *device, u32 port_num, 1090 + u16 pkey, u16 *index) 1092 1091 { 1093 1092 struct ib_pkey_cache *cache; 1094 1093 unsigned long flags; ··· 1129 1132 } 1130 1133 EXPORT_SYMBOL(ib_find_cached_pkey); 1131 1134 1132 - int ib_find_exact_cached_pkey(struct ib_device *device, 1133 - u8 port_num, 1134 - u16 pkey, 1135 - u16 *index) 1135 + int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num, 1136 + u16 pkey, u16 *index) 1136 1137 { 1137 1138 struct ib_pkey_cache *cache; 1138 1139 unsigned long flags; ··· 1164 1169 } 1165 1170 EXPORT_SYMBOL(ib_find_exact_cached_pkey); 1166 1171 1167 - int ib_get_cached_lmc(struct ib_device *device, 1168 - u8 port_num, 1169 - u8 *lmc) 1172 + int 
ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc) 1170 1173 { 1171 1174 unsigned long flags; 1172 1175 int ret = 0; ··· 1180 1187 } 1181 1188 EXPORT_SYMBOL(ib_get_cached_lmc); 1182 1189 1183 - int ib_get_cached_port_state(struct ib_device *device, 1184 - u8 port_num, 1190 + int ib_get_cached_port_state(struct ib_device *device, u32 port_num, 1185 1191 enum ib_port_state *port_state) 1186 1192 { 1187 1193 unsigned long flags; ··· 1214 1222 * code. 1215 1223 */ 1216 1224 const struct ib_gid_attr * 1217 - rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index) 1225 + rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index) 1218 1226 { 1219 1227 const struct ib_gid_attr *attr = ERR_PTR(-ENODATA); 1220 1228 struct ib_gid_table *table; ··· 1255 1263 const struct ib_gid_attr *gid_attr; 1256 1264 ssize_t num_entries = 0, ret; 1257 1265 struct ib_gid_table *table; 1258 - unsigned int port_num, i; 1266 + u32 port_num, i; 1259 1267 struct net_device *ndev; 1260 1268 unsigned long flags; 1261 1269 ··· 1353 1361 container_of(attr, struct ib_gid_table_entry, attr); 1354 1362 struct ib_device *device = entry->attr.device; 1355 1363 struct net_device *ndev = ERR_PTR(-EINVAL); 1356 - u8 port_num = entry->attr.port_num; 1364 + u32 port_num = entry->attr.port_num; 1357 1365 struct ib_gid_table *table; 1358 1366 unsigned long flags; 1359 1367 bool valid; ··· 1433 1441 EXPORT_SYMBOL(rdma_read_gid_l2_fields); 1434 1442 1435 1443 static int config_non_roce_gid_cache(struct ib_device *device, 1436 - u8 port, int gid_tbl_len) 1444 + u32 port, int gid_tbl_len) 1437 1445 { 1438 1446 struct ib_gid_attr gid_attr = {}; 1439 1447 struct ib_gid_table *table; ··· 1464 1472 } 1465 1473 1466 1474 static int 1467 - ib_cache_update(struct ib_device *device, u8 port, bool enforce_security) 1475 + ib_cache_update(struct ib_device *device, u32 port, bool enforce_security) 1468 1476 { 1469 1477 struct ib_port_attr *tprops = NULL; 1470 1478 struct ib_pkey_cache 
*pkey_cache = NULL, *old_pkey_cache; ··· 1613 1621 1614 1622 int ib_cache_setup_one(struct ib_device *device) 1615 1623 { 1616 - unsigned int p; 1624 + u32 p; 1617 1625 int err; 1618 1626 1619 1627 rwlock_init(&device->cache_lock); ··· 1633 1641 1634 1642 void ib_cache_release_one(struct ib_device *device) 1635 1643 { 1636 - unsigned int p; 1644 + u32 p; 1637 1645 1638 1646 /* 1639 1647 * The release function frees all the cache elements.
+6 -6
drivers/infiniband/core/cm.c
··· 202 202 struct cm_port { 203 203 struct cm_device *cm_dev; 204 204 struct ib_mad_agent *mad_agent; 205 - u8 port_num; 205 + u32 port_num; 206 206 struct list_head cm_priv_prim_list; 207 207 struct list_head cm_priv_altr_list; 208 208 struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; ··· 1631 1631 req_msg)))); 1632 1632 } 1633 1633 1634 - static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num, 1634 + static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num, 1635 1635 struct sa_path_rec *path, union ib_gid *gid) 1636 1636 { 1637 1637 if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num)) ··· 1750 1750 static u16 cm_get_bth_pkey(struct cm_work *work) 1751 1751 { 1752 1752 struct ib_device *ib_dev = work->port->cm_dev->ib_device; 1753 - u8 port_num = work->port->port_num; 1753 + u32 port_num = work->port->port_num; 1754 1754 u16 pkey_index = work->mad_recv_wc->wc->pkey_index; 1755 1755 u16 pkey; 1756 1756 int ret; ··· 1778 1778 struct sa_path_rec *path) 1779 1779 { 1780 1780 struct ib_device *dev = work->port->cm_dev->ib_device; 1781 - u8 port_num = work->port->port_num; 1781 + u32 port_num = work->port->port_num; 1782 1782 1783 1783 if (rdma_cap_opa_ah(dev, port_num) && 1784 1784 (ib_is_opa_gid(&path->sgid))) { ··· 4334 4334 unsigned long flags; 4335 4335 int ret; 4336 4336 int count = 0; 4337 - unsigned int i; 4337 + u32 i; 4338 4338 4339 4339 cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt), 4340 4340 GFP_KERNEL); ··· 4432 4432 .clr_port_cap_mask = IB_PORT_CM_SUP 4433 4433 }; 4434 4434 unsigned long flags; 4435 - unsigned int i; 4435 + u32 i; 4436 4436 4437 4437 write_lock_irqsave(&cm.device_lock, flags); 4438 4438 list_del(&cm_dev->list);
+9 -9
drivers/infiniband/core/cma.c
··· 278 278 } 279 279 280 280 int cma_get_default_gid_type(struct cma_device *cma_dev, 281 - unsigned int port) 281 + u32 port) 282 282 { 283 283 if (!rdma_is_port_valid(cma_dev->device, port)) 284 284 return -EINVAL; ··· 287 287 } 288 288 289 289 int cma_set_default_gid_type(struct cma_device *cma_dev, 290 - unsigned int port, 290 + u32 port, 291 291 enum ib_gid_type default_gid_type) 292 292 { 293 293 unsigned long supported_gids; ··· 310 310 return 0; 311 311 } 312 312 313 - int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port) 313 + int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port) 314 314 { 315 315 if (!rdma_is_port_valid(cma_dev->device, port)) 316 316 return -EINVAL; ··· 318 318 return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)]; 319 319 } 320 320 321 - int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port, 321 + int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port, 322 322 u8 default_roce_tos) 323 323 { 324 324 if (!rdma_is_port_valid(cma_dev->device, port)) ··· 553 553 } 554 554 555 555 static const struct ib_gid_attr * 556 - cma_validate_port(struct ib_device *device, u8 port, 556 + cma_validate_port(struct ib_device *device, u32 port, 557 557 enum ib_gid_type gid_type, 558 558 union ib_gid *gid, 559 559 struct rdma_id_private *id_priv) ··· 611 611 struct cma_device *cma_dev; 612 612 enum ib_gid_type gid_type; 613 613 int ret = -ENODEV; 614 - unsigned int port; 614 + u32 port; 615 615 616 616 if (dev_addr->dev_type != ARPHRD_INFINIBAND && 617 617 id_priv->id.ps == RDMA_PS_IPOIB) ··· 702 702 struct cma_device *cma_dev; 703 703 enum ib_gid_type gid_type; 704 704 int ret = -ENODEV; 705 - unsigned int port; 706 705 union ib_gid gid; 706 + u32 port; 707 707 708 708 if (dev_addr->dev_type != ARPHRD_INFINIBAND && 709 709 id_priv->id.ps == RDMA_PS_IPOIB) ··· 1572 1572 static bool cma_protocol_roce(const struct rdma_cm_id *id) 1573 1573 { 1574 1574 struct ib_device 
*device = id->device; 1575 - const int port_num = id->port_num ?: rdma_start_port(device); 1575 + const u32 port_num = id->port_num ?: rdma_start_port(device); 1576 1576 1577 1577 return rdma_protocol_roce(device, port_num); 1578 1578 } ··· 4861 4861 struct rdma_id_private *to_destroy; 4862 4862 struct cma_device *cma_dev; 4863 4863 struct rdma_id_private *id_priv; 4864 - unsigned int i; 4865 4864 unsigned long supported_gids = 0; 4866 4865 int ret; 4866 + u32 i; 4867 4867 4868 4868 cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); 4869 4869 if (!cma_dev)
+4 -4
drivers/infiniband/core/cma_configfs.c
··· 43 43 struct cma_dev_group; 44 44 45 45 struct cma_dev_port_group { 46 - unsigned int port_num; 46 + u32 port_num; 47 47 struct cma_dev_group *cma_dev_group; 48 48 struct config_group group; 49 49 }; ··· 200 200 static int make_cma_ports(struct cma_dev_group *cma_dev_group, 201 201 struct cma_device *cma_dev) 202 202 { 203 - struct ib_device *ibdev; 204 - unsigned int i; 205 - unsigned int ports_num; 206 203 struct cma_dev_port_group *ports; 204 + struct ib_device *ibdev; 205 + u32 ports_num; 206 + u32 i; 207 207 208 208 ibdev = cma_get_ib_dev(cma_dev); 209 209
+4 -4
drivers/infiniband/core/cma_priv.h
··· 117 117 typedef bool (*cma_device_filter)(struct ib_device *, void *); 118 118 struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, 119 119 void *cookie); 120 - int cma_get_default_gid_type(struct cma_device *dev, unsigned int port); 121 - int cma_set_default_gid_type(struct cma_device *dev, unsigned int port, 120 + int cma_get_default_gid_type(struct cma_device *dev, u32 port); 121 + int cma_set_default_gid_type(struct cma_device *dev, u32 port, 122 122 enum ib_gid_type default_gid_type); 123 - int cma_get_default_roce_tos(struct cma_device *dev, unsigned int port); 124 - int cma_set_default_roce_tos(struct cma_device *dev, unsigned int port, 123 + int cma_get_default_roce_tos(struct cma_device *dev, u32 port); 124 + int cma_set_default_roce_tos(struct cma_device *dev, u32 port, 125 125 u8 default_roce_tos); 126 126 struct ib_device *cma_get_ib_dev(struct cma_device *dev); 127 127
+14 -14
drivers/infiniband/core/core_priv.h
··· 83 83 int ib_device_rename(struct ib_device *ibdev, const char *name); 84 84 int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim); 85 85 86 - typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port, 86 + typedef void (*roce_netdev_callback)(struct ib_device *device, u32 port, 87 87 struct net_device *idev, void *cookie); 88 88 89 - typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port, 89 + typedef bool (*roce_netdev_filter)(struct ib_device *device, u32 port, 90 90 struct net_device *idev, void *cookie); 91 91 92 92 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, 93 - unsigned int port); 93 + u32 port); 94 94 95 95 void ib_enum_roce_netdev(struct ib_device *ib_dev, 96 96 roce_netdev_filter filter, ··· 113 113 struct ib_client_nl_info { 114 114 struct sk_buff *nl_msg; 115 115 struct device *cdev; 116 - unsigned int port; 116 + u32 port; 117 117 u64 abi; 118 118 }; 119 119 int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name, ··· 128 128 129 129 const char *ib_cache_gid_type_str(enum ib_gid_type gid_type); 130 130 131 - void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, 131 + void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port, 132 132 struct net_device *ndev, 133 133 unsigned long gid_type_mask, 134 134 enum ib_cache_gid_default_mode mode); 135 135 136 - int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, 136 + int ib_cache_gid_add(struct ib_device *ib_dev, u32 port, 137 137 union ib_gid *gid, struct ib_gid_attr *attr); 138 138 139 - int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, 139 + int ib_cache_gid_del(struct ib_device *ib_dev, u32 port, 140 140 union ib_gid *gid, struct ib_gid_attr *attr); 141 141 142 - int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, 142 + int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, 143 143 struct net_device *ndev); 144 144 145 145 int roce_gid_mgmt_init(void); 146 
146 void roce_gid_mgmt_cleanup(void); 147 147 148 - unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port); 148 + unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port); 149 149 150 150 int ib_cache_setup_one(struct ib_device *device); 151 151 void ib_cache_cleanup_one(struct ib_device *device); ··· 215 215 struct netlink_ext_ack *extack); 216 216 217 217 int ib_get_cached_subnet_prefix(struct ib_device *device, 218 - u8 port_num, 219 - u64 *sn_pfx); 218 + u32 port_num, 219 + u64 *sn_pfx); 220 220 221 221 #ifdef CONFIG_SECURITY_INFINIBAND 222 222 void ib_security_release_port_pkey_list(struct ib_device *device); 223 223 224 224 void ib_security_cache_change(struct ib_device *device, 225 - u8 port_num, 225 + u32 port_num, 226 226 u64 subnet_prefix); 227 227 228 228 int ib_security_modify_qp(struct ib_qp *qp, ··· 247 247 } 248 248 249 249 static inline void ib_security_cache_change(struct ib_device *device, 250 - u8 port_num, 250 + u32 port_num, 251 251 u64 subnet_prefix) 252 252 { 253 253 } ··· 381 381 382 382 int rdma_compatdev_set(u8 enable); 383 383 384 - int ib_port_register_module_stat(struct ib_device *device, u8 port_num, 384 + int ib_port_register_module_stat(struct ib_device *device, u32 port_num, 385 385 struct kobject *kobj, struct kobj_type *ktype, 386 386 const char *name); 387 387 void ib_port_unregister_module_stat(struct kobject *kobj);
+10 -10
drivers/infiniband/core/counters.c
··· 34 34 * 35 35 * Return 0 on success. 36 36 */ 37 - int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port, 37 + int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port, 38 38 enum rdma_nl_counter_mask mask, 39 39 struct netlink_ext_ack *extack) 40 40 { ··· 100 100 return ret; 101 101 } 102 102 103 - static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u8 port, 103 + static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port, 104 104 struct ib_qp *qp, 105 105 enum rdma_nl_counter_mode mode) 106 106 { ··· 238 238 * Return: The counter (with ref-count increased) if found 239 239 */ 240 240 static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp, 241 - u8 port) 241 + u32 port) 242 242 { 243 243 struct rdma_port_counter *port_counter; 244 244 struct rdma_counter *counter = NULL; ··· 282 282 * rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on 283 283 * the auto-mode rule 284 284 */ 285 - int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port) 285 + int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port) 286 286 { 287 287 struct rdma_port_counter *port_counter; 288 288 struct ib_device *dev = qp->device; ··· 352 352 } 353 353 354 354 static u64 get_running_counters_hwstat_sum(struct ib_device *dev, 355 - u8 port, u32 index) 355 + u32 port, u32 index) 356 356 { 357 357 struct rdma_restrack_entry *res; 358 358 struct rdma_restrack_root *rt; ··· 388 388 * rdma_counter_get_hwstat_value() - Get the sum value of all counters on a 389 389 * specific port, including the running ones and history data 390 390 */ 391 - u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index) 391 + u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index) 392 392 { 393 393 struct rdma_port_counter *port_counter; 394 394 u64 sum; ··· 443 443 /* 444 444 * rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id 445 445 */ 446 - int rdma_counter_bind_qpn(struct ib_device 
*dev, u8 port, 446 + int rdma_counter_bind_qpn(struct ib_device *dev, u32 port, 447 447 u32 qp_num, u32 counter_id) 448 448 { 449 449 struct rdma_port_counter *port_counter; ··· 493 493 * rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it 494 494 * The id of new counter is returned in @counter_id 495 495 */ 496 - int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, 496 + int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port, 497 497 u32 qp_num, u32 *counter_id) 498 498 { 499 499 struct rdma_port_counter *port_counter; ··· 540 540 /* 541 541 * rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter 542 542 */ 543 - int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port, 543 + int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port, 544 544 u32 qp_num, u32 counter_id) 545 545 { 546 546 struct rdma_port_counter *port_counter; ··· 573 573 return ret; 574 574 } 575 575 576 - int rdma_counter_get_mode(struct ib_device *dev, u8 port, 576 + int rdma_counter_get_mode(struct ib_device *dev, u32 port, 577 577 enum rdma_nl_counter_mode *mode, 578 578 enum rdma_nl_counter_mask *mask) 579 579 {
+20 -16
drivers/infiniband/core/device.c
··· 779 779 static int alloc_port_data(struct ib_device *device) 780 780 { 781 781 struct ib_port_data_rcu *pdata_rcu; 782 - unsigned int port; 782 + u32 port; 783 783 784 784 if (device->port_data) 785 785 return 0; 786 786 787 787 /* This can only be called once the physical port range is defined */ 788 788 if (WARN_ON(!device->phys_port_cnt)) 789 + return -EINVAL; 790 + 791 + /* Reserve U32_MAX so the logic to go over all the ports is sane */ 792 + if (WARN_ON(device->phys_port_cnt == U32_MAX)) 789 793 return -EINVAL; 790 794 791 795 /* ··· 823 819 return 0; 824 820 } 825 821 826 - static int verify_immutable(const struct ib_device *dev, u8 port) 822 + static int verify_immutable(const struct ib_device *dev, u32 port) 827 823 { 828 824 return WARN_ON(!rdma_cap_ib_mad(dev, port) && 829 825 rdma_max_mad_size(dev, port) != 0); ··· 831 827 832 828 static int setup_port_data(struct ib_device *device) 833 829 { 834 - unsigned int port; 830 + u32 port; 835 831 int ret; 836 832 837 833 ret = alloc_port_data(device); ··· 2009 2005 } 2010 2006 2011 2007 static int iw_query_port(struct ib_device *device, 2012 - u8 port_num, 2008 + u32 port_num, 2013 2009 struct ib_port_attr *port_attr) 2014 2010 { 2015 2011 struct in_device *inetdev; ··· 2048 2044 } 2049 2045 2050 2046 static int __ib_query_port(struct ib_device *device, 2051 - u8 port_num, 2047 + u32 port_num, 2052 2048 struct ib_port_attr *port_attr) 2053 2049 { 2054 2050 union ib_gid gid = {}; ··· 2082 2078 * @port_attr pointer. 2083 2079 */ 2084 2080 int ib_query_port(struct ib_device *device, 2085 - u8 port_num, 2081 + u32 port_num, 2086 2082 struct ib_port_attr *port_attr) 2087 2083 { 2088 2084 if (!rdma_is_port_valid(device, port_num)) ··· 2134 2130 * NETDEV_UNREGISTER event. 
2135 2131 */ 2136 2132 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, 2137 - unsigned int port) 2133 + u32 port) 2138 2134 { 2139 2135 struct net_device *old_ndev; 2140 2136 struct ib_port_data *pdata; ··· 2177 2173 static void free_netdevs(struct ib_device *ib_dev) 2178 2174 { 2179 2175 unsigned long flags; 2180 - unsigned int port; 2176 + u32 port; 2181 2177 2182 2178 if (!ib_dev->port_data) 2183 2179 return; ··· 2208 2204 } 2209 2205 2210 2206 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, 2211 - unsigned int port) 2207 + u32 port) 2212 2208 { 2213 2209 struct ib_port_data *pdata; 2214 2210 struct net_device *res; ··· 2295 2291 roce_netdev_callback cb, 2296 2292 void *cookie) 2297 2293 { 2298 - unsigned int port; 2294 + u32 port; 2299 2295 2300 2296 rdma_for_each_port (ib_dev, port) 2301 2297 if (rdma_protocol_roce(ib_dev, port)) { ··· 2373 2369 * ib_query_pkey() fetches the specified P_Key table entry. 2374 2370 */ 2375 2371 int ib_query_pkey(struct ib_device *device, 2376 - u8 port_num, u16 index, u16 *pkey) 2372 + u32 port_num, u16 index, u16 *pkey) 2377 2373 { 2378 2374 if (!rdma_is_port_valid(device, port_num)) 2379 2375 return -EINVAL; ··· 2418 2414 * @port_modify_mask and @port_modify structure. 2419 2415 */ 2420 2416 int ib_modify_port(struct ib_device *device, 2421 - u8 port_num, int port_modify_mask, 2417 + u32 port_num, int port_modify_mask, 2422 2418 struct ib_port_modify *port_modify) 2423 2419 { 2424 2420 int rc; ··· 2450 2446 * parameter may be NULL. 2451 2447 */ 2452 2448 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2453 - u8 *port_num, u16 *index) 2449 + u32 *port_num, u16 *index) 2454 2450 { 2455 2451 union ib_gid tmp_gid; 2456 - unsigned int port; 2452 + u32 port; 2457 2453 int ret, i; 2458 2454 2459 2455 rdma_for_each_port (device, port) { ··· 2487 2483 * @index: The index into the PKey table where the PKey was found. 
2488 2484 */ 2489 2485 int ib_find_pkey(struct ib_device *device, 2490 - u8 port_num, u16 pkey, u16 *index) 2486 + u32 port_num, u16 pkey, u16 *index) 2491 2487 { 2492 2488 int ret, i; 2493 2489 u16 tmp_pkey; ··· 2530 2526 * 2531 2527 */ 2532 2528 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, 2533 - u8 port, 2529 + u32 port, 2534 2530 u16 pkey, 2535 2531 const union ib_gid *gid, 2536 2532 const struct sockaddr *addr)
+16 -16
drivers/infiniband/core/mad.c
··· 61 61 { 62 62 u16 pkey; 63 63 struct ib_device *dev = qp_info->port_priv->device; 64 - u8 pnum = qp_info->port_priv->port_num; 64 + u32 pnum = qp_info->port_priv->port_num; 65 65 struct ib_ud_wr *wr = &mad_send_wr->send_wr; 66 66 struct rdma_ah_attr attr = {}; 67 67 ··· 118 118 * Assumes ib_mad_port_list_lock is being held 119 119 */ 120 120 static inline struct ib_mad_port_private * 121 - __ib_get_mad_port(struct ib_device *device, int port_num) 121 + __ib_get_mad_port(struct ib_device *device, u32 port_num) 122 122 { 123 123 struct ib_mad_port_private *entry; 124 124 ··· 134 134 * for a device/port 135 135 */ 136 136 static inline struct ib_mad_port_private * 137 - ib_get_mad_port(struct ib_device *device, int port_num) 137 + ib_get_mad_port(struct ib_device *device, u32 port_num) 138 138 { 139 139 struct ib_mad_port_private *entry; 140 140 unsigned long flags; ··· 222 222 * Context: Process context. 223 223 */ 224 224 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, 225 - u8 port_num, 225 + u32 port_num, 226 226 enum ib_qp_type qp_type, 227 227 struct ib_mad_reg_req *mad_reg_req, 228 228 u8 rmpp_version, ··· 549 549 } 550 550 551 551 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid, 552 - u16 pkey_index, u8 port_num, struct ib_wc *wc) 552 + u16 pkey_index, u32 port_num, struct ib_wc *wc) 553 553 { 554 554 memset(wc, 0, sizeof *wc); 555 555 wc->wr_cqe = cqe; ··· 608 608 struct ib_mad_port_private *port_priv; 609 609 struct ib_mad_agent_private *recv_mad_agent = NULL; 610 610 struct ib_device *device = mad_agent_priv->agent.device; 611 - u8 port_num; 611 + u32 port_num; 612 612 struct ib_wc mad_wc; 613 613 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr; 614 614 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); ··· 1613 1613 1614 1614 if (mad_agent && !mad_agent->agent.recv_handler) { 1615 1615 dev_notice(&port_priv->device->dev, 1616 - "No receive handler for client %p on port %d\n", 1616 + "No 
receive handler for client %p on port %u\n", 1617 1617 &mad_agent->agent, port_priv->port_num); 1618 1618 deref_mad_agent(mad_agent); 1619 1619 mad_agent = NULL; ··· 1685 1685 u8 send_resp, rcv_resp; 1686 1686 union ib_gid sgid; 1687 1687 struct ib_device *device = mad_agent_priv->agent.device; 1688 - u8 port_num = mad_agent_priv->agent.port_num; 1688 + u32 port_num = mad_agent_priv->agent.port_num; 1689 1689 u8 lmc; 1690 1690 bool has_grh; 1691 1691 ··· 1867 1867 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, 1868 1868 const struct ib_mad_qp_info *qp_info, 1869 1869 const struct ib_wc *wc, 1870 - int port_num, 1870 + u32 port_num, 1871 1871 struct ib_mad_private *recv, 1872 1872 struct ib_mad_private *response) 1873 1873 { ··· 1954 1954 handle_opa_smi(struct ib_mad_port_private *port_priv, 1955 1955 struct ib_mad_qp_info *qp_info, 1956 1956 struct ib_wc *wc, 1957 - int port_num, 1957 + u32 port_num, 1958 1958 struct ib_mad_private *recv, 1959 1959 struct ib_mad_private *response) 1960 1960 { ··· 2010 2010 handle_smi(struct ib_mad_port_private *port_priv, 2011 2011 struct ib_mad_qp_info *qp_info, 2012 2012 struct ib_wc *wc, 2013 - int port_num, 2013 + u32 port_num, 2014 2014 struct ib_mad_private *recv, 2015 2015 struct ib_mad_private *response, 2016 2016 bool opa) ··· 2034 2034 struct ib_mad_private_header *mad_priv_hdr; 2035 2035 struct ib_mad_private *recv, *response = NULL; 2036 2036 struct ib_mad_agent_private *mad_agent; 2037 - int port_num; 2037 + u32 port_num; 2038 2038 int ret = IB_MAD_RESULT_SUCCESS; 2039 2039 size_t mad_size; 2040 2040 u16 resp_mad_pkey_index = 0; ··· 2947 2947 * Create the QP, PD, MR, and CQ if needed 2948 2948 */ 2949 2949 static int ib_mad_port_open(struct ib_device *device, 2950 - int port_num) 2950 + u32 port_num) 2951 2951 { 2952 2952 int ret, cq_size; 2953 2953 struct ib_mad_port_private *port_priv; ··· 3002 3002 if (ret) 3003 3003 goto error7; 3004 3004 3005 - snprintf(name, sizeof name, 
"ib_mad%d", port_num); 3005 + snprintf(name, sizeof(name), "ib_mad%u", port_num); 3006 3006 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 3007 3007 if (!port_priv->wq) { 3008 3008 ret = -ENOMEM; ··· 3048 3048 * If there are no classes using the port, free the port 3049 3049 * resources (CQ, MR, PD, QP) and remove the port's info structure 3050 3050 */ 3051 - static int ib_mad_port_close(struct ib_device *device, int port_num) 3051 + static int ib_mad_port_close(struct ib_device *device, u32 port_num) 3052 3052 { 3053 3053 struct ib_mad_port_private *port_priv; 3054 3054 unsigned long flags; ··· 3057 3057 port_priv = __ib_get_mad_port(device, port_num); 3058 3058 if (port_priv == NULL) { 3059 3059 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3060 - dev_err(&device->dev, "Port %d not found\n", port_num); 3060 + dev_err(&device->dev, "Port %u not found\n", port_num); 3061 3061 return -ENODEV; 3062 3062 } 3063 3063 list_del_init(&port_priv->port_list);
+4 -4
drivers/infiniband/core/multicast.c
··· 63 63 struct rb_root table; 64 64 atomic_t refcount; 65 65 struct completion comp; 66 - u8 port_num; 66 + u32 port_num; 67 67 }; 68 68 69 69 struct mcast_device { ··· 605 605 */ 606 606 struct ib_sa_multicast * 607 607 ib_sa_join_multicast(struct ib_sa_client *client, 608 - struct ib_device *device, u8 port_num, 608 + struct ib_device *device, u32 port_num, 609 609 struct ib_sa_mcmember_rec *rec, 610 610 ib_sa_comp_mask comp_mask, gfp_t gfp_mask, 611 611 int (*callback)(int status, ··· 690 690 } 691 691 EXPORT_SYMBOL(ib_sa_free_multicast); 692 692 693 - int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num, 693 + int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num, 694 694 union ib_gid *mgid, struct ib_sa_mcmember_rec *rec) 695 695 { 696 696 struct mcast_device *dev; ··· 732 732 * success or appropriate error code. 733 733 * 734 734 */ 735 - int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, 735 + int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num, 736 736 struct ib_sa_mcmember_rec *rec, 737 737 struct net_device *ndev, 738 738 enum ib_gid_type gid_type,
+1 -1
drivers/infiniband/core/nldev.c
··· 242 242 { 243 243 char fw[IB_FW_VERSION_NAME_MAX]; 244 244 int ret = 0; 245 - u8 port; 245 + u32 port; 246 246 247 247 if (fill_nldev_handle(msg, device)) 248 248 return -EMSGSIZE;
+2 -2
drivers/infiniband/core/opa_smi.h
··· 40 40 #include "smi.h" 41 41 42 42 enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, 43 - int port_num, int phys_port_cnt); 43 + u32 port_num, int phys_port_cnt); 44 44 int opa_smi_get_fwd_port(struct opa_smp *smp); 45 45 extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp); 46 46 extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, 47 - bool is_switch, int port_num); 47 + bool is_switch, u32 port_num); 48 48 49 49 /* 50 50 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
+26 -26
drivers/infiniband/core/roce_gid_mgmt.c
··· 70 70 }; 71 71 72 72 static const struct { 73 - bool (*is_supported)(const struct ib_device *device, u8 port_num); 73 + bool (*is_supported)(const struct ib_device *device, u32 port_num); 74 74 enum ib_gid_type gid_type; 75 75 } PORT_CAP_TO_GID_TYPE[] = { 76 76 {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE}, ··· 79 79 80 80 #define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE) 81 81 82 - unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port) 82 + unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port) 83 83 { 84 84 int i; 85 85 unsigned int ret_flags = 0; ··· 96 96 EXPORT_SYMBOL(roce_gid_type_mask_support); 97 97 98 98 static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev, 99 - u8 port, union ib_gid *gid, 99 + u32 port, union ib_gid *gid, 100 100 struct ib_gid_attr *gid_attr) 101 101 { 102 102 int i; ··· 144 144 #define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \ 145 145 BONDING_SLAVE_STATE_NA) 146 146 static bool 147 - is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port, 147 + is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port, 148 148 struct net_device *rdma_ndev, void *cookie) 149 149 { 150 150 struct net_device *real_dev; ··· 168 168 } 169 169 170 170 static bool 171 - is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port, 171 + is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port, 172 172 struct net_device *rdma_ndev, void *cookie) 173 173 { 174 174 struct net_device *master_dev; ··· 197 197 * considered for deriving default RoCE GID, returns false otherwise. 
198 198 */ 199 199 static bool 200 - is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port, 200 + is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port, 201 201 struct net_device *rdma_ndev, void *cookie) 202 202 { 203 203 struct net_device *cookie_ndev = cookie; ··· 223 223 return res; 224 224 } 225 225 226 - static bool pass_all_filter(struct ib_device *ib_dev, u8 port, 226 + static bool pass_all_filter(struct ib_device *ib_dev, u32 port, 227 227 struct net_device *rdma_ndev, void *cookie) 228 228 { 229 229 return true; 230 230 } 231 231 232 - static bool upper_device_filter(struct ib_device *ib_dev, u8 port, 232 + static bool upper_device_filter(struct ib_device *ib_dev, u32 port, 233 233 struct net_device *rdma_ndev, void *cookie) 234 234 { 235 235 bool res; ··· 260 260 * not have been established as slave device yet. 261 261 */ 262 262 static bool 263 - is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port, 263 + is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port, 264 264 struct net_device *rdma_ndev, 265 265 void *cookie) 266 266 { ··· 280 280 281 281 static void update_gid_ip(enum gid_op_type gid_op, 282 282 struct ib_device *ib_dev, 283 - u8 port, struct net_device *ndev, 283 + u32 port, struct net_device *ndev, 284 284 struct sockaddr *addr) 285 285 { 286 286 union ib_gid gid; ··· 294 294 } 295 295 296 296 static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, 297 - u8 port, 297 + u32 port, 298 298 struct net_device *rdma_ndev, 299 299 struct net_device *event_ndev) 300 300 { ··· 328 328 } 329 329 330 330 static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, 331 - u8 port, struct net_device *ndev) 331 + u32 port, struct net_device *ndev) 332 332 { 333 333 const struct in_ifaddr *ifa; 334 334 struct in_device *in_dev; ··· 372 372 } 373 373 374 374 static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, 375 - u8 port, struct net_device *ndev) 375 + u32 port, struct net_device *ndev) 
376 376 { 377 377 struct inet6_ifaddr *ifp; 378 378 struct inet6_dev *in6_dev; ··· 417 417 } 418 418 } 419 419 420 - static void _add_netdev_ips(struct ib_device *ib_dev, u8 port, 420 + static void _add_netdev_ips(struct ib_device *ib_dev, u32 port, 421 421 struct net_device *ndev) 422 422 { 423 423 enum_netdev_ipv4_ips(ib_dev, port, ndev); ··· 425 425 enum_netdev_ipv6_ips(ib_dev, port, ndev); 426 426 } 427 427 428 - static void add_netdev_ips(struct ib_device *ib_dev, u8 port, 428 + static void add_netdev_ips(struct ib_device *ib_dev, u32 port, 429 429 struct net_device *rdma_ndev, void *cookie) 430 430 { 431 431 _add_netdev_ips(ib_dev, port, cookie); 432 432 } 433 433 434 - static void del_netdev_ips(struct ib_device *ib_dev, u8 port, 434 + static void del_netdev_ips(struct ib_device *ib_dev, u32 port, 435 435 struct net_device *rdma_ndev, void *cookie) 436 436 { 437 437 ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie); ··· 446 446 * 447 447 * del_default_gids() deletes the default GIDs of the event/cookie netdevice. 
448 448 */ 449 - static void del_default_gids(struct ib_device *ib_dev, u8 port, 449 + static void del_default_gids(struct ib_device *ib_dev, u32 port, 450 450 struct net_device *rdma_ndev, void *cookie) 451 451 { 452 452 struct net_device *cookie_ndev = cookie; ··· 458 458 IB_CACHE_GID_DEFAULT_MODE_DELETE); 459 459 } 460 460 461 - static void add_default_gids(struct ib_device *ib_dev, u8 port, 461 + static void add_default_gids(struct ib_device *ib_dev, u32 port, 462 462 struct net_device *rdma_ndev, void *cookie) 463 463 { 464 464 struct net_device *event_ndev = cookie; ··· 470 470 } 471 471 472 472 static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev, 473 - u8 port, 473 + u32 port, 474 474 struct net_device *rdma_ndev, 475 475 void *cookie) 476 476 { ··· 515 515 EXPORT_SYMBOL(rdma_roce_rescan_device); 516 516 517 517 static void callback_for_addr_gid_device_scan(struct ib_device *device, 518 - u8 port, 518 + u32 port, 519 519 struct net_device *rdma_ndev, 520 520 void *cookie) 521 521 { ··· 547 547 return 0; 548 548 } 549 549 550 - static void handle_netdev_upper(struct ib_device *ib_dev, u8 port, 550 + static void handle_netdev_upper(struct ib_device *ib_dev, u32 port, 551 551 void *cookie, 552 552 void (*handle_netdev)(struct ib_device *ib_dev, 553 - u8 port, 553 + u32 port, 554 554 struct net_device *ndev)) 555 555 { 556 556 struct net_device *ndev = cookie; ··· 574 574 } 575 575 } 576 576 577 - static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, 577 + static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, 578 578 struct net_device *event_ndev) 579 579 { 580 580 ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev); 581 581 } 582 582 583 - static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port, 583 + static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port, 584 584 struct net_device *rdma_ndev, void *cookie) 585 585 { 586 586 handle_netdev_upper(ib_dev, port, cookie, 
_roce_del_all_netdev_gids); 587 587 } 588 588 589 - static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port, 589 + static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port, 590 590 struct net_device *rdma_ndev, void *cookie) 591 591 { 592 592 handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips); 593 593 } 594 594 595 - static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port, 595 + static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port, 596 596 struct net_device *rdma_ndev, 597 597 void *cookie) 598 598 {
+13 -12
drivers/infiniband/core/rw.c
··· 25 25 * registration is also enabled if registering memory might yield better 26 26 * performance than using multiple SGE entries, see rdma_rw_io_needs_mr() 27 27 */ 28 - static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num) 28 + static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num) 29 29 { 30 30 if (rdma_protocol_iwarp(dev, port_num)) 31 31 return true; ··· 42 42 * optimization otherwise. Additionally we have a debug option to force usage 43 43 * of MRs to help testing this code path. 44 44 */ 45 - static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num, 45 + static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num, 46 46 enum dma_data_direction dir, int dma_nents) 47 47 { 48 48 if (dir == DMA_FROM_DEVICE) { ··· 87 87 } 88 88 89 89 /* Caller must have zero-initialized *reg. */ 90 - static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num, 90 + static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num, 91 91 struct rdma_rw_reg_ctx *reg, struct scatterlist *sg, 92 92 u32 sg_cnt, u32 offset) 93 93 { ··· 121 121 } 122 122 123 123 static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, 124 - u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset, 124 + u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset, 125 125 u64 remote_addr, u32 rkey, enum dma_data_direction dir) 126 126 { 127 127 struct rdma_rw_reg_ctx *prev = NULL; ··· 308 308 * Returns the number of WQEs that will be needed on the workqueue if 309 309 * successful, or a negative error code. 310 310 */ 311 - int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, 311 + int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, 312 312 struct scatterlist *sg, u32 sg_cnt, u32 sg_offset, 313 313 u64 remote_addr, u32 rkey, enum dma_data_direction dir) 314 314 { ··· 377 377 * successful, or a negative error code. 
378 378 */ 379 379 int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, 380 - u8 port_num, struct scatterlist *sg, u32 sg_cnt, 380 + u32 port_num, struct scatterlist *sg, u32 sg_cnt, 381 381 struct scatterlist *prot_sg, u32 prot_sg_cnt, 382 382 struct ib_sig_attrs *sig_attrs, 383 383 u64 remote_addr, u32 rkey, enum dma_data_direction dir) ··· 505 505 * completion notification. 506 506 */ 507 507 struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, 508 - u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) 508 + u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr) 509 509 { 510 510 struct ib_send_wr *first_wr, *last_wr; 511 511 int i; ··· 562 562 * is not set @cqe must be set so that the caller gets a completion 563 563 * notification. 564 564 */ 565 - int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, 565 + int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, 566 566 struct ib_cqe *cqe, struct ib_send_wr *chain_wr) 567 567 { 568 568 struct ib_send_wr *first_wr; ··· 581 581 * @sg_cnt: number of entries in @sg 582 582 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ 583 583 */ 584 - void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, 585 - struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir) 584 + void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, 585 + u32 port_num, struct scatterlist *sg, u32 sg_cnt, 586 + enum dma_data_direction dir) 586 587 { 587 588 int i; 588 589 ··· 621 620 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ 622 621 */ 623 622 void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp, 624 - u8 port_num, struct scatterlist *sg, u32 sg_cnt, 623 + u32 port_num, struct scatterlist *sg, u32 sg_cnt, 625 624 struct scatterlist *prot_sg, u32 prot_sg_cnt, 626 625 enum dma_data_direction dir) 627 626 { ··· 648 647 * compute 
max_rdma_ctxts and the size of the transport's Send and 649 648 * Send Completion Queues. 650 649 */ 651 - unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num, 650 + unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num, 652 651 unsigned int maxpages) 653 652 { 654 653 unsigned int mr_pages;
+1 -1
drivers/infiniband/core/sa.h
··· 49 49 } 50 50 51 51 int ib_sa_mcmember_rec_query(struct ib_sa_client *client, 52 - struct ib_device *device, u8 port_num, u8 method, 52 + struct ib_device *device, u32 port_num, u8 method, 53 53 struct ib_sa_mcmember_rec *rec, 54 54 ib_sa_comp_mask comp_mask, 55 55 unsigned long timeout_ms, gfp_t gfp_mask,
+11 -11
drivers/infiniband/core/sa_query.c
··· 95 95 struct delayed_work ib_cpi_work; 96 96 spinlock_t classport_lock; /* protects class port info set */ 97 97 spinlock_t ah_lock; 98 - u8 port_num; 98 + u32 port_num; 99 99 }; 100 100 101 101 struct ib_sa_device { ··· 1194 1194 } 1195 1195 EXPORT_SYMBOL(ib_sa_cancel_query); 1196 1196 1197 - static u8 get_src_path_mask(struct ib_device *device, u8 port_num) 1197 + static u8 get_src_path_mask(struct ib_device *device, u32 port_num) 1198 1198 { 1199 1199 struct ib_sa_device *sa_dev; 1200 1200 struct ib_sa_port *port; ··· 1213 1213 return src_path_mask; 1214 1214 } 1215 1215 1216 - static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num, 1216 + static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num, 1217 1217 struct sa_path_rec *rec, 1218 1218 struct rdma_ah_attr *ah_attr, 1219 1219 const struct ib_gid_attr *gid_attr) ··· 1251 1251 * User must invoke rdma_destroy_ah_attr() to release reference to SGID 1252 1252 * attributes which are initialized using ib_init_ah_attr_from_path(). 1253 1253 */ 1254 - int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, 1254 + int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num, 1255 1255 struct sa_path_rec *rec, 1256 1256 struct rdma_ah_attr *ah_attr, 1257 1257 const struct ib_gid_attr *gid_attr) ··· 1409 1409 1410 1410 static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client, 1411 1411 struct ib_sa_device *sa_dev, 1412 - u8 port_num) 1412 + u32 port_num) 1413 1413 { 1414 1414 struct ib_sa_port *port; 1415 1415 unsigned long flags; ··· 1444 1444 */ 1445 1445 static int opa_pr_query_possible(struct ib_sa_client *client, 1446 1446 struct ib_sa_device *sa_dev, 1447 - struct ib_device *device, u8 port_num, 1447 + struct ib_device *device, u32 port_num, 1448 1448 struct sa_path_rec *rec) 1449 1449 { 1450 1450 struct ib_port_attr port_attr; ··· 1533 1533 * the query. 
1534 1534 */ 1535 1535 int ib_sa_path_rec_get(struct ib_sa_client *client, 1536 - struct ib_device *device, u8 port_num, 1536 + struct ib_device *device, u32 port_num, 1537 1537 struct sa_path_rec *rec, 1538 1538 ib_sa_comp_mask comp_mask, 1539 1539 unsigned long timeout_ms, gfp_t gfp_mask, ··· 1688 1688 * the query. 1689 1689 */ 1690 1690 int ib_sa_service_rec_query(struct ib_sa_client *client, 1691 - struct ib_device *device, u8 port_num, u8 method, 1691 + struct ib_device *device, u32 port_num, u8 method, 1692 1692 struct ib_sa_service_rec *rec, 1693 1693 ib_sa_comp_mask comp_mask, 1694 1694 unsigned long timeout_ms, gfp_t gfp_mask, ··· 1784 1784 } 1785 1785 1786 1786 int ib_sa_mcmember_rec_query(struct ib_sa_client *client, 1787 - struct ib_device *device, u8 port_num, 1787 + struct ib_device *device, u32 port_num, 1788 1788 u8 method, 1789 1789 struct ib_sa_mcmember_rec *rec, 1790 1790 ib_sa_comp_mask comp_mask, ··· 1876 1876 } 1877 1877 1878 1878 int ib_sa_guid_info_rec_query(struct ib_sa_client *client, 1879 - struct ib_device *device, u8 port_num, 1879 + struct ib_device *device, u32 port_num, 1880 1880 struct ib_sa_guidinfo_rec *rec, 1881 1881 ib_sa_comp_mask comp_mask, u8 method, 1882 1882 unsigned long timeout_ms, gfp_t gfp_mask, ··· 2265 2265 unsigned long flags; 2266 2266 struct ib_sa_device *sa_dev = 2267 2267 container_of(handler, typeof(*sa_dev), event_handler); 2268 - u8 port_num = event->element.port_num - sa_dev->start_port; 2268 + u32 port_num = event->element.port_num - sa_dev->start_port; 2269 2269 struct ib_sa_port *port = &sa_dev->port[port_num]; 2270 2270 2271 2271 if (!rdma_cap_ib_sa(handler->device, port->port_num))
+4 -4
drivers/infiniband/core/security.c
··· 193 193 194 194 static inline void check_pkey_qps(struct pkey_index_qp_list *pkey, 195 195 struct ib_device *device, 196 - u8 port_num, 196 + u32 port_num, 197 197 u64 subnet_prefix) 198 198 { 199 199 struct ib_port_pkey *pp, *tmp_pp; ··· 245 245 struct pkey_index_qp_list *tmp_pkey; 246 246 struct pkey_index_qp_list *pkey; 247 247 struct ib_device *dev; 248 - u8 port_num = pp->port_num; 248 + u32 port_num = pp->port_num; 249 249 int ret = 0; 250 250 251 251 if (pp->state != IB_PORT_PKEY_VALID) ··· 538 538 } 539 539 540 540 void ib_security_cache_change(struct ib_device *device, 541 - u8 port_num, 541 + u32 port_num, 542 542 u64 subnet_prefix) 543 543 { 544 544 struct pkey_index_qp_list *pkey; ··· 649 649 } 650 650 651 651 static int ib_security_pkey_access(struct ib_device *dev, 652 - u8 port_num, 652 + u32 port_num, 653 653 u16 pkey_index, 654 654 void *sec) 655 655 {
+6 -6
drivers/infiniband/core/smi.c
··· 41 41 #include "smi.h" 42 42 #include "opa_smi.h" 43 43 44 - static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num, 44 + static enum smi_action __smi_handle_dr_smp_send(bool is_switch, u32 port_num, 45 45 u8 *hop_ptr, u8 hop_cnt, 46 46 const u8 *initial_path, 47 47 const u8 *return_path, ··· 127 127 * Return IB_SMI_DISCARD if the SMP should be discarded 128 128 */ 129 129 enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, 130 - bool is_switch, int port_num) 130 + bool is_switch, u32 port_num) 131 131 { 132 132 return __smi_handle_dr_smp_send(is_switch, port_num, 133 133 &smp->hop_ptr, smp->hop_cnt, ··· 139 139 } 140 140 141 141 enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, 142 - bool is_switch, int port_num) 142 + bool is_switch, u32 port_num) 143 143 { 144 144 return __smi_handle_dr_smp_send(is_switch, port_num, 145 145 &smp->hop_ptr, smp->hop_cnt, ··· 152 152 OPA_LID_PERMISSIVE); 153 153 } 154 154 155 - static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num, 155 + static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, u32 port_num, 156 156 int phys_port_cnt, 157 157 u8 *hop_ptr, u8 hop_cnt, 158 158 const u8 *initial_path, ··· 238 238 * Return IB_SMI_DISCARD if the SMP should be dropped 239 239 */ 240 240 enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, 241 - int port_num, int phys_port_cnt) 241 + u32 port_num, int phys_port_cnt) 242 242 { 243 243 return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, 244 244 &smp->hop_ptr, smp->hop_cnt, ··· 254 254 * Return IB_SMI_DISCARD if the SMP should be dropped 255 255 */ 256 256 enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, 257 - int port_num, int phys_port_cnt) 257 + u32 port_num, int phys_port_cnt) 258 258 { 259 259 return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, 260 260 &smp->hop_ptr, smp->hop_cnt,
+2 -2
drivers/infiniband/core/smi.h
··· 52 52 }; 53 53 54 54 enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, 55 - int port_num, int phys_port_cnt); 55 + u32 port_num, int phys_port_cnt); 56 56 int smi_get_fwd_port(struct ib_smp *smp); 57 57 extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp); 58 58 extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, 59 - bool is_switch, int port_num); 59 + bool is_switch, u32 port_num); 60 60 61 61 /* 62 62 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
+8 -8
drivers/infiniband/core/sysfs.c
··· 62 62 const struct attribute_group *pma_table; 63 63 struct attribute_group *hw_stats_ag; 64 64 struct rdma_hw_stats *hw_stats; 65 - u8 port_num; 65 + u32 port_num; 66 66 }; 67 67 68 68 struct port_attribute { ··· 94 94 const char *buf, 95 95 size_t count); 96 96 int index; 97 - u8 port_num; 97 + u32 port_num; 98 98 }; 99 99 100 100 static ssize_t port_attr_show(struct kobject *kobj, ··· 812 812 } 813 813 814 814 static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats, 815 - u8 port_num, int index) 815 + u32 port_num, int index) 816 816 { 817 817 int ret; 818 818 ··· 938 938 kfree(attr_group); 939 939 } 940 940 941 - static struct attribute *alloc_hsa(int index, u8 port_num, const char *name) 941 + static struct attribute *alloc_hsa(int index, u32 port_num, const char *name) 942 942 { 943 943 struct hw_stats_attribute *hsa; 944 944 ··· 956 956 return &hsa->attr; 957 957 } 958 958 959 - static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) 959 + static struct attribute *alloc_hsa_lifespan(char *name, u32 port_num) 960 960 { 961 961 struct hw_stats_attribute *hsa; 962 962 ··· 975 975 } 976 976 977 977 static void setup_hw_stats(struct ib_device *device, struct ib_port *port, 978 - u8 port_num) 978 + u32 port_num) 979 979 { 980 980 struct attribute_group *hsag; 981 981 struct rdma_hw_stats *stats; ··· 1383 1383 int ib_setup_port_attrs(struct ib_core_device *coredev) 1384 1384 { 1385 1385 struct ib_device *device = rdma_device_to_ibdev(&coredev->dev); 1386 - unsigned int port; 1386 + u32 port; 1387 1387 int ret; 1388 1388 1389 1389 coredev->ports_kobj = kobject_create_and_add("ports", ··· 1437 1437 * @ktype: pointer to the ktype for this kobject. 
1438 1438 * @name: the name of the kobject 1439 1439 */ 1440 - int ib_port_register_module_stat(struct ib_device *device, u8 port_num, 1440 + int ib_port_register_module_stat(struct ib_device *device, u32 port_num, 1441 1441 struct kobject *kobj, struct kobj_type *ktype, 1442 1442 const char *name) 1443 1443 {
+2 -2
drivers/infiniband/core/user_mad.c
··· 101 101 struct ib_device *ib_dev; 102 102 struct ib_umad_device *umad_dev; 103 103 int dev_num; 104 - u8 port_num; 104 + u32 port_num; 105 105 }; 106 106 107 107 struct ib_umad_device { ··· 1145 1145 1146 1146 static struct ib_umad_port *get_port(struct ib_device *ibdev, 1147 1147 struct ib_umad_device *umad_dev, 1148 - unsigned int port) 1148 + u32 port) 1149 1149 { 1150 1150 if (!umad_dev) 1151 1151 return ERR_PTR(-EOPNOTSUPP);
+1 -1
drivers/infiniband/core/uverbs_cmd.c
··· 364 364 resp->max_srq_sge = attr->max_srq_sge; 365 365 resp->max_pkeys = attr->max_pkeys; 366 366 resp->local_ca_ack_delay = attr->local_ca_ack_delay; 367 - resp->phys_port_cnt = ib_dev->phys_port_cnt; 367 + resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX); 368 368 } 369 369 370 370 static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
+15 -14
drivers/infiniband/core/verbs.c
··· 227 227 } 228 228 EXPORT_SYMBOL(rdma_node_get_transport); 229 229 230 - enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) 230 + enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 231 + u32 port_num) 231 232 { 232 233 enum rdma_transport_type lt; 233 234 if (device->ops.get_link_layer) ··· 659 658 EXPORT_SYMBOL(ib_get_rdma_header_version); 660 659 661 660 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, 662 - u8 port_num, 661 + u32 port_num, 663 662 const struct ib_grh *grh) 664 663 { 665 664 int grh_version; ··· 702 701 } 703 702 704 703 static const struct ib_gid_attr * 705 - get_sgid_attr_from_eth(struct ib_device *device, u8 port_num, 704 + get_sgid_attr_from_eth(struct ib_device *device, u32 port_num, 706 705 u16 vlan_id, const union ib_gid *sgid, 707 706 enum ib_gid_type gid_type) 708 707 { ··· 789 788 * On success the caller is responsible to call rdma_destroy_ah_attr on the 790 789 * attr. 791 790 */ 792 - int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, 791 + int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num, 793 792 const struct ib_wc *wc, const struct ib_grh *grh, 794 793 struct rdma_ah_attr *ah_attr) 795 794 { ··· 920 919 EXPORT_SYMBOL(rdma_destroy_ah_attr); 921 920 922 921 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 923 - const struct ib_grh *grh, u8 port_num) 922 + const struct ib_grh *grh, u32 port_num) 924 923 { 925 924 struct rdma_ah_attr ah_attr; 926 925 struct ib_ah *ah; ··· 1674 1673 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, 1675 1674 int attr_mask, struct ib_udata *udata) 1676 1675 { 1677 - u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; 1676 + u32 port = attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port; 1678 1677 const struct ib_gid_attr *old_sgid_attr_av; 1679 1678 const struct ib_gid_attr *old_sgid_attr_alt_av; 1680 1679 int ret; ··· 1802 1801 } 1803 1802 EXPORT_SYMBOL(ib_modify_qp_with_udata); 1804 1803 1805 - int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width) 1804 + int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width) 1806 1805 { 1807 1806 int rc; 1808 1807 u32 netdev_speed; ··· 2468 2467 } 2469 2468 EXPORT_SYMBOL(ib_check_mr_status); 2470 2469 2471 - int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 2470 + int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port, 2472 2471 int state) 2473 2472 { 2474 2473 if (!device->ops.set_vf_link_state) ··· 2478 2477 } 2479 2478 EXPORT_SYMBOL(ib_set_vf_link_state); 2480 2479 2481 - int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 2480 + int ib_get_vf_config(struct ib_device *device, int vf, u32 port, 2482 2481 struct ifla_vf_info *info) 2483 2482 { 2484 2483 if (!device->ops.get_vf_config) ··· 2488 2487 } 2489 2488 EXPORT_SYMBOL(ib_get_vf_config); 2490 2489 2491 - int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 2490 + int ib_get_vf_stats(struct ib_device *device, int vf, u32 port, 2492 2491 struct ifla_vf_stats *stats) 2493 2492 { 2494 2493 if (!device->ops.get_vf_stats) ··· 2498 2497 } 2499 2498 EXPORT_SYMBOL(ib_get_vf_stats); 2500 2499 2501 - int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 2500 + int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid, 2502 2501 int type) 2503 2502 { 2504 2503 if (!device->ops.set_vf_guid) ··· 2508 2507 } 2509 2508 EXPORT_SYMBOL(ib_set_vf_guid); 2510 2509 2511 - int ib_get_vf_guid(struct ib_device *device, int vf, u8 port, 2510 + int ib_get_vf_guid(struct ib_device *device, int vf, u32 port, 2512 2511 struct ifla_vf_guid *node_guid, 2513 2512 struct ifla_vf_guid *port_guid) 2514 2513 { ··· 2850 2849 } 2851 
2850 EXPORT_SYMBOL(ib_drain_qp); 2852 2851 2853 - struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num, 2852 + struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num, 2854 2853 enum rdma_netdev_t type, const char *name, 2855 2854 unsigned char name_assign_type, 2856 2855 void (*setup)(struct net_device *)) ··· 2876 2875 } 2877 2876 EXPORT_SYMBOL(rdma_alloc_netdev); 2878 2877 2879 - int rdma_init_netdev(struct ib_device *device, u8 port_num, 2878 + int rdma_init_netdev(struct ib_device *device, u32 port_num, 2880 2879 enum rdma_netdev_t type, const char *name, 2881 2880 unsigned char name_assign_type, 2882 2881 void (*setup)(struct net_device *),
+2 -2
drivers/infiniband/hw/bnxt_re/hw_counters.c
··· 114 114 115 115 int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev, 116 116 struct rdma_hw_stats *stats, 117 - u8 port, int index) 117 + u32 port, int index) 118 118 { 119 119 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 120 120 struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma; ··· 235 235 } 236 236 237 237 struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev, 238 - u8 port_num) 238 + u32 port_num) 239 239 { 240 240 BUILD_BUG_ON(ARRAY_SIZE(bnxt_re_stat_name) != BNXT_RE_NUM_COUNTERS); 241 241 /* We support only per port stats */
+2 -2
drivers/infiniband/hw/bnxt_re/hw_counters.h
··· 97 97 }; 98 98 99 99 struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev, 100 - u8 port_num); 100 + u32 port_num); 101 101 int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev, 102 102 struct rdma_hw_stats *stats, 103 - u8 port, int index); 103 + u32 port, int index); 104 104 #endif /* __BNXT_RE_HW_STATS_H__ */
+5 -5
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 189 189 } 190 190 191 191 /* Port */ 192 - int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, 192 + int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num, 193 193 struct ib_port_attr *port_attr) 194 194 { 195 195 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); ··· 229 229 return 0; 230 230 } 231 231 232 - int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num, 232 + int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num, 233 233 struct ib_port_immutable *immutable) 234 234 { 235 235 struct ib_port_attr port_attr; ··· 254 254 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]); 255 255 } 256 256 257 - int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num, 257 + int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num, 258 258 u16 index, u16 *pkey) 259 259 { 260 260 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); ··· 266 266 &rdev->qplib_res.pkey_tbl, index, pkey); 267 267 } 268 268 269 - int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num, 269 + int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num, 270 270 int index, union ib_gid *gid) 271 271 { 272 272 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); ··· 374 374 } 375 375 376 376 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, 377 - u8 port_num) 377 + u32 port_num) 378 378 { 379 379 return IB_LINK_LAYER_ETHERNET; 380 380 }
+5 -5
drivers/infiniband/hw/bnxt_re/ib_verbs.h
··· 149 149 int bnxt_re_query_device(struct ib_device *ibdev, 150 150 struct ib_device_attr *ib_attr, 151 151 struct ib_udata *udata); 152 - int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, 152 + int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num, 153 153 struct ib_port_attr *port_attr); 154 - int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num, 154 + int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num, 155 155 struct ib_port_immutable *immutable); 156 156 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str); 157 - int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num, 157 + int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num, 158 158 u16 index, u16 *pkey); 159 159 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context); 160 160 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context); 161 - int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num, 161 + int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num, 162 162 int index, union ib_gid *gid); 163 163 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, 164 - u8 port_num); 164 + u32 port_num); 165 165 int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); 166 166 int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); 167 167 int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+6 -6
drivers/infiniband/hw/cxgb4/provider.c
··· 237 237 return 0; 238 238 } 239 239 240 - static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, 240 + static int c4iw_query_gid(struct ib_device *ibdev, u32 port, int index, 241 241 union ib_gid *gid) 242 242 { 243 243 struct c4iw_dev *dev; 244 244 245 - pr_debug("ibdev %p, port %d, index %d, gid %p\n", 245 + pr_debug("ibdev %p, port %u, index %d, gid %p\n", 246 246 ibdev, port, index, gid); 247 247 if (!port) 248 248 return -EINVAL; ··· 295 295 return 0; 296 296 } 297 297 298 - static int c4iw_query_port(struct ib_device *ibdev, u8 port, 298 + static int c4iw_query_port(struct ib_device *ibdev, u32 port, 299 299 struct ib_port_attr *props) 300 300 { 301 301 int ret = 0; ··· 378 378 }; 379 379 380 380 static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev, 381 - u8 port_num) 381 + u32 port_num) 382 382 { 383 383 BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS); 384 384 ··· 391 391 392 392 static int c4iw_get_mib(struct ib_device *ibdev, 393 393 struct rdma_hw_stats *stats, 394 - u8 port, int index) 394 + u32 port, int index) 395 395 { 396 396 struct tp_tcp_stats v4, v6; 397 397 struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev); ··· 420 420 .attrs = c4iw_class_attributes, 421 421 }; 422 422 423 - static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num, 423 + static int c4iw_port_immutable(struct ib_device *ibdev, u32 port_num, 424 424 struct ib_port_immutable *immutable) 425 425 { 426 426 struct ib_port_attr attr;
+7 -7
drivers/infiniband/hw/efa/efa.h
··· 120 120 int efa_query_device(struct ib_device *ibdev, 121 121 struct ib_device_attr *props, 122 122 struct ib_udata *udata); 123 - int efa_query_port(struct ib_device *ibdev, u8 port, 123 + int efa_query_port(struct ib_device *ibdev, u32 port, 124 124 struct ib_port_attr *props); 125 125 int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 126 126 int qp_attr_mask, 127 127 struct ib_qp_init_attr *qp_init_attr); 128 - int efa_query_gid(struct ib_device *ibdev, u8 port, int index, 128 + int efa_query_gid(struct ib_device *ibdev, u32 port, int index, 129 129 union ib_gid *gid); 130 - int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 130 + int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 131 131 u16 *pkey); 132 132 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); 133 133 int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); ··· 142 142 u64 virt_addr, int access_flags, 143 143 struct ib_udata *udata); 144 144 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); 145 - int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num, 145 + int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num, 146 146 struct ib_port_immutable *immutable); 147 147 int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata); 148 148 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext); ··· 156 156 int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 157 157 int qp_attr_mask, struct ib_udata *udata); 158 158 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, 159 - u8 port_num); 160 - struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num); 159 + u32 port_num); 160 + struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num); 161 161 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, 162 - u8 port_num, int index); 162 + u32 port_num, int index); 163 163 164 164 #endif /* _EFA_H_ */
+7 -7
drivers/infiniband/hw/efa/efa_verbs.c
··· 247 247 return 0; 248 248 } 249 249 250 - int efa_query_port(struct ib_device *ibdev, u8 port, 250 + int efa_query_port(struct ib_device *ibdev, u32 port, 251 251 struct ib_port_attr *props) 252 252 { 253 253 struct efa_dev *dev = to_edev(ibdev); ··· 319 319 return 0; 320 320 } 321 321 322 - int efa_query_gid(struct ib_device *ibdev, u8 port, int index, 322 + int efa_query_gid(struct ib_device *ibdev, u32 port, int index, 323 323 union ib_gid *gid) 324 324 { 325 325 struct efa_dev *dev = to_edev(ibdev); ··· 329 329 return 0; 330 330 } 331 331 332 - int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 332 + int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 333 333 u16 *pkey) 334 334 { 335 335 if (index > 0) ··· 1619 1619 return 0; 1620 1620 } 1621 1621 1622 - int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num, 1622 + int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num, 1623 1623 struct ib_port_immutable *immutable) 1624 1624 { 1625 1625 struct ib_port_attr attr; ··· 1904 1904 return 0; 1905 1905 } 1906 1906 1907 - struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num) 1907 + struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num) 1908 1908 { 1909 1909 return rdma_alloc_hw_stats_struct(efa_stats_names, 1910 1910 ARRAY_SIZE(efa_stats_names), ··· 1912 1912 } 1913 1913 1914 1914 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, 1915 - u8 port_num, int index) 1915 + u32 port_num, int index) 1916 1916 { 1917 1917 struct efa_com_get_stats_params params = {}; 1918 1918 union efa_com_get_stats_result result; ··· 1981 1981 } 1982 1982 1983 1983 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, 1984 - u8 port_num) 1984 + u32 port_num) 1985 1985 { 1986 1986 return IB_LINK_LAYER_UNSPECIFIED; 1987 1987 }
+5 -5
drivers/infiniband/hw/hfi1/hfi.h
··· 858 858 u8 rx_pol_inv; 859 859 860 860 u8 hw_pidx; /* physical port index */ 861 - u8 port; /* IB port number and index into dd->pports - 1 */ 861 + u32 port; /* IB port number and index into dd->pports - 1 */ 862 862 /* type of neighbor node */ 863 863 u8 neighbor_type; 864 864 u8 neighbor_normal; ··· 1473 1473 struct hfi1_ctxtdata **rcd); 1474 1474 void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd); 1475 1475 void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, 1476 - struct hfi1_devdata *dd, u8 hw_pidx, u8 port); 1476 + struct hfi1_devdata *dd, u8 hw_pidx, u32 port); 1477 1477 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd); 1478 1478 int hfi1_rcd_put(struct hfi1_ctxtdata *rcd); 1479 1479 int hfi1_rcd_get(struct hfi1_ctxtdata *rcd); ··· 1969 1969 return container_of(rdi, struct hfi1_ibdev, rdi); 1970 1970 } 1971 1971 1972 - static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port) 1972 + static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u32 port) 1973 1973 { 1974 1974 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 1975 - unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ 1975 + u32 pidx = port - 1; /* IB number port from 1, hdw from 0 */ 1976 1976 1977 1977 WARN_ON(pidx >= dd->num_pports); 1978 1978 return &dd->pport[pidx].ibport_data; ··· 2191 2191 int hfi1_device_create(struct hfi1_devdata *dd); 2192 2192 void hfi1_device_remove(struct hfi1_devdata *dd); 2193 2193 2194 - int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, 2194 + int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num, 2195 2195 struct kobject *kobj); 2196 2196 int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd); 2197 2197 void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
+1 -1
drivers/infiniband/hw/hfi1/init.c
··· 627 627 * Common code for initializing the physical port structure. 628 628 */ 629 629 void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, 630 - struct hfi1_devdata *dd, u8 hw_pidx, u8 port) 630 + struct hfi1_devdata *dd, u8 hw_pidx, u32 port) 631 631 { 632 632 int i; 633 633 uint default_pkey_idx;
+1 -1
drivers/infiniband/hw/hfi1/ipoib.h
··· 143 143 int size, void *data); 144 144 145 145 int hfi1_ipoib_rn_get_params(struct ib_device *device, 146 - u8 port_num, 146 + u32 port_num, 147 147 enum rdma_netdev_t type, 148 148 struct rdma_netdev_alloc_params *params); 149 149
+2 -2
drivers/infiniband/hw/hfi1/ipoib_main.c
··· 194 194 } 195 195 196 196 static int hfi1_ipoib_setup_rn(struct ib_device *device, 197 - u8 port_num, 197 + u32 port_num, 198 198 struct net_device *netdev, 199 199 void *param) 200 200 { ··· 243 243 } 244 244 245 245 int hfi1_ipoib_rn_get_params(struct ib_device *device, 246 - u8 port_num, 246 + u32 port_num, 247 247 enum rdma_netdev_t type, 248 248 struct rdma_netdev_alloc_params *params) 249 249 {
+64 -64
drivers/infiniband/hw/hfi1/mad.c
··· 108 108 return 0; 109 109 } 110 110 111 - void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port) 111 + void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port) 112 112 { 113 113 struct ib_event event; 114 114 ··· 297 297 struct rvt_qp *qp0; 298 298 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); 299 299 struct hfi1_devdata *dd = dd_from_ppd(ppd); 300 - u8 port_num = ppd->port; 300 + u32 port_num = ppd->port; 301 301 302 302 memset(&attr, 0, sizeof(attr)); 303 303 attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num); ··· 515 515 /* 516 516 * Send a Port Capability Mask Changed trap (ch. 14.3.11). 517 517 */ 518 - void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num) 518 + void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num) 519 519 { 520 520 struct trap_node *trap; 521 521 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); ··· 581 581 582 582 static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am, 583 583 u8 *data, struct ib_device *ibdev, 584 - u8 port, u32 *resp_len, u32 max_len) 584 + u32 port, u32 *resp_len, u32 max_len) 585 585 { 586 586 struct opa_node_description *nd; 587 587 ··· 601 601 } 602 602 603 603 static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data, 604 - struct ib_device *ibdev, u8 port, 604 + struct ib_device *ibdev, u32 port, 605 605 u32 *resp_len, u32 max_len) 606 606 { 607 607 struct opa_node_info *ni; 608 608 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 609 - unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */ 609 + u32 pidx = port - 1; /* IB number port from 1, hw from 0 */ 610 610 611 611 ni = (struct opa_node_info *)data; 612 612 ··· 641 641 } 642 642 643 643 static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, 644 - u8 port) 644 + u32 port) 645 645 { 646 646 struct ib_node_info *nip = (struct ib_node_info *)&smp->data; 647 647 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 648 - unsigned pidx = port - 1; /* IB number port from 1, 
hw from 0 */ 648 + u32 pidx = port - 1; /* IB number port from 1, hw from 0 */ 649 649 650 650 /* GUID 0 is illegal */ 651 651 if (smp->attr_mod || pidx >= dd->num_pports || ··· 794 794 } 795 795 796 796 static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, 797 - struct ib_device *ibdev, u8 port, 797 + struct ib_device *ibdev, u32 port, 798 798 u32 *resp_len, u32 max_len) 799 799 { 800 800 int i; ··· 1009 1009 * @port: the IB port number 1010 1010 * @pkeys: the pkey table is placed here 1011 1011 */ 1012 - static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) 1012 + static int get_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys) 1013 1013 { 1014 1014 struct hfi1_pportdata *ppd = dd->pport + port - 1; 1015 1015 ··· 1019 1019 } 1020 1020 1021 1021 static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, 1022 - struct ib_device *ibdev, u8 port, 1022 + struct ib_device *ibdev, u32 port, 1023 1023 u32 *resp_len, u32 max_len) 1024 1024 { 1025 1025 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); ··· 1349 1349 * 1350 1350 */ 1351 1351 static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, 1352 - struct ib_device *ibdev, u8 port, 1352 + struct ib_device *ibdev, u32 port, 1353 1353 u32 *resp_len, u32 max_len, int local_mad) 1354 1354 { 1355 1355 struct opa_port_info *pi = (struct opa_port_info *)data; ··· 1667 1667 * @port: the IB port number 1668 1668 * @pkeys: the PKEY table 1669 1669 */ 1670 - static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) 1670 + static int set_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys) 1671 1671 { 1672 1672 struct hfi1_pportdata *ppd; 1673 1673 int i; ··· 1718 1718 } 1719 1719 1720 1720 static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, 1721 - struct ib_device *ibdev, u8 port, 1721 + struct ib_device *ibdev, u32 port, 1722 1722 u32 *resp_len, u32 max_len) 1723 1723 { 1724 1724 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); ··· 
1732 1732 u32 size = 0; 1733 1733 1734 1734 if (n_blocks_sent == 0) { 1735 - pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n", 1735 + pr_warn("OPA Get PKey AM Invalid : P = %u; B = 0x%x; N = 0x%x\n", 1736 1736 port, start_block, n_blocks_sent); 1737 1737 smp->status |= IB_SMP_INVALID_FIELD; 1738 1738 return reply((struct ib_mad_hdr *)smp); ··· 1825 1825 } 1826 1826 1827 1827 static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data, 1828 - struct ib_device *ibdev, u8 port, 1828 + struct ib_device *ibdev, u32 port, 1829 1829 u32 *resp_len, u32 max_len) 1830 1830 { 1831 1831 struct hfi1_ibport *ibp = to_iport(ibdev, port); ··· 1848 1848 } 1849 1849 1850 1850 static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data, 1851 - struct ib_device *ibdev, u8 port, 1851 + struct ib_device *ibdev, u32 port, 1852 1852 u32 *resp_len, u32 max_len) 1853 1853 { 1854 1854 struct hfi1_ibport *ibp = to_iport(ibdev, port); ··· 1877 1877 } 1878 1878 1879 1879 static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data, 1880 - struct ib_device *ibdev, u8 port, 1880 + struct ib_device *ibdev, u32 port, 1881 1881 u32 *resp_len, u32 max_len) 1882 1882 { 1883 1883 struct hfi1_ibport *ibp = to_iport(ibdev, port); ··· 1900 1900 } 1901 1901 1902 1902 static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data, 1903 - struct ib_device *ibdev, u8 port, 1903 + struct ib_device *ibdev, u32 port, 1904 1904 u32 *resp_len, u32 max_len) 1905 1905 { 1906 1906 struct hfi1_ibport *ibp = to_iport(ibdev, port); ··· 1921 1921 } 1922 1922 1923 1923 static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, 1924 - struct ib_device *ibdev, u8 port, 1924 + struct ib_device *ibdev, u32 port, 1925 1925 u32 *resp_len, u32 max_len) 1926 1926 { 1927 1927 u32 n_blocks = OPA_AM_NBLK(am); ··· 1943 1943 } 1944 1944 1945 1945 static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, 1946 - struct ib_device *ibdev, u8 
port, 1946 + struct ib_device *ibdev, u32 port, 1947 1947 u32 *resp_len, u32 max_len) 1948 1948 { 1949 1949 u32 n_blocks = OPA_AM_NBLK(am); ··· 1985 1985 } 1986 1986 1987 1987 static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, 1988 - struct ib_device *ibdev, u8 port, 1988 + struct ib_device *ibdev, u32 port, 1989 1989 u32 *resp_len, u32 max_len) 1990 1990 { 1991 1991 u32 n_blocks = OPA_AM_NPORT(am); ··· 2010 2010 } 2011 2011 2012 2012 static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, 2013 - struct ib_device *ibdev, u8 port, 2013 + struct ib_device *ibdev, u32 port, 2014 2014 u32 *resp_len, u32 max_len) 2015 2015 { 2016 2016 u32 n_blocks = OPA_AM_NPORT(am); ··· 2042 2042 } 2043 2043 2044 2044 static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, 2045 - struct ib_device *ibdev, u8 port, 2045 + struct ib_device *ibdev, u32 port, 2046 2046 u32 *resp_len, u32 max_len) 2047 2047 { 2048 2048 u32 nports = OPA_AM_NPORT(am); ··· 2084 2084 } 2085 2085 2086 2086 static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data, 2087 - struct ib_device *ibdev, u8 port, 2087 + struct ib_device *ibdev, u32 port, 2088 2088 u32 *resp_len, u32 max_len, int local_mad) 2089 2089 { 2090 2090 u32 nports = OPA_AM_NPORT(am); ··· 2132 2132 } 2133 2133 2134 2134 static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data, 2135 - struct ib_device *ibdev, u8 port, 2135 + struct ib_device *ibdev, u32 port, 2136 2136 u32 *resp_len, u32 max_len) 2137 2137 { 2138 2138 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); ··· 2184 2184 } 2185 2185 2186 2186 static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data, 2187 - struct ib_device *ibdev, u8 port, u32 *resp_len, 2187 + struct ib_device *ibdev, u32 port, u32 *resp_len, 2188 2188 u32 max_len) 2189 2189 { 2190 2190 u32 num_ports = OPA_AM_NPORT(am); ··· 2208 2208 } 2209 2209 2210 2210 static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data, 2211 
- struct ib_device *ibdev, u8 port, u32 *resp_len, 2211 + struct ib_device *ibdev, u32 port, u32 *resp_len, 2212 2212 u32 max_len) 2213 2213 { 2214 2214 u32 num_ports = OPA_AM_NPORT(am); ··· 2232 2232 } 2233 2233 2234 2234 static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, 2235 - struct ib_device *ibdev, u8 port, 2235 + struct ib_device *ibdev, u32 port, 2236 2236 u32 *resp_len, u32 max_len) 2237 2237 { 2238 2238 struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); ··· 2274 2274 } 2275 2275 2276 2276 static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, 2277 - struct ib_device *ibdev, u8 port, 2277 + struct ib_device *ibdev, u32 port, 2278 2278 u32 *resp_len, u32 max_len) 2279 2279 { 2280 2280 struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); ··· 2722 2722 2723 2723 static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, 2724 2724 struct ib_device *ibdev, 2725 - u8 port, u32 *resp_len) 2725 + u32 port, u32 *resp_len) 2726 2726 { 2727 2727 struct opa_port_status_req *req = 2728 2728 (struct opa_port_status_req *)pmp->data; ··· 2732 2732 unsigned long vl; 2733 2733 size_t response_data_size; 2734 2734 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; 2735 - u8 port_num = req->port_num; 2735 + u32 port_num = req->port_num; 2736 2736 u8 num_vls = hweight64(vl_select_mask); 2737 2737 struct _vls_pctrs *vlinfo; 2738 2738 struct hfi1_ibport *ibp = to_iport(ibdev, port); ··· 2888 2888 return reply((struct ib_mad_hdr *)pmp); 2889 2889 } 2890 2890 2891 - static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port, 2891 + static u64 get_error_counter_summary(struct ib_device *ibdev, u32 port, 2892 2892 u8 res_lli, u8 res_ler) 2893 2893 { 2894 2894 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); ··· 2973 2973 2974 2974 static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, 2975 2975 struct ib_device *ibdev, 2976 - u8 port, u32 *resp_len) 2976 + u32 port, u32 *resp_len) 2977 2977 { 2978 
2978 struct opa_port_data_counters_msg *req = 2979 2979 (struct opa_port_data_counters_msg *)pmp->data; ··· 2987 2987 u8 lq, num_vls; 2988 2988 u8 res_lli, res_ler; 2989 2989 u64 port_mask; 2990 - u8 port_num; 2990 + u32 port_num; 2991 2991 unsigned long vl; 2992 2992 unsigned long vl_select_mask; 2993 2993 int vfi; ··· 3123 3123 } 3124 3124 3125 3125 static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp, 3126 - struct ib_device *ibdev, u8 port) 3126 + struct ib_device *ibdev, u32 port) 3127 3127 { 3128 3128 struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *) 3129 3129 pmp->data; ··· 3151 3151 } 3152 3152 3153 3153 static void pma_get_opa_port_ectrs(struct ib_device *ibdev, 3154 - struct _port_ectrs *rsp, u8 port) 3154 + struct _port_ectrs *rsp, u32 port) 3155 3155 { 3156 3156 u64 tmp, tmp2; 3157 3157 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); ··· 3194 3194 3195 3195 static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, 3196 3196 struct ib_device *ibdev, 3197 - u8 port, u32 *resp_len) 3197 + u32 port, u32 *resp_len) 3198 3198 { 3199 3199 size_t response_data_size; 3200 3200 struct _port_ectrs *rsp; 3201 - u8 port_num; 3201 + u32 port_num; 3202 3202 struct opa_port_error_counters64_msg *req; 3203 3203 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 3204 3204 u32 num_ports; ··· 3283 3283 } 3284 3284 3285 3285 static int pma_get_ib_portcounters(struct ib_pma_mad *pmp, 3286 - struct ib_device *ibdev, u8 port) 3286 + struct ib_device *ibdev, u32 port) 3287 3287 { 3288 3288 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) 3289 3289 pmp->data; ··· 3369 3369 3370 3370 static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, 3371 3371 struct ib_device *ibdev, 3372 - u8 port, u32 *resp_len) 3372 + u32 port, u32 *resp_len) 3373 3373 { 3374 3374 size_t response_data_size; 3375 3375 struct _port_ei *rsp; ··· 3377 3377 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 3378 3378 u64 port_mask; 3379 3379 u32 num_ports; 3380 - u8 
port_num; 3380 + u32 port_num; 3381 3381 u8 num_pslm; 3382 3382 u64 reg; 3383 3383 ··· 3468 3468 3469 3469 static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, 3470 3470 struct ib_device *ibdev, 3471 - u8 port, u32 *resp_len) 3471 + u32 port, u32 *resp_len) 3472 3472 { 3473 3473 struct opa_clear_port_status *req = 3474 3474 (struct opa_clear_port_status *)pmp->data; ··· 3620 3620 3621 3621 static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, 3622 3622 struct ib_device *ibdev, 3623 - u8 port, u32 *resp_len) 3623 + u32 port, u32 *resp_len) 3624 3624 { 3625 3625 struct _port_ei *rsp; 3626 3626 struct opa_port_error_info_msg *req; 3627 3627 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); 3628 3628 u64 port_mask; 3629 3629 u32 num_ports; 3630 - u8 port_num; 3630 + u32 port_num; 3631 3631 u8 num_pslm; 3632 3632 u32 error_info_select; 3633 3633 ··· 3702 3702 } __packed; 3703 3703 3704 3704 static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data, 3705 - struct ib_device *ibdev, u8 port, 3705 + struct ib_device *ibdev, u32 port, 3706 3706 u32 *resp_len, u32 max_len) 3707 3707 { 3708 3708 struct opa_congestion_info_attr *p = ··· 3727 3727 3728 3728 static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am, 3729 3729 u8 *data, struct ib_device *ibdev, 3730 - u8 port, u32 *resp_len, u32 max_len) 3730 + u32 port, u32 *resp_len, u32 max_len) 3731 3731 { 3732 3732 int i; 3733 3733 struct opa_congestion_setting_attr *p = ··· 3819 3819 } 3820 3820 3821 3821 static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, 3822 - struct ib_device *ibdev, u8 port, 3822 + struct ib_device *ibdev, u32 port, 3823 3823 u32 *resp_len, u32 max_len) 3824 3824 { 3825 3825 struct opa_congestion_setting_attr *p = ··· 3860 3860 3861 3861 static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am, 3862 3862 u8 *data, struct ib_device *ibdev, 3863 - u8 port, u32 *resp_len, u32 max_len) 3863 + u32 port, u32 *resp_len, u32 max_len) 3864 
3864 { 3865 3865 struct hfi1_ibport *ibp = to_iport(ibdev, port); 3866 3866 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); ··· 3925 3925 } 3926 3926 3927 3927 static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, 3928 - struct ib_device *ibdev, u8 port, 3928 + struct ib_device *ibdev, u32 port, 3929 3929 u32 *resp_len, u32 max_len) 3930 3930 { 3931 3931 struct ib_cc_table_attr *cc_table_attr = ··· 3977 3977 } 3978 3978 3979 3979 static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, 3980 - struct ib_device *ibdev, u8 port, 3980 + struct ib_device *ibdev, u32 port, 3981 3981 u32 *resp_len, u32 max_len) 3982 3982 { 3983 3983 struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data; ··· 4036 4036 #define OPA_LED_MASK BIT(OPA_LED_SHIFT) 4037 4037 4038 4038 static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, 4039 - struct ib_device *ibdev, u8 port, 4039 + struct ib_device *ibdev, u32 port, 4040 4040 u32 *resp_len, u32 max_len) 4041 4041 { 4042 4042 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); ··· 4066 4066 } 4067 4067 4068 4068 static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, 4069 - struct ib_device *ibdev, u8 port, 4069 + struct ib_device *ibdev, u32 port, 4070 4070 u32 *resp_len, u32 max_len) 4071 4071 { 4072 4072 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); ··· 4089 4089 } 4090 4090 4091 4091 static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, 4092 - u8 *data, struct ib_device *ibdev, u8 port, 4092 + u8 *data, struct ib_device *ibdev, u32 port, 4093 4093 u32 *resp_len, u32 max_len) 4094 4094 { 4095 4095 int ret; ··· 4179 4179 } 4180 4180 4181 4181 static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, 4182 - u8 *data, struct ib_device *ibdev, u8 port, 4182 + u8 *data, struct ib_device *ibdev, u32 port, 4183 4183 u32 *resp_len, u32 max_len, int local_mad) 4184 4184 { 4185 4185 int ret; ··· 4254 4254 } 4255 4255 4256 4256 static int 
subn_get_opa_aggregate(struct opa_smp *smp, 4257 - struct ib_device *ibdev, u8 port, 4257 + struct ib_device *ibdev, u32 port, 4258 4258 u32 *resp_len) 4259 4259 { 4260 4260 int i; ··· 4303 4303 } 4304 4304 4305 4305 static int subn_set_opa_aggregate(struct opa_smp *smp, 4306 - struct ib_device *ibdev, u8 port, 4306 + struct ib_device *ibdev, u32 port, 4307 4307 u32 *resp_len, int local_mad) 4308 4308 { 4309 4309 int i; ··· 4509 4509 } 4510 4510 4511 4511 static int process_subn_opa(struct ib_device *ibdev, int mad_flags, 4512 - u8 port, const struct opa_mad *in_mad, 4512 + u32 port, const struct opa_mad *in_mad, 4513 4513 struct opa_mad *out_mad, 4514 4514 u32 *resp_len, int local_mad) 4515 4515 { ··· 4614 4614 } 4615 4615 4616 4616 static int process_subn(struct ib_device *ibdev, int mad_flags, 4617 - u8 port, const struct ib_mad *in_mad, 4617 + u32 port, const struct ib_mad *in_mad, 4618 4618 struct ib_mad *out_mad) 4619 4619 { 4620 4620 struct ib_smp *smp = (struct ib_smp *)out_mad; ··· 4672 4672 return ret; 4673 4673 } 4674 4674 4675 - static int process_perf(struct ib_device *ibdev, u8 port, 4675 + static int process_perf(struct ib_device *ibdev, u32 port, 4676 4676 const struct ib_mad *in_mad, 4677 4677 struct ib_mad *out_mad) 4678 4678 { ··· 4734 4734 return ret; 4735 4735 } 4736 4736 4737 - static int process_perf_opa(struct ib_device *ibdev, u8 port, 4737 + static int process_perf_opa(struct ib_device *ibdev, u32 port, 4738 4738 const struct opa_mad *in_mad, 4739 4739 struct opa_mad *out_mad, u32 *resp_len) 4740 4740 { ··· 4816 4816 } 4817 4817 4818 4818 static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags, 4819 - u8 port, const struct ib_wc *in_wc, 4819 + u32 port, const struct ib_wc *in_wc, 4820 4820 const struct ib_grh *in_grh, 4821 4821 const struct opa_mad *in_mad, 4822 4822 struct opa_mad *out_mad, size_t *out_mad_size, ··· 4869 4869 return ret; 4870 4870 } 4871 4871 4872 - static int hfi1_process_ib_mad(struct ib_device *ibdev, 
int mad_flags, u8 port, 4872 + static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u32 port, 4873 4873 const struct ib_wc *in_wc, 4874 4874 const struct ib_grh *in_grh, 4875 4875 const struct ib_mad *in_mad, ··· 4914 4914 * 4915 4915 * This is called by the ib_mad module. 4916 4916 */ 4917 - int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, 4917 + int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port, 4918 4918 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 4919 4919 const struct ib_mad *in_mad, struct ib_mad *out_mad, 4920 4920 size_t *out_mad_size, u16 *out_mad_pkey_index)
+1 -1
drivers/infiniband/hw/hfi1/mad.h
··· 436 436 COUNTER_MASK(1, 3) | \ 437 437 COUNTER_MASK(1, 4)) 438 438 439 - void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port); 439 + void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port); 440 440 void hfi1_handle_trap_timer(struct timer_list *t); 441 441 u16 tx_link_width(u16 link_width); 442 442 u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd, u16 link_width,
+1 -1
drivers/infiniband/hw/hfi1/sysfs.c
··· 649 649 .attrs = hfi1_attributes, 650 650 }; 651 651 652 - int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, 652 + int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num, 653 653 struct kobject *kobj) 654 654 { 655 655 struct hfi1_pportdata *ppd;
+4 -4
drivers/infiniband/hw/hfi1/verbs.c
··· 1407 1407 } 1408 1408 } 1409 1409 1410 - static int query_port(struct rvt_dev_info *rdi, u8 port_num, 1410 + static int query_port(struct rvt_dev_info *rdi, u32 port_num, 1411 1411 struct ib_port_attr *props) 1412 1412 { 1413 1413 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); ··· 1485 1485 return ret; 1486 1486 } 1487 1487 1488 - static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num) 1488 + static int shut_down_port(struct rvt_dev_info *rdi, u32 port_num) 1489 1489 { 1490 1490 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); 1491 1491 struct hfi1_devdata *dd = dd_from_dev(verbs_dev); ··· 1694 1694 } 1695 1695 1696 1696 static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev, 1697 - u8 port_num) 1697 + u32 port_num) 1698 1698 { 1699 1699 int i, err; 1700 1700 ··· 1758 1758 } 1759 1759 1760 1760 static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, 1761 - u8 port, int index) 1761 + u32 port, int index) 1762 1762 { 1763 1763 u64 *values; 1764 1764 int count;
+2 -2
drivers/infiniband/hw/hfi1/verbs.h
··· 325 325 */ 326 326 void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl, 327 327 u32 qp1, u32 qp2, u32 lid1, u32 lid2); 328 - void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num); 328 + void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num); 329 329 void hfi1_sys_guid_chg(struct hfi1_ibport *ibp); 330 330 void hfi1_node_desc_chg(struct hfi1_ibport *ibp); 331 - int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, 331 + int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port, 332 332 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 333 333 const struct ib_mad *in_mad, struct ib_mad *out_mad, 334 334 size_t *out_mad_size, u16 *out_mad_pkey_index);
+1 -1
drivers/infiniband/hw/hfi1/vnic.h
··· 156 156 157 157 /* vnic rdma netdev operations */ 158 158 struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, 159 - u8 port_num, 159 + u32 port_num, 160 160 enum rdma_netdev_t type, 161 161 const char *name, 162 162 unsigned char name_assign_type,
+1 -1
drivers/infiniband/hw/hfi1/vnic_main.c
··· 593 593 } 594 594 595 595 struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, 596 - u8 port_num, 596 + u32 port_num, 597 597 enum rdma_netdev_t type, 598 598 const char *name, 599 599 unsigned char name_assign_type,
+2 -2
drivers/infiniband/hw/hns/hns_roce_device.h
··· 890 890 u16 token, int event); 891 891 int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned int timeout); 892 892 int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev); 893 - int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index, 893 + int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index, 894 894 const union ib_gid *gid, const struct ib_gid_attr *attr); 895 895 int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr); 896 896 void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port, ··· 1271 1271 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type); 1272 1272 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type); 1273 1273 void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type); 1274 - u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index); 1274 + u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index); 1275 1275 void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev); 1276 1276 int hns_roce_init(struct hns_roce_dev *hr_dev); 1277 1277 void hns_roce_exit(struct hns_roce_dev *hr_dev);
+5 -5
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
··· 54 54 * GID[0][0], GID[1][0],.....GID[N - 1][0], 55 55 * And so on 56 56 */ 57 - u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index) 57 + u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index) 58 58 { 59 59 return gid_index * hr_dev->caps.num_ports + port; 60 60 } ··· 711 711 int i, j; 712 712 u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 }; 713 713 u8 phy_port; 714 - u8 port = 0; 714 + u32 port = 0; 715 715 u8 sl; 716 716 717 717 /* Reserved cq for loop qp */ ··· 1676 1676 return 0; 1677 1677 } 1678 1678 1679 - static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, 1679 + static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port, 1680 1680 int gid_index, const union ib_gid *gid, 1681 1681 const struct ib_gid_attr *attr) 1682 1682 { ··· 2673 2673 int ret = -EINVAL; 2674 2674 u64 sq_ba = 0; 2675 2675 u64 rq_ba = 0; 2676 - int port; 2677 - u8 port_num; 2676 + u32 port; 2677 + u32 port_num; 2678 2678 u8 *dmac; 2679 2679 u8 *smac; 2680 2680
+2 -2
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
··· 2791 2791 return hns_roce_cmq_send(hr_dev, desc, 2); 2792 2792 } 2793 2793 2794 - static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port, 2794 + static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port, 2795 2795 int gid_index, const union ib_gid *gid, 2796 2796 const struct ib_gid_attr *attr) 2797 2797 { ··· 4261 4261 u64 *mtts; 4262 4262 u8 *dmac; 4263 4263 u8 *smac; 4264 - int port; 4264 + u32 port; 4265 4265 int ret; 4266 4266 4267 4267 ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
+10 -10
drivers/infiniband/hw/hns/hns_roce_main.c
··· 42 42 #include "hns_roce_device.h" 43 43 #include "hns_roce_hem.h" 44 44 45 - static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr) 45 + static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, u8 *addr) 46 46 { 47 47 u8 phy_port; 48 48 u32 i; ··· 63 63 static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context) 64 64 { 65 65 struct hns_roce_dev *hr_dev = to_hr_dev(attr->device); 66 - u8 port = attr->port_num - 1; 66 + u32 port = attr->port_num - 1; 67 67 int ret; 68 68 69 69 if (port >= hr_dev->caps.num_ports) ··· 77 77 static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context) 78 78 { 79 79 struct hns_roce_dev *hr_dev = to_hr_dev(attr->device); 80 - u8 port = attr->port_num - 1; 80 + u32 port = attr->port_num - 1; 81 81 int ret; 82 82 83 83 if (port >= hr_dev->caps.num_ports) ··· 88 88 return ret; 89 89 } 90 90 91 - static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port, 91 + static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port, 92 92 unsigned long event) 93 93 { 94 94 struct device *dev = hr_dev->dev; ··· 128 128 struct hns_roce_ib_iboe *iboe = NULL; 129 129 struct hns_roce_dev *hr_dev = NULL; 130 130 int ret; 131 - u8 port; 131 + u32 port; 132 132 133 133 hr_dev = container_of(self, struct hns_roce_dev, iboe.nb); 134 134 iboe = &hr_dev->iboe; ··· 213 213 return 0; 214 214 } 215 215 216 - static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, 216 + static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num, 217 217 struct ib_port_attr *props) 218 218 { 219 219 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); ··· 221 221 struct net_device *net_dev; 222 222 unsigned long flags; 223 223 enum ib_mtu mtu; 224 - u8 port; 224 + u32 port; 225 225 226 226 port = port_num - 1; 227 227 ··· 261 261 } 262 262 263 263 static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device, 264 - u8 port_num) 264 + u32 port_num) 265 265 { 266 266 return 
IB_LINK_LAYER_ETHERNET; 267 267 } 268 268 269 - static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index, 269 + static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index, 270 270 u16 *pkey) 271 271 { 272 272 *pkey = PKEY_ID; ··· 369 369 } 370 370 } 371 371 372 - static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num, 372 + static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num, 373 373 struct ib_port_immutable *immutable) 374 374 { 375 375 struct ib_port_attr attr;
+5 -5
drivers/infiniband/hw/i40iw/i40iw_verbs.c
··· 94 94 * @props: returning device attributes 95 95 */ 96 96 static int i40iw_query_port(struct ib_device *ibdev, 97 - u8 port, 97 + u32 port, 98 98 struct ib_port_attr *props) 99 99 { 100 100 props->lid = 1; ··· 2347 2347 * @port_num: port number 2348 2348 * @immutable: immutable data for the port return 2349 2349 */ 2350 - static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num, 2350 + static int i40iw_port_immutable(struct ib_device *ibdev, u32 port_num, 2351 2351 struct ib_port_immutable *immutable) 2352 2352 { 2353 2353 struct ib_port_attr attr; ··· 2446 2446 * @port_num: port number 2447 2447 */ 2448 2448 static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev, 2449 - u8 port_num) 2449 + u32 port_num) 2450 2450 { 2451 2451 struct i40iw_device *iwdev = to_iwdev(ibdev); 2452 2452 struct i40iw_sc_dev *dev = &iwdev->sc_dev; ··· 2477 2477 */ 2478 2478 static int i40iw_get_hw_stats(struct ib_device *ibdev, 2479 2479 struct rdma_hw_stats *stats, 2480 - u8 port_num, int index) 2480 + u32 port_num, int index) 2481 2481 { 2482 2482 struct i40iw_device *iwdev = to_iwdev(ibdev); 2483 2483 struct i40iw_sc_dev *dev = &iwdev->sc_dev; ··· 2504 2504 * @gid: Global ID 2505 2505 */ 2506 2506 static int i40iw_query_gid(struct ib_device *ibdev, 2507 - u8 port, 2507 + u32 port, 2508 2508 int index, 2509 2509 union ib_gid *gid) 2510 2510 {
+8 -8
drivers/infiniband/hw/mlx4/alias_GUID.c
··· 73 73 int *resched_delay_sec); 74 74 75 75 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num, 76 - u8 port_num, u8 *p_data) 76 + u32 port_num, u8 *p_data) 77 77 { 78 78 int i; 79 79 u64 guid_indexes; 80 80 int slave_id; 81 - int port_index = port_num - 1; 81 + u32 port_index = port_num - 1; 82 82 83 83 if (!mlx4_is_master(dev->dev)) 84 84 return; ··· 86 86 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. 87 87 ports_guid[port_num - 1]. 88 88 all_rec_per_port[block_num].guid_indexes); 89 - pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes); 89 + pr_debug("port: %u, guid_indexes: 0x%llx\n", port_num, guid_indexes); 90 90 91 91 for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) { 92 92 /* The location of the specific index starts from bit number 4 ··· 184 184 * port_number - 1 or 2 185 185 */ 186 186 void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, 187 - int block_num, u8 port_num, 187 + int block_num, u32 port_num, 188 188 u8 *p_data) 189 189 { 190 190 int i; ··· 206 206 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. 207 207 ports_guid[port_num - 1]. 
208 208 all_rec_per_port[block_num].guid_indexes); 209 - pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes); 209 + pr_debug("port: %u, guid_indexes: 0x%llx\n", port_num, guid_indexes); 210 210 211 211 /*calculate the slaves and notify them*/ 212 212 for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) { ··· 260 260 new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num, 261 261 MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID, 262 262 &gen_event); 263 - pr_debug("slave: %d, port: %d prev_port_state: %d," 263 + pr_debug("slave: %d, port: %u prev_port_state: %d," 264 264 " new_port_state: %d, gen_event: %d\n", 265 265 slave_id, port_num, prev_state, new_state, gen_event); 266 266 if (gen_event == SLAVE_PORT_GEN_EVENT_UP) { 267 - pr_debug("sending PORT_UP event to slave: %d, port: %d\n", 267 + pr_debug("sending PORT_UP event to slave: %d, port: %u\n", 268 268 slave_id, port_num); 269 269 mlx4_gen_port_state_change_eqe(dev->dev, slave_id, 270 270 port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE); ··· 274 274 MLX4_PORT_STATE_IB_EVENT_GID_INVALID, 275 275 &gen_event); 276 276 if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) { 277 - pr_debug("sending PORT DOWN event to slave: %d, port: %d\n", 277 + pr_debug("sending PORT DOWN event to slave: %d, port: %u\n", 278 278 slave_id, port_num); 279 279 mlx4_gen_port_state_change_eqe(dev->dev, 280 280 slave_id,
+24 -22
drivers/infiniband/hw/mlx4/mad.c
··· 88 88 struct ib_mad mad; 89 89 } __packed; 90 90 91 - static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num); 92 - static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num); 91 + static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num); 92 + static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num); 93 93 static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num, 94 94 int block, u32 change_bitmap); 95 95 ··· 186 186 return err; 187 187 } 188 188 189 - static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) 189 + static void update_sm_ah(struct mlx4_ib_dev *dev, u32 port_num, u16 lid, u8 sl) 190 190 { 191 191 struct ib_ah *new_ah; 192 192 struct rdma_ah_attr ah_attr; ··· 217 217 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can 218 218 * synthesize LID change, Client-Rereg, GID change, and P_Key change events. 219 219 */ 220 - static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad, 221 - u16 prev_lid) 220 + static void smp_snoop(struct ib_device *ibdev, u32 port_num, 221 + const struct ib_mad *mad, u16 prev_lid) 222 222 { 223 223 struct ib_port_info *pinfo; 224 224 u16 lid; ··· 274 274 be16_to_cpu(base[i]); 275 275 } 276 276 } 277 - pr_debug("PKEY Change event: port=%d, " 277 + pr_debug("PKEY Change event: port=%u, " 278 278 "block=0x%x, change_bitmap=0x%x\n", 279 279 port_num, bn, pkey_change_bitmap); 280 280 ··· 380 380 } 381 381 } 382 382 383 - static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad) 383 + static void forward_trap(struct mlx4_ib_dev *dev, u32 port_num, 384 + const struct ib_mad *mad) 384 385 { 385 386 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; 386 387 struct ib_mad_send_buf *send_buf; ··· 430 429 return ret; 431 430 } 432 431 433 - int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid) 432 + int 
mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid) 434 433 { 435 434 struct mlx4_ib_dev *dev = to_mdev(ibdev); 436 435 int i; ··· 444 443 445 444 446 445 static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave, 447 - u8 port, u16 pkey, u16 *ix) 446 + u32 port, u16 pkey, u16 *ix) 448 447 { 449 448 int i, ret; 450 449 u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF; ··· 508 507 return (qpn >= proxy_start && qpn <= proxy_start + 1); 509 508 } 510 509 511 - int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, 510 + int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port, 512 511 enum ib_qp_type dest_qpt, struct ib_wc *wc, 513 512 struct ib_grh *grh, struct ib_mad *mad) 514 513 { ··· 679 678 return ret; 680 679 } 681 680 682 - static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port, 681 + static int mlx4_ib_demux_mad(struct ib_device *ibdev, u32 port, 683 682 struct ib_wc *wc, struct ib_grh *grh, 684 683 struct ib_mad *mad) 685 684 { ··· 819 818 return 0; 820 819 } 821 820 822 - static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 821 + static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, 823 822 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 824 823 const struct ib_mad *in_mad, struct ib_mad *out_mad) 825 824 { ··· 933 932 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 934 933 } 935 934 936 - static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 937 - const struct ib_wc *in_wc, const struct ib_grh *in_grh, 938 - const struct ib_mad *in_mad, struct ib_mad *out_mad) 935 + static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, 936 + u32 port_num, const struct ib_wc *in_wc, 937 + const struct ib_grh *in_grh, 938 + const struct ib_mad *in_mad, struct ib_mad *out_mad) 939 939 { 940 940 struct mlx4_counter counter_stats; 941 941 struct mlx4_ib_dev *dev = to_mdev(ibdev); ··· 981 979 return err; 982 
980 } 983 981 984 - int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 982 + int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, 985 983 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 986 984 const struct ib_mad *in, struct ib_mad *out, 987 985 size_t *out_mad_size, u16 *out_mad_pkey_index) ··· 1075 1073 } 1076 1074 } 1077 1075 1078 - static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num) 1076 + static void handle_lid_change_event(struct mlx4_ib_dev *dev, u32 port_num) 1079 1077 { 1080 1078 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE); 1081 1079 ··· 1084 1082 MLX4_EQ_PORT_INFO_LID_CHANGE_MASK); 1085 1083 } 1086 1084 1087 - static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num) 1085 + static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u32 port_num) 1088 1086 { 1089 1087 /* re-configure the alias-guid and mcg's */ 1090 1088 if (mlx4_is_master(dev->dev)) { ··· 1123 1121 GET_MASK_FROM_EQE(eqe)); 1124 1122 } 1125 1123 1126 - static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num, 1124 + static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u32 port_num, 1127 1125 u32 guid_tbl_blk_num, u32 change_bitmap) 1128 1126 { 1129 1127 struct ib_smp *in_mad = NULL; ··· 1179 1177 struct ib_event_work *ew = container_of(work, struct ib_event_work, work); 1180 1178 struct mlx4_ib_dev *dev = ew->ib_dev; 1181 1179 struct mlx4_eqe *eqe = &(ew->ib_eqe); 1182 - u8 port = eqe->event.port_mgmt_change.port; 1180 + u32 port = eqe->event.port_mgmt_change.port; 1183 1181 u32 changed_attr; 1184 1182 u32 tbl_block; 1185 1183 u32 change_bitmap; ··· 1276 1274 kfree(ew); 1277 1275 } 1278 1276 1279 - void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num, 1277 + void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num, 1280 1278 enum ib_event_type type) 1281 1279 { 1282 1280 struct ib_event event; ··· 1353 1351 return ret; 1354 
1352 } 1355 1353 1356 - int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, 1354 + int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port, 1357 1355 enum ib_qp_type dest_qpt, u16 pkey_index, 1358 1356 u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr, 1359 1357 u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
+25 -22
drivers/infiniband/hw/mlx4/main.c
··· 81 81 82 82 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init); 83 83 static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device, 84 - u8 port_num); 84 + u32 port_num); 85 85 86 86 static struct workqueue_struct *wq; 87 87 ··· 129 129 return ib_ports; 130 130 } 131 131 132 - static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num) 132 + static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, 133 + u32 port_num) 133 134 { 134 135 struct mlx4_ib_dev *ibdev = to_mdev(device); 135 136 struct net_device *dev; ··· 161 160 162 161 static int mlx4_ib_update_gids_v1(struct gid_entry *gids, 163 162 struct mlx4_ib_dev *ibdev, 164 - u8 port_num) 163 + u32 port_num) 165 164 { 166 165 struct mlx4_cmd_mailbox *mailbox; 167 166 int err; ··· 194 193 195 194 static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids, 196 195 struct mlx4_ib_dev *ibdev, 197 - u8 port_num) 196 + u32 port_num) 198 197 { 199 198 struct mlx4_cmd_mailbox *mailbox; 200 199 int err; ··· 239 238 240 239 static int mlx4_ib_update_gids(struct gid_entry *gids, 241 240 struct mlx4_ib_dev *ibdev, 242 - u8 port_num) 241 + u32 port_num) 243 242 { 244 243 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) 245 244 return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num); ··· 408 407 int real_index = -EINVAL; 409 408 int i; 410 409 unsigned long flags; 411 - u8 port_num = attr->port_num; 410 + u32 port_num = attr->port_num; 412 411 413 412 if (port_num > MLX4_MAX_PORTS) 414 413 return -EINVAL; ··· 650 649 } 651 650 652 651 static enum rdma_link_layer 653 - mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num) 652 + mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num) 654 653 { 655 654 struct mlx4_dev *dev = to_mdev(device)->dev; 656 655 ··· 658 657 IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; 659 658 } 660 659 661 - static int ib_link_query_port(struct ib_device *ibdev, u8 port, 660 + static int 
ib_link_query_port(struct ib_device *ibdev, u32 port, 662 661 struct ib_port_attr *props, int netw_view) 663 662 { 664 663 struct ib_smp *in_mad = NULL; ··· 754 753 IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED; 755 754 } 756 755 757 - static int eth_link_query_port(struct ib_device *ibdev, u8 port, 756 + static int eth_link_query_port(struct ib_device *ibdev, u32 port, 758 757 struct ib_port_attr *props) 759 758 { 760 759 ··· 815 814 return err; 816 815 } 817 816 818 - int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port, 817 + int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port, 819 818 struct ib_port_attr *props, int netw_view) 820 819 { 821 820 int err; ··· 829 828 return err; 830 829 } 831 830 832 - static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, 831 + static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port, 833 832 struct ib_port_attr *props) 834 833 { 835 834 /* returns host view */ 836 835 return __mlx4_ib_query_port(ibdev, port, props, 0); 837 836 } 838 837 839 - int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 838 + int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index, 840 839 union ib_gid *gid, int netw_view) 841 840 { 842 841 struct ib_smp *in_mad = NULL; ··· 892 891 return err; 893 892 } 894 893 895 - static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 894 + static int mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index, 896 895 union ib_gid *gid) 897 896 { 898 897 if (rdma_protocol_ib(ibdev, port)) ··· 900 899 return 0; 901 900 } 902 901 903 - static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl) 902 + static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port, 903 + u64 *sl2vl_tbl) 904 904 { 905 905 union sl2vl_tbl_to_u64 sl2vl64; 906 906 struct ib_smp *in_mad = NULL; ··· 961 959 } 962 960 } 963 961 964 - int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 962 + int 
__mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 965 963 u16 *pkey, int netw_view) 966 964 { 967 965 struct ib_smp *in_mad = NULL; ··· 994 992 return err; 995 993 } 996 994 997 - static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) 995 + static int mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 996 + u16 *pkey) 998 997 { 999 998 return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0); 1000 999 } ··· 1036 1033 return 0; 1037 1034 } 1038 1035 1039 - static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, 1040 - u32 cap_mask) 1036 + static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u32 port, 1037 + int reset_qkey_viols, u32 cap_mask) 1041 1038 { 1042 1039 struct mlx4_cmd_mailbox *mailbox; 1043 1040 int err; ··· 1062 1059 return err; 1063 1060 } 1064 1061 1065 - static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, 1062 + static int mlx4_ib_modify_port(struct ib_device *ibdev, u32 port, int mask, 1066 1063 struct ib_port_modify *props) 1067 1064 { 1068 1065 struct mlx4_ib_dev *mdev = to_mdev(ibdev); ··· 2106 2103 }; 2107 2104 2108 2105 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev, 2109 - u8 port_num) 2106 + u32 port_num) 2110 2107 { 2111 2108 struct mlx4_ib_dev *dev = to_mdev(ibdev); 2112 2109 struct mlx4_ib_diag_counters *diag = dev->diag_counters; ··· 2121 2118 2122 2119 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev, 2123 2120 struct rdma_hw_stats *stats, 2124 - u8 port, int index) 2121 + u32 port, int index) 2125 2122 { 2126 2123 struct mlx4_ib_dev *dev = to_mdev(ibdev); 2127 2124 struct mlx4_ib_diag_counters *diag = dev->diag_counters; ··· 2469 2466 ibdev->eq_table = NULL; 2470 2467 } 2471 2468 2472 - static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, 2469 + static int mlx4_port_immutable(struct ib_device *ibdev, u32 port_num, 2473 2470 struct ib_port_immutable *immutable) 2474 2471 { 
2475 2472 struct ib_port_attr attr;
+13 -13
drivers/infiniband/hw/mlx4/mlx4_ib.h
··· 429 429 struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT]; 430 430 struct workqueue_struct *wq; 431 431 struct delayed_work alias_guid_work; 432 - u8 port; 432 + u32 port; 433 433 u32 state_flags; 434 434 struct mlx4_sriov_alias_guid *parent; 435 435 struct list_head cb_list; ··· 657 657 struct ib_qp_init_attr init_attr; 658 658 int slave; 659 659 enum ib_qp_type proxy_qp_type; 660 - u8 port; 660 + u32 port; 661 661 }; 662 662 663 663 struct mlx4_uverbs_ex_query_device { ··· 810 810 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags, 811 811 int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, 812 812 const void *in_mad, void *response_mad); 813 - int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 813 + int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, 814 814 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 815 815 const struct ib_mad *in, struct ib_mad *out, 816 816 size_t *out_mad_size, u16 *out_mad_pkey_index); 817 817 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev); 818 818 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev); 819 819 820 - int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port, 820 + int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port, 821 821 struct ib_port_attr *props, int netw_view); 822 - int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 822 + int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 823 823 u16 *pkey, int netw_view); 824 824 825 - int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 825 + int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index, 826 826 union ib_gid *gid, int netw_view); 827 827 828 828 static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah) 829 829 { 830 - u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3; 830 + u32 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3; 831 831 832 832 if 
(rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET) 833 833 return true; ··· 841 841 int mlx4_ib_mcg_init(void); 842 842 void mlx4_ib_mcg_destroy(void); 843 843 844 - int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid); 844 + int mlx4_ib_find_real_gid(struct ib_device *ibdev, u32 port, __be64 guid); 845 845 846 846 int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave, 847 847 struct ib_sa_mad *sa_mad); ··· 851 851 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, 852 852 union ib_gid *gid); 853 853 854 - void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num, 854 + void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u32 port_num, 855 855 enum ib_event_type type); 856 856 857 857 void mlx4_ib_tunnels_update_work(struct work_struct *work); 858 858 859 - int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, 859 + int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u32 port, 860 860 enum ib_qp_type qpt, struct ib_wc *wc, 861 861 struct ib_grh *grh, struct ib_mad *mad); 862 862 863 - int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, 863 + int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u32 port, 864 864 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn, 865 865 u32 qkey, struct rdma_ah_attr *attr, u8 *s_mac, 866 866 u16 vlan_id, struct ib_mad *mad); ··· 884 884 885 885 void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, 886 886 int block_num, 887 - u8 port_num, u8 *p_data); 887 + u32 port_num, u8 *p_data); 888 888 889 889 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, 890 - int block_num, u8 port_num, 890 + int block_num, u32 port_num, 891 891 u8 *p_data); 892 892 893 893 int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
+4 -4
drivers/infiniband/hw/mlx5/cong.c
··· 267 267 } 268 268 } 269 269 270 - static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num, 270 + static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u32 port_num, 271 271 int offset, u32 *var) 272 272 { 273 273 int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out); ··· 304 304 return err; 305 305 } 306 306 307 - static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num, 307 + static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u32 port_num, 308 308 int offset, u32 var) 309 309 { 310 310 int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in); ··· 397 397 .read = get_param, 398 398 }; 399 399 400 - void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num) 400 + void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num) 401 401 { 402 402 if (!mlx5_debugfs_root || 403 403 !dev->port[port_num].dbg_cc_params || ··· 409 409 dev->port[port_num].dbg_cc_params = NULL; 410 410 } 411 411 412 - void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num) 412 + void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num) 413 413 { 414 414 struct mlx5_ib_dbg_cc_params *dbg_cc_params; 415 415 struct mlx5_core_dev *mdev;
+5 -5
drivers/infiniband/hw/mlx5/counters.c
··· 139 139 140 140 141 141 static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev, 142 - u8 port_num) 142 + u32 port_num) 143 143 { 144 144 return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts : 145 145 &dev->port[port_num].cnts; ··· 154 154 * device port combination in switchdev and non switchdev mode of the 155 155 * parent device. 156 156 */ 157 - u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num) 157 + u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num) 158 158 { 159 159 const struct mlx5_ib_counters *cnts = get_counters(dev, port_num); 160 160 ··· 162 162 } 163 163 164 164 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, 165 - u8 port_num) 165 + u32 port_num) 166 166 { 167 167 struct mlx5_ib_dev *dev = to_mdev(ibdev); 168 168 const struct mlx5_ib_counters *cnts; ··· 236 236 237 237 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, 238 238 struct rdma_hw_stats *stats, 239 - u8 port_num, int index) 239 + u32 port_num, int index) 240 240 { 241 241 struct mlx5_ib_dev *dev = to_mdev(ibdev); 242 242 const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1); 243 243 struct mlx5_core_dev *mdev; 244 244 int ret, num_counters; 245 - u8 mdev_port_num; 245 + u32 mdev_port_num; 246 246 247 247 if (!stats) 248 248 return -EINVAL;
+1 -1
drivers/infiniband/hw/mlx5/counters.h
··· 13 13 void mlx5_ib_counters_clear_description(struct ib_counters *counters); 14 14 int mlx5_ib_flow_counters_set_data(struct ib_counters *ibcounters, 15 15 struct mlx5_ib_create_flow *ucmd); 16 - u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num); 16 + u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num); 17 17 #endif /* _MLX5_IB_COUNTERS_H */
+2 -2
drivers/infiniband/hw/mlx5/ib_rep.c
··· 29 29 static int 30 30 mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) 31 31 { 32 - int num_ports = mlx5_eswitch_get_total_vports(dev); 32 + u32 num_ports = mlx5_eswitch_get_total_vports(dev); 33 33 const struct mlx5_ib_profile *profile; 34 34 struct mlx5_ib_dev *ibdev; 35 35 int vport_index; ··· 110 110 111 111 struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, 112 112 struct mlx5_ib_sq *sq, 113 - u16 port) 113 + u32 port) 114 114 { 115 115 struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; 116 116 struct mlx5_eswitch_rep *rep;
+2 -2
drivers/infiniband/hw/mlx5/ib_rep.h
··· 16 16 void mlx5r_rep_cleanup(void); 17 17 struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, 18 18 struct mlx5_ib_sq *sq, 19 - u16 port); 19 + u32 port); 20 20 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw, 21 21 u16 vport_num); 22 22 #else /* CONFIG_MLX5_ESWITCH */ ··· 25 25 static inline 26 26 struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, 27 27 struct mlx5_ib_sq *sq, 28 - u16 port) 28 + u32 port) 29 29 { 30 30 return NULL; 31 31 }
+9 -7
drivers/infiniband/hw/mlx5/ib_virt.c
··· 48 48 } 49 49 } 50 50 51 - int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port, 51 + int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u32 port, 52 52 struct ifla_vf_info *info) 53 53 { 54 54 struct mlx5_ib_dev *dev = to_mdev(device); ··· 91 91 } 92 92 93 93 int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf, 94 - u8 port, int state) 94 + u32 port, int state) 95 95 { 96 96 struct mlx5_ib_dev *dev = to_mdev(device); 97 97 struct mlx5_core_dev *mdev = dev->mdev; ··· 119 119 } 120 120 121 121 int mlx5_ib_get_vf_stats(struct ib_device *device, int vf, 122 - u8 port, struct ifla_vf_stats *stats) 122 + u32 port, struct ifla_vf_stats *stats) 123 123 { 124 124 int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out); 125 125 struct mlx5_core_dev *mdev; ··· 149 149 return err; 150 150 } 151 151 152 - static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid) 152 + static int set_vf_node_guid(struct ib_device *device, int vf, u32 port, 153 + u64 guid) 153 154 { 154 155 struct mlx5_ib_dev *dev = to_mdev(device); 155 156 struct mlx5_core_dev *mdev = dev->mdev; ··· 173 172 return err; 174 173 } 175 174 176 - static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid) 175 + static int set_vf_port_guid(struct ib_device *device, int vf, u32 port, 176 + u64 guid) 177 177 { 178 178 struct mlx5_ib_dev *dev = to_mdev(device); 179 179 struct mlx5_core_dev *mdev = dev->mdev; ··· 197 195 return err; 198 196 } 199 197 200 - int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port, 198 + int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port, 201 199 u64 guid, int type) 202 200 { 203 201 if (type == IFLA_VF_IB_NODE_GUID) ··· 208 206 return -EINVAL; 209 207 } 210 208 211 - int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port, 209 + int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port, 212 210 struct ifla_vf_guid *node_guid, 213 211 struct ifla_vf_guid 
*port_guid) 214 212 {
+8 -8
drivers/infiniband/hw/mlx5/mad.c
··· 42 42 MLX5_IB_VENDOR_CLASS2 = 0xa 43 43 }; 44 44 45 - static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num, 45 + static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u32 port_num, 46 46 struct ib_mad *in_mad) 47 47 { 48 48 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED && ··· 52 52 } 53 53 54 54 static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, 55 - int ignore_bkey, u8 port, const struct ib_wc *in_wc, 55 + int ignore_bkey, u32 port, const struct ib_wc *in_wc, 56 56 const struct ib_grh *in_grh, const void *in_mad, 57 57 void *response_mad) 58 58 { ··· 147 147 vl_15_dropped); 148 148 } 149 149 150 - static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num, 150 + static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num, 151 151 const struct ib_mad *in_mad, struct ib_mad *out_mad) 152 152 { 153 153 struct mlx5_core_dev *mdev; 154 154 bool native_port = true; 155 - u8 mdev_port_num; 155 + u32 mdev_port_num; 156 156 void *out_cnt; 157 157 int err; 158 158 ··· 216 216 return err; 217 217 } 218 218 219 - int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 219 + int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, 220 220 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 221 221 const struct ib_mad *in, struct ib_mad *out, 222 222 size_t *out_mad_size, u16 *out_mad_pkey_index) ··· 444 444 return err; 445 445 } 446 446 447 - int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index, 447 + int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index, 448 448 u16 *pkey) 449 449 { 450 450 struct ib_smp *in_mad = NULL; ··· 473 473 return err; 474 474 } 475 475 476 - int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index, 476 + int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index, 477 477 union ib_gid *gid) 478 478 { 479 479 struct ib_smp *in_mad = NULL; ··· 513 513 return err; 514 514 } 515 515 516 - 
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port, 516 + int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port, 517 517 struct ib_port_attr *props) 518 518 { 519 519 struct mlx5_ib_dev *dev = to_mdev(ibdev);
+42 -41
drivers/infiniband/hw/mlx5/main.c
··· 101 101 } 102 102 103 103 static enum rdma_link_layer 104 - mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num) 104 + mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num) 105 105 { 106 106 struct mlx5_ib_dev *dev = to_mdev(device); 107 107 int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type); ··· 110 110 } 111 111 112 112 static int get_port_state(struct ib_device *ibdev, 113 - u8 port_num, 113 + u32 port_num, 114 114 enum ib_port_state *state) 115 115 { 116 116 struct ib_port_attr attr; ··· 125 125 126 126 static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev, 127 127 struct net_device *ndev, 128 - u8 *port_num) 128 + u32 *port_num) 129 129 { 130 130 struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; 131 131 struct net_device *rep_ndev; ··· 156 156 { 157 157 struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb); 158 158 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 159 - u8 port_num = roce->native_port_num; 159 + u32 port_num = roce->native_port_num; 160 160 struct mlx5_core_dev *mdev; 161 161 struct mlx5_ib_dev *ibdev; 162 162 ··· 235 235 } 236 236 237 237 static struct net_device *mlx5_ib_get_netdev(struct ib_device *device, 238 - u8 port_num) 238 + u32 port_num) 239 239 { 240 240 struct mlx5_ib_dev *ibdev = to_mdev(device); 241 241 struct net_device *ndev; ··· 263 263 } 264 264 265 265 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev, 266 - u8 ib_port_num, 267 - u8 *native_port_num) 266 + u32 ib_port_num, 267 + u32 *native_port_num) 268 268 { 269 269 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev, 270 270 ib_port_num); ··· 298 298 return mdev; 299 299 } 300 300 301 - void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num) 301 + void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num) 302 302 { 303 303 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev, 304 304 port_num); ··· 454 454 active_width); 
455 455 } 456 456 457 - static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, 457 + static int mlx5_query_port_roce(struct ib_device *device, u32 port_num, 458 458 struct ib_port_attr *props) 459 459 { 460 460 struct mlx5_ib_dev *dev = to_mdev(device); ··· 464 464 enum ib_mtu ndev_ib_mtu; 465 465 bool put_mdev = true; 466 466 u32 eth_prot_oper; 467 - u8 mdev_port_num; 467 + u32 mdev_port_num; 468 468 bool ext; 469 469 int err; 470 470 ··· 551 551 return err; 552 552 } 553 553 554 - static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num, 554 + static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num, 555 555 unsigned int index, const union ib_gid *gid, 556 556 const struct ib_gid_attr *attr) 557 557 { ··· 1269 1269 return 0; 1270 1270 } 1271 1271 1272 - static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, 1272 + static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port, 1273 1273 struct ib_port_attr *props) 1274 1274 { 1275 1275 struct mlx5_ib_dev *dev = to_mdev(ibdev); ··· 1337 1337 return err; 1338 1338 } 1339 1339 1340 - int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, 1340 + int mlx5_ib_query_port(struct ib_device *ibdev, u32 port, 1341 1341 struct ib_port_attr *props) 1342 1342 { 1343 1343 unsigned int count; ··· 1382 1382 return ret; 1383 1383 } 1384 1384 1385 - static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port, 1385 + static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port, 1386 1386 struct ib_port_attr *props) 1387 1387 { 1388 1388 return mlx5_query_port_roce(ibdev, port, props); 1389 1389 } 1390 1390 1391 - static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 1391 + static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 1392 1392 u16 *pkey) 1393 1393 { 1394 1394 /* Default special Pkey for representor device port as per the ··· 1398 1398 return 0; 1399 1399 } 1400 1400 1401 - static int mlx5_ib_query_gid(struct ib_device 
*ibdev, u8 port, int index, 1401 + static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index, 1402 1402 union ib_gid *gid) 1403 1403 { 1404 1404 struct mlx5_ib_dev *dev = to_mdev(ibdev); ··· 1417 1417 1418 1418 } 1419 1419 1420 - static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port, 1420 + static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port, 1421 1421 u16 index, u16 *pkey) 1422 1422 { 1423 1423 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1424 1424 struct mlx5_core_dev *mdev; 1425 1425 bool put_mdev = true; 1426 - u8 mdev_port_num; 1426 + u32 mdev_port_num; 1427 1427 int err; 1428 1428 1429 1429 mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num); ··· 1444 1444 return err; 1445 1445 } 1446 1446 1447 - static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 1447 + static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 1448 1448 u16 *pkey) 1449 1449 { 1450 1450 switch (mlx5_get_vport_access_method(ibdev)) { ··· 1488 1488 return err; 1489 1489 } 1490 1490 1491 - static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask, 1491 + static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask, 1492 1492 u32 value) 1493 1493 { 1494 1494 struct mlx5_hca_vport_context ctx = {}; 1495 1495 struct mlx5_core_dev *mdev; 1496 - u8 mdev_port_num; 1496 + u32 mdev_port_num; 1497 1497 int err; 1498 1498 1499 1499 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num); ··· 1522 1522 return err; 1523 1523 } 1524 1524 1525 - static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, 1525 + static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask, 1526 1526 struct ib_port_modify *props) 1527 1527 { 1528 1528 struct mlx5_ib_dev *dev = to_mdev(ibdev); ··· 1931 1931 print_lib_caps(dev, context->lib_caps); 1932 1932 1933 1933 if (mlx5_ib_lag_should_assign_affinity(dev)) { 1934 - u8 port = 
mlx5_core_native_port_num(dev->mdev) - 1; 1934 + u32 port = mlx5_core_native_port_num(dev->mdev) - 1; 1935 1935 1936 1936 atomic_set(&context->tx_port_affinity, 1937 1937 atomic_add_return( ··· 2781 2781 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, 2782 2782 struct ib_event *ibev) 2783 2783 { 2784 - u8 port = (eqe->data.port.port >> 4) & 0xf; 2784 + u32 port = (eqe->data.port.port >> 4) & 0xf; 2785 2785 2786 2786 switch (eqe->sub_type) { 2787 2787 case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT: ··· 2797 2797 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, 2798 2798 struct ib_event *ibev) 2799 2799 { 2800 - u8 port = (eqe->data.port.port >> 4) & 0xf; 2800 + u32 port = (eqe->data.port.port >> 4) & 0xf; 2801 2801 2802 2802 ibev->element.port_num = port; 2803 2803 ··· 3154 3154 return ret; 3155 3155 } 3156 3156 3157 - static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, 3157 + static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num, 3158 3158 struct ib_port_immutable *immutable) 3159 3159 { 3160 3160 struct ib_port_attr attr; ··· 3182 3182 return 0; 3183 3183 } 3184 3184 3185 - static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num, 3185 + static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num, 3186 3186 struct ib_port_immutable *immutable) 3187 3187 { 3188 3188 struct ib_port_attr attr; ··· 3254 3254 } 3255 3255 } 3256 3256 3257 - static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) 3257 + static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num) 3258 3258 { 3259 3259 int err; 3260 3260 ··· 3268 3268 return 0; 3269 3269 } 3270 3270 3271 - static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) 3271 + static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num) 3272 3272 { 3273 3273 if (dev->port[port_num].roce.nb.notifier_call) { 3274 3274 
unregister_netdevice_notifier(&dev->port[port_num].roce.nb); ··· 3302 3302 mlx5_nic_vport_disable_roce(dev->mdev); 3303 3303 } 3304 3304 3305 - static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num, 3305 + static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num, 3306 3306 enum rdma_netdev_t type, 3307 3307 struct rdma_netdev_alloc_params *params) 3308 3308 { ··· 3354 3354 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, 3355 3355 struct mlx5_ib_multiport_info *mpi) 3356 3356 { 3357 - u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 3357 + u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 3358 3358 struct mlx5_ib_port *port = &ibdev->port[port_num]; 3359 3359 int comps; 3360 3360 int err; ··· 3400 3400 3401 3401 err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev); 3402 3402 3403 - mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1); 3403 + mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1); 3404 3404 /* Log an error, still needed to cleanup the pointers and add 3405 3405 * it back to the list. 
3406 3406 */ ··· 3414 3414 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, 3415 3415 struct mlx5_ib_multiport_info *mpi) 3416 3416 { 3417 - u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 3417 + u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 3418 3418 int err; 3419 3419 3420 3420 lockdep_assert_held(&mlx5_ib_multiport_mutex); 3421 3421 3422 3422 spin_lock(&ibdev->port[port_num].mp.mpi_lock); 3423 3423 if (ibdev->port[port_num].mp.mpi) { 3424 - mlx5_ib_dbg(ibdev, "port %d already affiliated.\n", 3424 + mlx5_ib_dbg(ibdev, "port %u already affiliated.\n", 3425 3425 port_num + 1); 3426 3426 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); 3427 3427 return false; ··· 3457 3457 3458 3458 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev) 3459 3459 { 3460 - int port_num = mlx5_core_native_port_num(dev->mdev) - 1; 3460 + u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1; 3461 3461 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 3462 3462 port_num + 1); 3463 3463 struct mlx5_ib_multiport_info *mpi; 3464 3464 int err; 3465 - int i; 3465 + u32 i; 3466 3466 3467 3467 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET) 3468 3468 return 0; ··· 3525 3525 3526 3526 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev) 3527 3527 { 3528 - int port_num = mlx5_core_native_port_num(dev->mdev) - 1; 3528 + u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1; 3529 3529 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 3530 3530 port_num + 1); 3531 - int i; 3531 + u32 i; 3532 3532 3533 3533 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET) 3534 3534 return; ··· 3541 3541 kfree(dev->port[i].mp.mpi); 3542 3542 dev->port[i].mp.mpi = NULL; 3543 3543 } else { 3544 - mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1); 3544 + mlx5_ib_dbg(dev, "unbinding port_num: %u\n", 3545 + i + 1); 3545 3546 mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi); 3546 
3547 } 3547 3548 } ··· 4163 4162 struct mlx5_core_dev *mdev = dev->mdev; 4164 4163 enum rdma_link_layer ll; 4165 4164 int port_type_cap; 4166 - u8 port_num = 0; 4165 + u32 port_num = 0; 4167 4166 int err; 4168 4167 4169 4168 port_type_cap = MLX5_CAP_GEN(mdev, port_type); ··· 4200 4199 struct mlx5_core_dev *mdev = dev->mdev; 4201 4200 enum rdma_link_layer ll; 4202 4201 int port_type_cap; 4203 - u8 port_num; 4202 + u32 port_num; 4204 4203 4205 4204 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 4206 4205 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+20 -20
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 406 406 struct mlx5_ib_qp_trans { 407 407 struct mlx5_ib_qp_base base; 408 408 u16 xrcdn; 409 - u8 alt_port; 409 + u32 alt_port; 410 410 u8 atomic_rd_en; 411 411 u8 resp_depth; 412 412 }; ··· 453 453 454 454 struct mlx5_ib_gsi_qp { 455 455 struct ib_qp *rx_qp; 456 - u8 port_num; 456 + u32 port_num; 457 457 struct ib_qp_cap cap; 458 458 struct ib_cq *cq; 459 459 struct mlx5_ib_gsi_wr *outstanding_wrs; ··· 490 490 struct mutex mutex; 491 491 /* cached variant of create_flags from struct ib_qp_init_attr */ 492 492 u32 flags; 493 - u8 port; 493 + u32 port; 494 494 u8 state; 495 495 int max_inline_data; 496 496 struct mlx5_bf bf; ··· 839 839 atomic_t tx_port_affinity; 840 840 enum ib_port_state last_port_state; 841 841 struct mlx5_ib_dev *dev; 842 - u8 native_port_num; 842 + u32 native_port_num; 843 843 }; 844 844 845 845 struct mlx5_ib_port { ··· 854 854 int offset; 855 855 struct mlx5_ib_dev *dev; 856 856 struct dentry *dentry; 857 - u8 port_num; 857 + u32 port_num; 858 858 }; 859 859 860 860 enum mlx5_ib_dbg_cc_types { ··· 1302 1302 int data_sg_nents, unsigned int *data_sg_offset, 1303 1303 struct scatterlist *meta_sg, int meta_sg_nents, 1304 1304 unsigned int *meta_sg_offset); 1305 - int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 1305 + int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, 1306 1306 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 1307 1307 const struct ib_mad *in, struct ib_mad *out, 1308 1308 size_t *out_mad_size, u16 *out_mad_pkey_index); ··· 1317 1317 u32 *vendor_id); 1318 1318 int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc); 1319 1319 int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid); 1320 - int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index, 1320 + int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index, 1321 1321 u16 *pkey); 1322 - int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 
port, int index, 1322 + int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index, 1323 1323 union ib_gid *gid); 1324 - int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port, 1324 + int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port, 1325 1325 struct ib_port_attr *props); 1326 - int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, 1326 + int mlx5_ib_query_port(struct ib_device *ibdev, u32 port, 1327 1327 struct ib_port_attr *props); 1328 1328 void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas, 1329 1329 u64 access_flags); ··· 1418 1418 const struct mlx5_ib_profile *profile); 1419 1419 1420 1420 int mlx5_ib_get_vf_config(struct ib_device *device, int vf, 1421 - u8 port, struct ifla_vf_info *info); 1421 + u32 port, struct ifla_vf_info *info); 1422 1422 int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf, 1423 - u8 port, int state); 1423 + u32 port, int state); 1424 1424 int mlx5_ib_get_vf_stats(struct ib_device *device, int vf, 1425 - u8 port, struct ifla_vf_stats *stats); 1426 - int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port, 1425 + u32 port, struct ifla_vf_stats *stats); 1426 + int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port, 1427 1427 struct ifla_vf_guid *node_guid, 1428 1428 struct ifla_vf_guid *port_guid); 1429 - int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port, 1429 + int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port, 1430 1430 u64 guid, int type); 1431 1431 1432 1432 __be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev, 1433 1433 const struct ib_gid_attr *attr); 1434 1434 1435 - void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num); 1436 - void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num); 1435 + void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num); 1436 + void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num); 1437 1437 1438 1438 /* 
GSI QP helper functions */ 1439 1439 int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp, ··· 1456 1456 int bfregn); 1457 1457 struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi); 1458 1458 struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev, 1459 - u8 ib_port_num, 1460 - u8 *native_port_num); 1459 + u32 ib_port_num, 1460 + u32 *native_port_num); 1461 1461 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev, 1462 - u8 port_num); 1462 + u32 port_num); 1463 1463 1464 1464 extern const struct uapi_definition mlx5_ib_devx_defs[]; 1465 1465 extern const struct uapi_definition mlx5_ib_flow_defs[];
+1 -1
drivers/infiniband/hw/mlx5/qp.c
··· 67 67 struct mlx5_rate_limit rl; 68 68 69 69 u8 rq_q_ctr_id; 70 - u16 port; 70 + u32 port; 71 71 }; 72 72 73 73 static void get_cqs(enum ib_qp_type qp_type,
+3 -3
drivers/infiniband/hw/mthca/mthca_av.c
··· 91 91 } 92 92 } 93 93 94 - enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port) 94 + enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port) 95 95 { 96 96 if (mthca_is_memfree(dev)) { 97 97 /* Handle old Arbel FW */ ··· 131 131 } 132 132 } 133 133 134 - u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port) 134 + u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port) 135 135 { 136 136 u8 rate; 137 137 ··· 293 293 { 294 294 struct mthca_ah *ah = to_mah(ibah); 295 295 struct mthca_dev *dev = to_mdev(ibah->device); 296 - u8 port_num = be32_to_cpu(ah->av->port_pd) >> 24; 296 + u32 port_num = be32_to_cpu(ah->av->port_pd) >> 24; 297 297 298 298 /* Only implement for MAD and memfree ah for now. */ 299 299 if (ah->type == MTHCA_AH_ON_HCA)
+4 -4
drivers/infiniband/hw/mthca/mthca_dev.h
··· 546 546 enum ib_sig_type send_policy, 547 547 struct ib_qp_cap *cap, 548 548 int qpn, 549 - int port, 549 + u32 port, 550 550 struct mthca_qp *qp, 551 551 struct ib_udata *udata); 552 552 void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp); ··· 559 559 struct ib_ud_header *header); 560 560 int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr); 561 561 int mthca_ah_grh_present(struct mthca_ah *ah); 562 - u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port); 563 - enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port); 562 + u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port); 563 + enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port); 564 564 565 565 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); 566 566 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); 567 567 568 - int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 568 + int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, 569 569 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 570 570 const struct ib_mad *in, struct ib_mad *out, 571 571 size_t *out_mad_size, u16 *out_mad_pkey_index);
+2 -2
drivers/infiniband/hw/mthca/mthca_mad.c
··· 162 162 } 163 163 164 164 static void forward_trap(struct mthca_dev *dev, 165 - u8 port_num, 165 + u32 port_num, 166 166 const struct ib_mad *mad) 167 167 { 168 168 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; ··· 196 196 } 197 197 } 198 198 199 - int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 199 + int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, 200 200 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 201 201 const struct ib_mad *in, struct ib_mad *out, 202 202 size_t *out_mad_size, u16 *out_mad_pkey_index)
+5 -5
drivers/infiniband/hw/mthca/mthca_provider.c
··· 127 127 } 128 128 129 129 static int mthca_query_port(struct ib_device *ibdev, 130 - u8 port, struct ib_port_attr *props) 130 + u32 port, struct ib_port_attr *props) 131 131 { 132 132 struct ib_smp *in_mad = NULL; 133 133 struct ib_smp *out_mad = NULL; ··· 194 194 } 195 195 196 196 static int mthca_modify_port(struct ib_device *ibdev, 197 - u8 port, int port_modify_mask, 197 + u32 port, int port_modify_mask, 198 198 struct ib_port_modify *props) 199 199 { 200 200 struct mthca_set_ib_param set_ib; ··· 223 223 } 224 224 225 225 static int mthca_query_pkey(struct ib_device *ibdev, 226 - u8 port, u16 index, u16 *pkey) 226 + u32 port, u16 index, u16 *pkey) 227 227 { 228 228 struct ib_smp *in_mad = NULL; 229 229 struct ib_smp *out_mad = NULL; ··· 251 251 return err; 252 252 } 253 253 254 - static int mthca_query_gid(struct ib_device *ibdev, u8 port, 254 + static int mthca_query_gid(struct ib_device *ibdev, u32 port, 255 255 int index, union ib_gid *gid) 256 256 { 257 257 struct ib_smp *in_mad = NULL; ··· 1051 1051 return err; 1052 1052 } 1053 1053 1054 - static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num, 1054 + static int mthca_port_immutable(struct ib_device *ibdev, u32 port_num, 1055 1055 struct ib_port_immutable *immutable) 1056 1056 { 1057 1057 struct ib_port_attr attr;
+1 -1
drivers/infiniband/hw/mthca/mthca_qp.c
··· 1370 1370 enum ib_sig_type send_policy, 1371 1371 struct ib_qp_cap *cap, 1372 1372 int qpn, 1373 - int port, 1373 + u32 port, 1374 1374 struct mthca_qp *qp, 1375 1375 struct ib_udata *udata) 1376 1376 {
+1 -1
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
··· 250 250 } 251 251 252 252 int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags, 253 - u8 port_num, const struct ib_wc *in_wc, 253 + u32 port_num, const struct ib_wc *in_wc, 254 254 const struct ib_grh *in_grh, const struct ib_mad *in, 255 255 struct ib_mad *out, size_t *out_mad_size, 256 256 u16 *out_mad_pkey_index)
+1 -1
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
··· 57 57 int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 58 58 59 59 int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags, 60 - u8 port_num, const struct ib_wc *in_wc, 60 + u32 port_num, const struct ib_wc *in_wc, 61 61 const struct ib_grh *in_grh, const struct ib_mad *in, 62 62 struct ib_mad *out, size_t *out_mad_size, 63 63 u16 *out_mad_pkey_index);
+2 -2
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 77 77 guid[7] = mac_addr[5]; 78 78 } 79 79 static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, 80 - u8 port_num) 80 + u32 port_num) 81 81 { 82 82 return IB_LINK_LAYER_ETHERNET; 83 83 } 84 84 85 - static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num, 85 + static int ocrdma_port_immutable(struct ib_device *ibdev, u32 port_num, 86 86 struct ib_port_immutable *immutable) 87 87 { 88 88 struct ib_port_attr attr;
+2 -2
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 54 54 #include "ocrdma_verbs.h" 55 55 #include <rdma/ocrdma-abi.h> 56 56 57 - int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) 57 + int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) 58 58 { 59 59 if (index > 0) 60 60 return -EINVAL; ··· 150 150 } 151 151 152 152 int ocrdma_query_port(struct ib_device *ibdev, 153 - u8 port, struct ib_port_attr *props) 153 + u32 port, struct ib_port_attr *props) 154 154 { 155 155 enum ib_port_state port_state; 156 156 struct ocrdma_dev *dev;
+4 -3
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
··· 53 53 54 54 int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props, 55 55 struct ib_udata *uhw); 56 - int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props); 56 + int ocrdma_query_port(struct ib_device *ibdev, u32 port, 57 + struct ib_port_attr *props); 57 58 58 59 enum rdma_protocol_type 59 - ocrdma_query_protocol(struct ib_device *device, u8 port_num); 60 + ocrdma_query_protocol(struct ib_device *device, u32 port_num); 60 61 61 62 void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid); 62 - int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey); 63 + int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); 63 64 64 65 int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); 65 66 void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
+4 -4
drivers/infiniband/hw/qedr/main.c
··· 53 53 54 54 #define QEDR_WQ_MULTIPLIER_DFT (3) 55 55 56 - static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num, 56 + static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num, 57 57 enum ib_event_type type) 58 58 { 59 59 struct ib_event ibev; ··· 66 66 } 67 67 68 68 static enum rdma_link_layer qedr_link_layer(struct ib_device *device, 69 - u8 port_num) 69 + u32 port_num) 70 70 { 71 71 return IB_LINK_LAYER_ETHERNET; 72 72 } ··· 81 81 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF); 82 82 } 83 83 84 - static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num, 84 + static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num, 85 85 struct ib_port_immutable *immutable) 86 86 { 87 87 struct ib_port_attr attr; ··· 100 100 return 0; 101 101 } 102 102 103 - static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num, 103 + static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num, 104 104 struct ib_port_immutable *immutable) 105 105 { 106 106 struct ib_port_attr attr;
+5 -4
drivers/infiniband/hw/qedr/verbs.c
··· 72 72 return ib_copy_to_udata(udata, src, min_len); 73 73 } 74 74 75 - int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) 75 + int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) 76 76 { 77 77 if (index >= QEDR_ROCE_PKEY_TABLE_LEN) 78 78 return -EINVAL; ··· 81 81 return 0; 82 82 } 83 83 84 - int qedr_iw_query_gid(struct ib_device *ibdev, u8 port, 84 + int qedr_iw_query_gid(struct ib_device *ibdev, u32 port, 85 85 int index, union ib_gid *sgid) 86 86 { 87 87 struct qedr_dev *dev = get_qedr_dev(ibdev); ··· 210 210 } 211 211 } 212 212 213 - int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr) 213 + int qedr_query_port(struct ib_device *ibdev, u32 port, 214 + struct ib_port_attr *attr) 214 215 { 215 216 struct qedr_dev *dev; 216 217 struct qed_rdma_port *rdma_port; ··· 4483 4482 } 4484 4483 4485 4484 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags, 4486 - u8 port_num, const struct ib_wc *in_wc, 4485 + u32 port_num, const struct ib_wc *in_wc, 4487 4486 const struct ib_grh *in_grh, const struct ib_mad *in, 4488 4487 struct ib_mad *out_mad, size_t *out_mad_size, 4489 4488 u16 *out_mad_pkey_index)
+6 -5
drivers/infiniband/hw/qedr/verbs.h
··· 34 34 35 35 int qedr_query_device(struct ib_device *ibdev, 36 36 struct ib_device_attr *attr, struct ib_udata *udata); 37 - int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props); 37 + int qedr_query_port(struct ib_device *ibdev, u32 port, 38 + struct ib_port_attr *props); 38 39 39 - int qedr_iw_query_gid(struct ib_device *ibdev, u8 port, 40 + int qedr_iw_query_gid(struct ib_device *ibdev, u32 port, 40 41 int index, union ib_gid *gid); 41 42 42 - int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey); 43 + int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); 43 44 44 45 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); 45 46 void qedr_dealloc_ucontext(struct ib_ucontext *uctx); ··· 93 92 int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *, 94 93 const struct ib_recv_wr **bad_wr); 95 94 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags, 96 - u8 port_num, const struct ib_wc *in_wc, 95 + u32 port_num, const struct ib_wc *in_wc, 97 96 const struct ib_grh *in_grh, const struct ib_mad *in_mad, 98 97 struct ib_mad *out_mad, size_t *out_mad_size, 99 98 u16 *out_mad_pkey_index); 100 99 101 - int qedr_port_immutable(struct ib_device *ibdev, u8 port_num, 100 + int qedr_port_immutable(struct ib_device *ibdev, u32 port_num, 102 101 struct ib_port_immutable *immutable); 103 102 #endif
+4 -4
drivers/infiniband/hw/qib/qib.h
··· 630 630 u8 rx_pol_inv; 631 631 632 632 u8 hw_pidx; /* physical port index */ 633 - u8 port; /* IB port number and index into dd->pports - 1 */ 633 + u32 port; /* IB port number and index into dd->pports - 1 */ 634 634 635 635 u8 delay_mult; 636 636 ··· 1200 1200 return container_of(ibp, struct qib_pportdata, ibport_data); 1201 1201 } 1202 1202 1203 - static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port) 1203 + static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u32 port) 1204 1204 { 1205 1205 struct qib_devdata *dd = dd_from_ibdev(ibdev); 1206 - unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ 1206 + u32 pidx = port - 1; /* IB number port from 1, hdw from 0 */ 1207 1207 1208 1208 WARN_ON(pidx >= dd->num_pports); 1209 1209 return &dd->pport[pidx].ibport_data; ··· 1369 1369 int qib_device_create(struct qib_devdata *); 1370 1370 void qib_device_remove(struct qib_devdata *); 1371 1371 1372 - int qib_create_port_files(struct ib_device *ibdev, u8 port_num, 1372 + int qib_create_port_files(struct ib_device *ibdev, u32 port_num, 1373 1373 struct kobject *kobj); 1374 1374 void qib_verbs_unregister_sysfs(struct qib_devdata *); 1375 1375 /* Hook for sysfs read of QSFP */
+2 -2
drivers/infiniband/hw/qib/qib_mad.c
··· 203 203 /* 204 204 * Send a Port Capability Mask Changed trap (ch. 14.3.11). 205 205 */ 206 - void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num) 206 + void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num) 207 207 { 208 208 struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); 209 209 struct qib_devdata *dd = dd_from_dev(ibdev); ··· 2360 2360 * 2361 2361 * This is called by the ib_mad module. 2362 2362 */ 2363 - int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, 2363 + int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port, 2364 2364 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 2365 2365 const struct ib_mad *in, struct ib_mad *out, 2366 2366 size_t *out_mad_size, u16 *out_mad_pkey_index)
+2 -2
drivers/infiniband/hw/qib/qib_qp.c
··· 125 125 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI. 126 126 */ 127 127 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, 128 - enum ib_qp_type type, u8 port) 128 + enum ib_qp_type type, u32 port) 129 129 { 130 130 u32 i, offset, max_scan, qpn; 131 131 struct rvt_qpn_map *map; ··· 136 136 u16 qpt_mask = dd->qpn_mask; 137 137 138 138 if (type == IB_QPT_SMI || type == IB_QPT_GSI) { 139 - unsigned n; 139 + u32 n; 140 140 141 141 ret = type == IB_QPT_GSI; 142 142 n = 1 << (ret + 2 * (port - 1));
+1 -1
drivers/infiniband/hw/qib/qib_sysfs.c
··· 728 728 .attrs = qib_attributes, 729 729 }; 730 730 731 - int qib_create_port_files(struct ib_device *ibdev, u8 port_num, 731 + int qib_create_port_files(struct ib_device *ibdev, u32 port_num, 732 732 struct kobject *kobj) 733 733 { 734 734 struct qib_pportdata *ppd;
+3 -3
drivers/infiniband/hw/qib/qib_verbs.c
··· 1188 1188 } 1189 1189 } 1190 1190 1191 - static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num, 1191 + static int qib_query_port(struct rvt_dev_info *rdi, u32 port_num, 1192 1192 struct ib_port_attr *props) 1193 1193 { 1194 1194 struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); ··· 1273 1273 return ret; 1274 1274 } 1275 1275 1276 - static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num) 1276 + static int qib_shut_down_port(struct rvt_dev_info *rdi, u32 port_num) 1277 1277 { 1278 1278 struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); 1279 1279 struct qib_devdata *dd = dd_from_dev(ibdev); ··· 1342 1342 struct rvt_qp *qp0; 1343 1343 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1344 1344 struct qib_devdata *dd = dd_from_ppd(ppd); 1345 - u8 port_num = ppd->port; 1345 + u32 port_num = ppd->port; 1346 1346 1347 1347 memset(&attr, 0, sizeof(attr)); 1348 1348 attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
+3 -3
drivers/infiniband/hw/qib/qib_verbs.h
··· 239 239 240 240 void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl, 241 241 u32 qp1, u32 qp2, __be16 lid1, __be16 lid2); 242 - void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num); 242 + void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num); 243 243 void qib_sys_guid_chg(struct qib_ibport *ibp); 244 244 void qib_node_desc_chg(struct qib_ibport *ibp); 245 - int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 245 + int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, 246 246 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 247 247 const struct ib_mad *in, struct ib_mad *out, 248 248 size_t *out_mad_size, u16 *out_mad_pkey_index); ··· 273 273 void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); 274 274 void qib_notify_qp_reset(struct rvt_qp *qp); 275 275 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, 276 - enum ib_qp_type type, u8 port); 276 + enum ib_qp_type type, u32 port); 277 277 void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait); 278 278 #ifdef CONFIG_DEBUG_FS 279 279
+1 -1
drivers/infiniband/hw/usnic/usnic_ib_main.c
··· 303 303 }; 304 304 /* End of inet section*/ 305 305 306 - static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num, 306 + static int usnic_port_immutable(struct ib_device *ibdev, u32 port_num, 307 307 struct ib_port_immutable *immutable) 308 308 { 309 309 struct ib_port_attr attr;
+3 -3
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
··· 270 270 /* Start of ib callback functions */ 271 271 272 272 enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device, 273 - u8 port_num) 273 + u32 port_num) 274 274 { 275 275 return IB_LINK_LAYER_ETHERNET; 276 276 } ··· 332 332 return 0; 333 333 } 334 334 335 - int usnic_ib_query_port(struct ib_device *ibdev, u8 port, 335 + int usnic_ib_query_port(struct ib_device *ibdev, u32 port, 336 336 struct ib_port_attr *props) 337 337 { 338 338 struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); ··· 420 420 return err; 421 421 } 422 422 423 - int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 423 + int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index, 424 424 union ib_gid *gid) 425 425 { 426 426
+3 -3
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
··· 37 37 #include "usnic_ib.h" 38 38 39 39 enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device, 40 - u8 port_num); 40 + u32 port_num); 41 41 int usnic_ib_query_device(struct ib_device *ibdev, 42 42 struct ib_device_attr *props, 43 43 struct ib_udata *uhw); 44 - int usnic_ib_query_port(struct ib_device *ibdev, u8 port, 44 + int usnic_ib_query_port(struct ib_device *ibdev, u32 port, 45 45 struct ib_port_attr *props); 46 46 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 47 47 int qp_attr_mask, 48 48 struct ib_qp_init_attr *qp_init_attr); 49 - int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 49 + int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index, 50 50 union ib_gid *gid); 51 51 int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); 52 52 int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+1 -1
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
··· 121 121 return 0; 122 122 } 123 123 124 - static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num, 124 + static int pvrdma_port_immutable(struct ib_device *ibdev, u32 port_num, 125 125 struct ib_port_immutable *immutable) 126 126 { 127 127 struct pvrdma_dev *dev = to_vdev(ibdev);
+6 -6
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
··· 125 125 * 126 126 * @return: 0 on success, otherwise negative errno 127 127 */ 128 - int pvrdma_query_port(struct ib_device *ibdev, u8 port, 128 + int pvrdma_query_port(struct ib_device *ibdev, u32 port, 129 129 struct ib_port_attr *props) 130 130 { 131 131 struct pvrdma_dev *dev = to_vdev(ibdev); ··· 183 183 * 184 184 * @return: 0 on success, otherwise negative errno 185 185 */ 186 - int pvrdma_query_gid(struct ib_device *ibdev, u8 port, int index, 186 + int pvrdma_query_gid(struct ib_device *ibdev, u32 port, int index, 187 187 union ib_gid *gid) 188 188 { 189 189 struct pvrdma_dev *dev = to_vdev(ibdev); ··· 205 205 * 206 206 * @return: 0 on success, otherwise negative errno 207 207 */ 208 - int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 208 + int pvrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 209 209 u16 *pkey) 210 210 { 211 211 int err = 0; ··· 232 232 } 233 233 234 234 enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev, 235 - u8 port) 235 + u32 port) 236 236 { 237 237 return IB_LINK_LAYER_ETHERNET; 238 238 } ··· 274 274 * 275 275 * @return: 0 on success, otherwise negative errno 276 276 */ 277 - int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, 277 + int pvrdma_modify_port(struct ib_device *ibdev, u32 port, int mask, 278 278 struct ib_port_modify *props) 279 279 { 280 280 struct ib_port_attr attr; ··· 516 516 struct pvrdma_dev *dev = to_vdev(ibah->device); 517 517 struct pvrdma_ah *ah = to_vah(ibah); 518 518 const struct ib_global_route *grh; 519 - u8 port_num = rdma_ah_get_port_num(ah_attr); 519 + u32 port_num = rdma_ah_get_port_num(ah_attr); 520 520 521 521 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) 522 522 return -EINVAL;
+5 -5
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
··· 348 348 int pvrdma_query_device(struct ib_device *ibdev, 349 349 struct ib_device_attr *props, 350 350 struct ib_udata *udata); 351 - int pvrdma_query_port(struct ib_device *ibdev, u8 port, 351 + int pvrdma_query_port(struct ib_device *ibdev, u32 port, 352 352 struct ib_port_attr *props); 353 - int pvrdma_query_gid(struct ib_device *ibdev, u8 port, 353 + int pvrdma_query_gid(struct ib_device *ibdev, u32 port, 354 354 int index, union ib_gid *gid); 355 - int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, 355 + int pvrdma_query_pkey(struct ib_device *ibdev, u32 port, 356 356 u16 index, u16 *pkey); 357 357 enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev, 358 - u8 port); 358 + u32 port); 359 359 int pvrdma_modify_device(struct ib_device *ibdev, int mask, 360 360 struct ib_device_modify *props); 361 - int pvrdma_modify_port(struct ib_device *ibdev, u8 port, 361 + int pvrdma_modify_port(struct ib_device *ibdev, u32 port, 362 362 int mask, struct ib_port_modify *props); 363 363 int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); 364 364 int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+1 -4
drivers/infiniband/sw/rdmavt/mad.c
··· 70 70 * 71 71 * Return: IB_MAD_RESULT_SUCCESS or error 72 72 */ 73 - int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 73 + int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, 74 74 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 75 75 const struct ib_mad_hdr *in, size_t in_mad_size, 76 76 struct ib_mad_hdr *out, size_t *out_mad_size, ··· 82 82 * future may choose to implement this but it should not be made into a 83 83 * requirement. 84 84 */ 85 - if (ibport_num_to_idx(ibdev, port_num) < 0) 86 - return -EINVAL; 87 - 88 85 return IB_MAD_RESULT_FAILURE; 89 86 } 90 87
+1 -1
drivers/infiniband/sw/rdmavt/mad.h
··· 50 50 51 51 #include <rdma/rdma_vt.h> 52 52 53 - int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 53 + int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, 54 54 const struct ib_wc *in_wc, const struct ib_grh *in_grh, 55 55 const struct ib_mad_hdr *in, size_t in_mad_size, 56 56 struct ib_mad_hdr *out, size_t *out_mad_size,
+10 -24
drivers/infiniband/sw/rdmavt/vt.c
··· 151 151 * 152 152 * Return: 0 on success 153 153 */ 154 - static int rvt_query_port(struct ib_device *ibdev, u8 port_num, 154 + static int rvt_query_port(struct ib_device *ibdev, u32 port_num, 155 155 struct ib_port_attr *props) 156 156 { 157 157 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); 158 158 struct rvt_ibport *rvp; 159 - int port_index = ibport_num_to_idx(ibdev, port_num); 160 - 161 - if (port_index < 0) 162 - return -EINVAL; 159 + u32 port_index = ibport_num_to_idx(ibdev, port_num); 163 160 164 161 rvp = rdi->ports[port_index]; 165 162 /* props being zeroed by the caller, avoid zeroing it here */ ··· 183 186 * 184 187 * Return: 0 on success 185 188 */ 186 - static int rvt_modify_port(struct ib_device *ibdev, u8 port_num, 189 + static int rvt_modify_port(struct ib_device *ibdev, u32 port_num, 187 190 int port_modify_mask, struct ib_port_modify *props) 188 191 { 189 192 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); 190 193 struct rvt_ibport *rvp; 191 194 int ret = 0; 192 - int port_index = ibport_num_to_idx(ibdev, port_num); 193 - 194 - if (port_index < 0) 195 - return -EINVAL; 195 + u32 port_index = ibport_num_to_idx(ibdev, port_num); 196 196 197 197 rvp = rdi->ports[port_index]; 198 198 if (port_modify_mask & IB_PORT_OPA_MASK_CHG) { ··· 219 225 * 220 226 * Return: 0 on failure pkey otherwise 221 227 */ 222 - static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index, 228 + static int rvt_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index, 223 229 u16 *pkey) 224 230 { 225 231 /* ··· 229 235 * no way to protect against that anyway. 
230 236 */ 231 237 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); 232 - int port_index; 238 + u32 port_index; 233 239 234 240 port_index = ibport_num_to_idx(ibdev, port_num); 235 - if (port_index < 0) 236 - return -EINVAL; 237 241 238 242 if (index >= rvt_get_npkeys(rdi)) 239 243 return -EINVAL; ··· 249 257 * 250 258 * Return: 0 on success 251 259 */ 252 - static int rvt_query_gid(struct ib_device *ibdev, u8 port_num, 260 + static int rvt_query_gid(struct ib_device *ibdev, u32 port_num, 253 261 int guid_index, union ib_gid *gid) 254 262 { 255 263 struct rvt_dev_info *rdi; 256 264 struct rvt_ibport *rvp; 257 - int port_index; 265 + u32 port_index; 258 266 259 267 /* 260 268 * Driver is responsible for updating the guid table. Which will be used ··· 262 270 * is being done. 263 271 */ 264 272 port_index = ibport_num_to_idx(ibdev, port_num); 265 - if (port_index < 0) 266 - return -EINVAL; 267 273 268 274 rdi = ib_to_rvt(ibdev); 269 275 rvp = rdi->ports[port_index]; ··· 291 301 return; 292 302 } 293 303 294 - static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num, 304 + static int rvt_get_port_immutable(struct ib_device *ibdev, u32 port_num, 295 305 struct ib_port_immutable *immutable) 296 306 { 297 307 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); 298 308 struct ib_port_attr attr; 299 - int err, port_index; 300 - 301 - port_index = ibport_num_to_idx(ibdev, port_num); 302 - if (port_index < 0) 303 - return -EINVAL; 309 + int err; 304 310 305 311 immutable->core_cap_flags = rdi->dparms.core_cap_flags; 306 312
+2 -9
drivers/infiniband/sw/rdmavt/vt.h
··· 96 96 #define __rvt_pr_err_ratelimited(pdev, name, fmt, ...) \ 97 97 dev_err_ratelimited(&(pdev)->dev, "%s: " fmt, name, ##__VA_ARGS__) 98 98 99 - static inline int ibport_num_to_idx(struct ib_device *ibdev, u8 port_num) 99 + static inline u32 ibport_num_to_idx(struct ib_device *ibdev, u32 port_num) 100 100 { 101 - struct rvt_dev_info *rdi = ib_to_rvt(ibdev); 102 - int port_index; 103 - 104 - port_index = port_num - 1; /* IB ports start at 1 our arrays at 0 */ 105 - if ((port_index < 0) || (port_index >= rdi->dparms.nports)) 106 - return -EINVAL; 107 - 108 - return port_index; 101 + return port_num - 1; /* IB ports start at 1 our arrays at 0 */ 109 102 } 110 103 111 104 #endif /* DEF_RDMAVT_H */
+2 -2
drivers/infiniband/sw/rxe/rxe_hw_counters.c
··· 26 26 27 27 int rxe_ib_get_hw_stats(struct ib_device *ibdev, 28 28 struct rdma_hw_stats *stats, 29 - u8 port, int index) 29 + u32 port, int index) 30 30 { 31 31 struct rxe_dev *dev = to_rdev(ibdev); 32 32 unsigned int cnt; ··· 41 41 } 42 42 43 43 struct rdma_hw_stats *rxe_ib_alloc_hw_stats(struct ib_device *ibdev, 44 - u8 port_num) 44 + u32 port_num) 45 45 { 46 46 BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_name) != RXE_NUM_OF_COUNTERS); 47 47 /* We support only per port stats */
+2 -2
drivers/infiniband/sw/rxe/rxe_hw_counters.h
··· 30 30 }; 31 31 32 32 struct rdma_hw_stats *rxe_ib_alloc_hw_stats(struct ib_device *ibdev, 33 - u8 port_num); 33 + u32 port_num); 34 34 int rxe_ib_get_hw_stats(struct ib_device *ibdev, 35 35 struct rdma_hw_stats *stats, 36 - u8 port, int index); 36 + u32 port, int index); 37 37 #endif /* RXE_HW_COUNTERS_H */
+5 -5
drivers/infiniband/sw/rxe/rxe_verbs.c
··· 26 26 } 27 27 28 28 static int rxe_query_port(struct ib_device *dev, 29 - u8 port_num, struct ib_port_attr *attr) 29 + u32 port_num, struct ib_port_attr *attr) 30 30 { 31 31 struct rxe_dev *rxe = to_rdev(dev); 32 32 struct rxe_port *port; ··· 54 54 } 55 55 56 56 static int rxe_query_pkey(struct ib_device *device, 57 - u8 port_num, u16 index, u16 *pkey) 57 + u32 port_num, u16 index, u16 *pkey) 58 58 { 59 59 if (index > 0) 60 60 return -EINVAL; ··· 84 84 } 85 85 86 86 static int rxe_modify_port(struct ib_device *dev, 87 - u8 port_num, int mask, struct ib_port_modify *attr) 87 + u32 port_num, int mask, struct ib_port_modify *attr) 88 88 { 89 89 struct rxe_dev *rxe = to_rdev(dev); 90 90 struct rxe_port *port; ··· 101 101 } 102 102 103 103 static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev, 104 - u8 port_num) 104 + u32 port_num) 105 105 { 106 106 return IB_LINK_LAYER_ETHERNET; 107 107 } ··· 121 121 rxe_drop_ref(uc); 122 122 } 123 123 124 - static int rxe_port_immutable(struct ib_device *dev, u8 port_num, 124 + static int rxe_port_immutable(struct ib_device *dev, u32 port_num, 125 125 struct ib_port_immutable *immutable) 126 126 { 127 127 int err;
+4 -4
drivers/infiniband/sw/siw/siw_verbs.c
··· 160 160 return 0; 161 161 } 162 162 163 - int siw_query_port(struct ib_device *base_dev, u8 port, 163 + int siw_query_port(struct ib_device *base_dev, u32 port, 164 164 struct ib_port_attr *attr) 165 165 { 166 166 struct siw_device *sdev = to_siw_dev(base_dev); ··· 194 194 return rv; 195 195 } 196 196 197 - int siw_get_port_immutable(struct ib_device *base_dev, u8 port, 197 + int siw_get_port_immutable(struct ib_device *base_dev, u32 port, 198 198 struct ib_port_immutable *port_immutable) 199 199 { 200 200 struct ib_port_attr attr; ··· 209 209 return 0; 210 210 } 211 211 212 - int siw_query_gid(struct ib_device *base_dev, u8 port, int idx, 212 + int siw_query_gid(struct ib_device *base_dev, u32 port, int idx, 213 213 union ib_gid *gid) 214 214 { 215 215 struct siw_device *sdev = to_siw_dev(base_dev); ··· 1848 1848 } 1849 1849 } 1850 1850 1851 - void siw_port_event(struct siw_device *sdev, u8 port, enum ib_event_type etype) 1851 + void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype) 1852 1852 { 1853 1853 struct ib_event event; 1854 1854
+5 -5
drivers/infiniband/sw/siw/siw_verbs.h
··· 36 36 37 37 int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata); 38 38 void siw_dealloc_ucontext(struct ib_ucontext *base_ctx); 39 - int siw_query_port(struct ib_device *base_dev, u8 port, 39 + int siw_query_port(struct ib_device *base_dev, u32 port, 40 40 struct ib_port_attr *attr); 41 - int siw_get_port_immutable(struct ib_device *base_dev, u8 port, 41 + int siw_get_port_immutable(struct ib_device *base_dev, u32 port, 42 42 struct ib_port_immutable *port_immutable); 43 43 int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr, 44 44 struct ib_udata *udata); 45 45 int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr, 46 46 struct ib_udata *udata); 47 - int siw_query_port(struct ib_device *base_dev, u8 port, 47 + int siw_query_port(struct ib_device *base_dev, u32 port, 48 48 struct ib_port_attr *attr); 49 - int siw_query_gid(struct ib_device *base_dev, u8 port, int idx, 49 + int siw_query_gid(struct ib_device *base_dev, u32 port, int idx, 50 50 union ib_gid *gid); 51 51 int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata); 52 52 int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata); ··· 86 86 void siw_qp_event(struct siw_qp *qp, enum ib_event_type type); 87 87 void siw_cq_event(struct siw_cq *cq, enum ib_event_type type); 88 88 void siw_srq_event(struct siw_srq *srq, enum ib_event_type type); 89 - void siw_port_event(struct siw_device *dev, u8 port, enum ib_event_type type); 89 + void siw_port_event(struct siw_device *dev, u32 port, enum ib_event_type type); 90 90 91 91 #endif
+2 -2
drivers/infiniband/ulp/ipoib/ipoib.h
··· 501 501 struct ipoib_path *__path_find(struct net_device *dev, void *gid); 502 502 void ipoib_mark_paths_invalid(struct net_device *dev); 503 503 void ipoib_flush_paths(struct net_device *dev); 504 - struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port, 504 + struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port, 505 505 const char *format); 506 - int ipoib_intf_init(struct ib_device *hca, u8 port, const char *format, 506 + int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format, 507 507 struct net_device *dev); 508 508 void ipoib_ib_tx_timer_func(struct timer_list *t); 509 509 void ipoib_ib_dev_flush_light(struct work_struct *work);
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 1060 1060 union ib_gid *netdev_gid; 1061 1061 int err; 1062 1062 u16 index; 1063 - u8 port; 1063 + u32 port; 1064 1064 bool ret = false; 1065 1065 1066 1066 netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
+7 -7
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 90 90 static void ipoib_remove_one(struct ib_device *device, void *client_data); 91 91 static void ipoib_neigh_reclaim(struct rcu_head *rp); 92 92 static struct net_device *ipoib_get_net_dev_by_params( 93 - struct ib_device *dev, u8 port, u16 pkey, 93 + struct ib_device *dev, u32 port, u16 pkey, 94 94 const union ib_gid *gid, const struct sockaddr *addr, 95 95 void *client_data); 96 96 static int ipoib_set_mac(struct net_device *dev, void *addr); ··· 438 438 /* Returns the number of matching net_devs found (between 0 and 2). Also 439 439 * return the matching net_device in the @net_dev parameter, holding a 440 440 * reference to the net_device, if the number of matches >= 1 */ 441 - static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port, 441 + static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u32 port, 442 442 u16 pkey_index, 443 443 const union ib_gid *gid, 444 444 const struct sockaddr *addr, ··· 463 463 } 464 464 465 465 static struct net_device *ipoib_get_net_dev_by_params( 466 - struct ib_device *dev, u8 port, u16 pkey, 466 + struct ib_device *dev, u32 port, u16 pkey, 467 467 const union ib_gid *gid, const struct sockaddr *addr, 468 468 void *client_data) 469 469 { ··· 2145 2145 INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh); 2146 2146 } 2147 2147 2148 - static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port, 2148 + static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u32 port, 2149 2149 const char *name) 2150 2150 { 2151 2151 struct net_device *dev; ··· 2162 2162 return dev; 2163 2163 } 2164 2164 2165 - int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name, 2165 + int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name, 2166 2166 struct net_device *dev) 2167 2167 { 2168 2168 struct rdma_netdev *rn = netdev_priv(dev); ··· 2213 2213 return rc; 2214 2214 } 2215 2215 2216 - struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port, 
2216 + struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port, 2217 2217 const char *name) 2218 2218 { 2219 2219 struct net_device *dev; ··· 2456 2456 } 2457 2457 2458 2458 static struct net_device *ipoib_add_port(const char *format, 2459 - struct ib_device *hca, u8 port) 2459 + struct ib_device *hca, u32 port) 2460 2460 { 2461 2461 struct rtnl_link_ops *ops = ipoib_get_link_ops(); 2462 2462 struct rdma_netdev_alloc_params params;
+2 -1
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 3105 3105 { 3106 3106 struct srpt_device *sdev; 3107 3107 struct srpt_port *sport; 3108 - int i, ret; 3108 + int ret; 3109 + u32 i; 3109 3110 3110 3111 pr_debug("device = %p\n", device); 3111 3112
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 718 718 return &mlx5i_nic_profile; 719 719 } 720 720 721 - static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num, 721 + static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num, 722 722 struct net_device *netdev, void *param) 723 723 { 724 724 struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
+9 -9
include/rdma/ib_cache.h
··· 10 10 11 11 #include <rdma/ib_verbs.h> 12 12 13 - int rdma_query_gid(struct ib_device *device, u8 port_num, int index, 13 + int rdma_query_gid(struct ib_device *device, u32 port_num, int index, 14 14 union ib_gid *gid); 15 15 void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr); 16 16 const struct ib_gid_attr *rdma_find_gid(struct ib_device *device, ··· 20 20 const struct ib_gid_attr *rdma_find_gid_by_port(struct ib_device *ib_dev, 21 21 const union ib_gid *gid, 22 22 enum ib_gid_type gid_type, 23 - u8 port, 23 + u32 port, 24 24 struct net_device *ndev); 25 25 const struct ib_gid_attr *rdma_find_gid_by_filter( 26 - struct ib_device *device, const union ib_gid *gid, u8 port_num, 26 + struct ib_device *device, const union ib_gid *gid, u32 port_num, 27 27 bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *, 28 28 void *), 29 29 void *context); ··· 43 43 * the local software cache. 44 44 */ 45 45 int ib_get_cached_pkey(struct ib_device *device_handle, 46 - u8 port_num, 46 + u32 port_num, 47 47 int index, 48 48 u16 *pkey); 49 49 ··· 59 59 * the local software cache. 60 60 */ 61 61 int ib_find_cached_pkey(struct ib_device *device, 62 - u8 port_num, 62 + u32 port_num, 63 63 u16 pkey, 64 64 u16 *index); 65 65 ··· 75 75 * the local software cache. 76 76 */ 77 77 int ib_find_exact_cached_pkey(struct ib_device *device, 78 - u8 port_num, 78 + u32 port_num, 79 79 u16 pkey, 80 80 u16 *index); 81 81 ··· 89 89 * the local software cache. 90 90 */ 91 91 int ib_get_cached_lmc(struct ib_device *device, 92 - u8 port_num, 92 + u32 port_num, 93 93 u8 *lmc); 94 94 95 95 /** ··· 102 102 * the local software cache. 
103 103 */ 104 104 int ib_get_cached_port_state(struct ib_device *device, 105 - u8 port_num, 105 + u32 port_num, 106 106 enum ib_port_state *port_active); 107 107 108 108 bool rdma_is_zero_gid(const union ib_gid *gid); 109 109 const struct ib_gid_attr *rdma_get_gid_attr(struct ib_device *device, 110 - u8 port_num, int index); 110 + u32 port_num, int index); 111 111 void rdma_put_gid_attr(const struct ib_gid_attr *attr); 112 112 void rdma_hold_gid_attr(const struct ib_gid_attr *attr); 113 113 ssize_t rdma_query_gid_table(struct ib_device *device,
+1 -1
include/rdma/ib_mad.h
··· 668 668 * @registration_flags: Registration flags to set for this agent 669 669 */ 670 670 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, 671 - u8 port_num, 671 + u32 port_num, 672 672 enum ib_qp_type qp_type, 673 673 struct ib_mad_reg_req *mad_reg_req, 674 674 u8 rmpp_version,
+8 -7
include/rdma/ib_sa.h
··· 423 423 void ib_sa_cancel_query(int id, struct ib_sa_query *query); 424 424 425 425 int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device, 426 - u8 port_num, struct sa_path_rec *rec, 426 + u32 port_num, struct sa_path_rec *rec, 427 427 ib_sa_comp_mask comp_mask, unsigned long timeout_ms, 428 428 gfp_t gfp_mask, 429 429 void (*callback)(int status, struct sa_path_rec *resp, ··· 431 431 void *context, struct ib_sa_query **query); 432 432 433 433 int ib_sa_service_rec_query(struct ib_sa_client *client, 434 - struct ib_device *device, u8 port_num, u8 method, 434 + struct ib_device *device, u32 port_num, u8 method, 435 435 struct ib_sa_service_rec *rec, 436 436 ib_sa_comp_mask comp_mask, unsigned long timeout_ms, 437 437 gfp_t gfp_mask, ··· 477 477 * group, and the user must rejoin the group to continue using it. 478 478 */ 479 479 struct ib_sa_multicast *ib_sa_join_multicast(struct ib_sa_client *client, 480 - struct ib_device *device, u8 port_num, 480 + struct ib_device *device, 481 + u32 port_num, 481 482 struct ib_sa_mcmember_rec *rec, 482 483 ib_sa_comp_mask comp_mask, gfp_t gfp_mask, 483 484 int (*callback)(int status, ··· 507 506 * @mgid: MGID of multicast group. 508 507 * @rec: Location to copy SA multicast member record. 509 508 */ 510 - int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num, 509 + int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num, 511 510 union ib_gid *mgid, struct ib_sa_mcmember_rec *rec); 512 511 513 512 /** 514 513 * ib_init_ah_from_mcmember - Initialize address handle attributes based on 515 514 * an SA multicast member record. 
516 515 */ 517 - int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, 516 + int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num, 518 517 struct ib_sa_mcmember_rec *rec, 519 518 struct net_device *ndev, 520 519 enum ib_gid_type gid_type, 521 520 struct rdma_ah_attr *ah_attr); 522 521 523 - int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, 522 + int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num, 524 523 struct sa_path_rec *rec, 525 524 struct rdma_ah_attr *ah_attr, 526 525 const struct ib_gid_attr *sgid_attr); ··· 539 538 540 539 /* Support GuidInfoRecord */ 541 540 int ib_sa_guid_info_rec_query(struct ib_sa_client *client, 542 - struct ib_device *device, u8 port_num, 541 + struct ib_device *device, u32 port_num, 543 542 struct ib_sa_guidinfo_rec *rec, 544 543 ib_sa_comp_mask comp_mask, u8 method, 545 544 unsigned long timeout_ms, gfp_t gfp_mask,
+95 -82
include/rdma/ib_verbs.h
··· 152 152 union ib_gid gid; 153 153 enum ib_gid_type gid_type; 154 154 u16 index; 155 - u8 port_num; 155 + u32 port_num; 156 156 }; 157 157 158 158 enum { ··· 736 736 struct ib_qp *qp; 737 737 struct ib_srq *srq; 738 738 struct ib_wq *wq; 739 - u8 port_num; 739 + u32 port_num; 740 740 } element; 741 741 enum ib_event_type event; 742 742 }; ··· 919 919 struct ib_global_route grh; 920 920 u8 sl; 921 921 u8 static_rate; 922 - u8 port_num; 922 + u32 port_num; 923 923 u8 ah_flags; 924 924 enum rdma_ah_attr_type type; 925 925 union { ··· 1006 1006 u16 pkey_index; 1007 1007 u8 sl; 1008 1008 u8 dlid_path_bits; 1009 - u8 port_num; /* valid only for DR SMPs on switches */ 1009 + u32 port_num; /* valid only for DR SMPs on switches */ 1010 1010 u8 smac[ETH_ALEN]; 1011 1011 u16 vlan_id; 1012 1012 u8 network_hdr_type; ··· 1161 1161 /* 1162 1162 * Only needed for special QP types, or when using the RW API. 1163 1163 */ 1164 - u8 port_num; 1164 + u32 port_num; 1165 1165 struct ib_rwq_ind_table *rwq_ind_tbl; 1166 1166 u32 source_qpn; 1167 1167 }; ··· 1280 1280 u8 max_rd_atomic; 1281 1281 u8 max_dest_rd_atomic; 1282 1282 u8 min_rnr_timer; 1283 - u8 port_num; 1283 + u32 port_num; 1284 1284 u8 timeout; 1285 1285 u8 retry_cnt; 1286 1286 u8 rnr_retry; 1287 - u8 alt_port_num; 1287 + u32 alt_port_num; 1288 1288 u8 alt_timeout; 1289 1289 u32 rate_limit; 1290 1290 struct net_device *xmit_slave; ··· 1401 1401 u32 remote_qpn; 1402 1402 u32 remote_qkey; 1403 1403 u16 pkey_index; /* valid for GSI only */ 1404 - u8 port_num; /* valid for DR SMPs on switch only */ 1404 + u32 port_num; /* valid for DR SMPs on switch only */ 1405 1405 }; 1406 1406 1407 1407 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr) ··· 1708 1708 struct ib_port_pkey { 1709 1709 enum port_pkey_state state; 1710 1710 u16 pkey_index; 1711 - u8 port_num; 1711 + u32 port_num; 1712 1712 struct list_head qp_list; 1713 1713 struct list_head to_error_list; 1714 1714 struct ib_qp_security *sec; ··· 1769 1769 
enum ib_qp_type qp_type; 1770 1770 struct ib_rwq_ind_table *rwq_ind_tbl; 1771 1771 struct ib_qp_security *qp_sec; 1772 - u8 port; 1772 + u32 port; 1773 1773 1774 1774 bool integrity_en; 1775 1775 /* ··· 2065 2065 u16 priority; 2066 2066 u32 flags; 2067 2067 u8 num_of_specs; 2068 - u8 port; 2068 + u32 port; 2069 2069 union ib_flow_spec flows[]; 2070 2070 }; 2071 2071 ··· 2194 2194 struct rdma_netdev { 2195 2195 void *clnt_priv; 2196 2196 struct ib_device *hca; 2197 - u8 port_num; 2197 + u32 port_num; 2198 2198 int mtu; 2199 2199 2200 2200 /* ··· 2223 2223 unsigned int rxqs; 2224 2224 void *param; 2225 2225 2226 - int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num, 2226 + int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num, 2227 2227 struct net_device *netdev, void *param); 2228 2228 }; 2229 2229 ··· 2305 2305 const struct ib_recv_wr *recv_wr, 2306 2306 const struct ib_recv_wr **bad_recv_wr); 2307 2307 int (*process_mad)(struct ib_device *device, int process_mad_flags, 2308 - u8 port_num, const struct ib_wc *in_wc, 2308 + u32 port_num, const struct ib_wc *in_wc, 2309 2309 const struct ib_grh *in_grh, 2310 2310 const struct ib_mad *in_mad, struct ib_mad *out_mad, 2311 2311 size_t *out_mad_size, u16 *out_mad_pkey_index); ··· 2317 2317 void (*get_dev_fw_str)(struct ib_device *device, char *str); 2318 2318 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev, 2319 2319 int comp_vector); 2320 - int (*query_port)(struct ib_device *device, u8 port_num, 2320 + int (*query_port)(struct ib_device *device, u32 port_num, 2321 2321 struct ib_port_attr *port_attr); 2322 - int (*modify_port)(struct ib_device *device, u8 port_num, 2322 + int (*modify_port)(struct ib_device *device, u32 port_num, 2323 2323 int port_modify_mask, 2324 2324 struct ib_port_modify *port_modify); 2325 2325 /** ··· 2328 2328 * structure to avoid cache line misses when accessing struct ib_device 2329 2329 * in fast paths. 
2330 2330 */ 2331 - int (*get_port_immutable)(struct ib_device *device, u8 port_num, 2331 + int (*get_port_immutable)(struct ib_device *device, u32 port_num, 2332 2332 struct ib_port_immutable *immutable); 2333 2333 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 2334 - u8 port_num); 2334 + u32 port_num); 2335 2335 /** 2336 2336 * When calling get_netdev, the HW vendor's driver should return the 2337 2337 * net device of device @device at port @port_num or NULL if such ··· 2340 2340 * that this function returns NULL before the net device has finished 2341 2341 * NETDEV_UNREGISTER state. 2342 2342 */ 2343 - struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num); 2343 + struct net_device *(*get_netdev)(struct ib_device *device, 2344 + u32 port_num); 2344 2345 /** 2345 2346 * rdma netdev operation 2346 2347 * ··· 2349 2348 * must return -EOPNOTSUPP if it doesn't support the specified type. 2350 2349 */ 2351 2350 struct net_device *(*alloc_rdma_netdev)( 2352 - struct ib_device *device, u8 port_num, enum rdma_netdev_t type, 2351 + struct ib_device *device, u32 port_num, enum rdma_netdev_t type, 2353 2352 const char *name, unsigned char name_assign_type, 2354 2353 void (*setup)(struct net_device *)); 2355 2354 2356 - int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num, 2355 + int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num, 2357 2356 enum rdma_netdev_t type, 2358 2357 struct rdma_netdev_alloc_params *params); 2359 2358 /** ··· 2361 2360 * link layer is either IB or iWarp. It is no-op if @port_num port 2362 2361 * is RoCE link layer. 2363 2362 */ 2364 - int (*query_gid)(struct ib_device *device, u8 port_num, int index, 2363 + int (*query_gid)(struct ib_device *device, u32 port_num, int index, 2365 2364 union ib_gid *gid); 2366 2365 /** 2367 2366 * When calling add_gid, the HW vendor's driver should add the gid ··· 2386 2385 * This function is only called when roce_gid_table is used. 
2387 2386 */ 2388 2387 int (*del_gid)(const struct ib_gid_attr *attr, void **context); 2389 - int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index, 2388 + int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index, 2390 2389 u16 *pkey); 2391 2390 int (*alloc_ucontext)(struct ib_ucontext *context, 2392 2391 struct ib_udata *udata); ··· 2475 2474 struct ib_flow_action *action, 2476 2475 const struct ib_flow_action_attrs_esp *attr, 2477 2476 struct uverbs_attr_bundle *attrs); 2478 - int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 2477 + int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port, 2479 2478 int state); 2480 - int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 2479 + int (*get_vf_config)(struct ib_device *device, int vf, u32 port, 2481 2480 struct ifla_vf_info *ivf); 2482 - int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 2481 + int (*get_vf_stats)(struct ib_device *device, int vf, u32 port, 2483 2482 struct ifla_vf_stats *stats); 2484 - int (*get_vf_guid)(struct ib_device *device, int vf, u8 port, 2483 + int (*get_vf_guid)(struct ib_device *device, int vf, u32 port, 2485 2484 struct ifla_vf_guid *node_guid, 2486 2485 struct ifla_vf_guid *port_guid); 2487 - int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 2486 + int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid, 2488 2487 int type); 2489 2488 struct ib_wq *(*create_wq)(struct ib_pd *pd, 2490 2489 struct ib_wq_init_attr *init_attr, ··· 2522 2521 * struct tells the core to set a default lifespan. 2523 2522 */ 2524 2523 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, 2525 - u8 port_num); 2524 + u32 port_num); 2526 2525 /** 2527 2526 * get_hw_stats - Fill in the counter value(s) in the stats struct. 
2528 2527 * @index - The index in the value array we wish to have updated, or ··· 2536 2535 * one given in index at their option 2537 2536 */ 2538 2537 int (*get_hw_stats)(struct ib_device *device, 2539 - struct rdma_hw_stats *stats, u8 port, int index); 2538 + struct rdma_hw_stats *stats, u32 port, int index); 2540 2539 /* 2541 2540 * This function is called once for each port when a ib device is 2542 2541 * registered. 2543 2542 */ 2544 - int (*init_port)(struct ib_device *device, u8 port_num, 2543 + int (*init_port)(struct ib_device *device, u32 port_num, 2545 2544 struct kobject *port_sysfs); 2546 2545 /** 2547 2546 * Allows rdma drivers to add their own restrack attributes. ··· 2685 2684 /* CQ adaptive moderation (RDMA DIM) */ 2686 2685 u16 use_cq_dim:1; 2687 2686 u8 node_type; 2688 - u8 phys_port_cnt; 2687 + u32 phys_port_cnt; 2689 2688 struct ib_device_attr attrs; 2690 2689 struct attribute_group *hw_stats_ag; 2691 2690 struct rdma_hw_stats *hw_stats; ··· 2751 2750 * netdev. */ 2752 2751 struct net_device *(*get_net_dev_by_params)( 2753 2752 struct ib_device *dev, 2754 - u8 port, 2753 + u32 port, 2755 2754 u16 pkey, 2756 2755 const union ib_gid *gid, 2757 2756 const struct sockaddr *addr, ··· 2932 2931 void ib_dispatch_event(const struct ib_event *event); 2933 2932 2934 2933 int ib_query_port(struct ib_device *device, 2935 - u8 port_num, struct ib_port_attr *port_attr); 2934 + u32 port_num, struct ib_port_attr *port_attr); 2936 2935 2937 2936 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2938 - u8 port_num); 2937 + u32 port_num); 2939 2938 2940 2939 /** 2941 2940 * rdma_cap_ib_switch - Check if the device is IB switch ··· 2959 2958 * 2960 2959 * Return start port number 2961 2960 */ 2962 - static inline u8 rdma_start_port(const struct ib_device *device) 2961 + static inline u32 rdma_start_port(const struct ib_device *device) 2963 2962 { 2964 2963 return rdma_cap_ib_switch(device) ? 
0 : 1; 2965 2964 } ··· 2970 2969 * @iter - The unsigned int to store the port number 2971 2970 */ 2972 2971 #define rdma_for_each_port(device, iter) \ 2973 - for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \ 2974 - unsigned int, iter))); \ 2975 - iter <= rdma_end_port(device); (iter)++) 2972 + for (iter = rdma_start_port(device + \ 2973 + BUILD_BUG_ON_ZERO(!__same_type(u32, \ 2974 + iter))); \ 2975 + iter <= rdma_end_port(device); iter++) 2976 2976 2977 2977 /** 2978 2978 * rdma_end_port - Return the last valid port number for the device ··· 2983 2981 * 2984 2982 * Return last port number 2985 2983 */ 2986 - static inline u8 rdma_end_port(const struct ib_device *device) 2984 + static inline u32 rdma_end_port(const struct ib_device *device) 2987 2985 { 2988 2986 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; 2989 2987 } ··· 2996 2994 } 2997 2995 2998 2996 static inline bool rdma_is_grh_required(const struct ib_device *device, 2999 - u8 port_num) 2997 + u32 port_num) 3000 2998 { 3001 2999 return device->port_data[port_num].immutable.core_cap_flags & 3002 3000 RDMA_CORE_PORT_IB_GRH_REQUIRED; 3003 3001 } 3004 3002 3005 - static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 3003 + static inline bool rdma_protocol_ib(const struct ib_device *device, 3004 + u32 port_num) 3006 3005 { 3007 3006 return device->port_data[port_num].immutable.core_cap_flags & 3008 3007 RDMA_CORE_CAP_PROT_IB; 3009 3008 } 3010 3009 3011 - static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 3010 + static inline bool rdma_protocol_roce(const struct ib_device *device, 3011 + u32 port_num) 3012 3012 { 3013 3013 return device->port_data[port_num].immutable.core_cap_flags & 3014 3014 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 3015 3015 } 3016 3016 3017 - static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 3017 + static inline bool 
rdma_protocol_roce_udp_encap(const struct ib_device *device, 3018 + u32 port_num) 3018 3019 { 3019 3020 return device->port_data[port_num].immutable.core_cap_flags & 3020 3021 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 3021 3022 } 3022 3023 3023 - static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 3024 + static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, 3025 + u32 port_num) 3024 3026 { 3025 3027 return device->port_data[port_num].immutable.core_cap_flags & 3026 3028 RDMA_CORE_CAP_PROT_ROCE; 3027 3029 } 3028 3030 3029 - static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 3031 + static inline bool rdma_protocol_iwarp(const struct ib_device *device, 3032 + u32 port_num) 3030 3033 { 3031 3034 return device->port_data[port_num].immutable.core_cap_flags & 3032 3035 RDMA_CORE_CAP_PROT_IWARP; 3033 3036 } 3034 3037 3035 - static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 3038 + static inline bool rdma_ib_or_roce(const struct ib_device *device, 3039 + u32 port_num) 3036 3040 { 3037 3041 return rdma_protocol_ib(device, port_num) || 3038 3042 rdma_protocol_roce(device, port_num); 3039 3043 } 3040 3044 3041 - static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num) 3045 + static inline bool rdma_protocol_raw_packet(const struct ib_device *device, 3046 + u32 port_num) 3042 3047 { 3043 3048 return device->port_data[port_num].immutable.core_cap_flags & 3044 3049 RDMA_CORE_CAP_PROT_RAW_PACKET; 3045 3050 } 3046 3051 3047 - static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num) 3052 + static inline bool rdma_protocol_usnic(const struct ib_device *device, 3053 + u32 port_num) 3048 3054 { 3049 3055 return device->port_data[port_num].immutable.core_cap_flags & 3050 3056 RDMA_CORE_CAP_PROT_USNIC; ··· 3070 3060 * 3071 3061 * Return: true if the port supports sending/receiving of MAD packets. 
3072 3062 */ 3073 - static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 3063 + static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num) 3074 3064 { 3075 3065 return device->port_data[port_num].immutable.core_cap_flags & 3076 3066 RDMA_CORE_CAP_IB_MAD; ··· 3095 3085 * 3096 3086 * Return: true if the port supports OPA MAD packet formats. 3097 3087 */ 3098 - static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) 3088 + static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num) 3099 3089 { 3100 3090 return device->port_data[port_num].immutable.core_cap_flags & 3101 3091 RDMA_CORE_CAP_OPA_MAD; ··· 3121 3111 * 3122 3112 * Return: true if the port provides an SMI. 3123 3113 */ 3124 - static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) 3114 + static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num) 3125 3115 { 3126 3116 return device->port_data[port_num].immutable.core_cap_flags & 3127 3117 RDMA_CORE_CAP_IB_SMI; ··· 3142 3132 * Return: true if the port supports an IB CM (this does not guarantee that 3143 3133 * a CM is actually running however). 3144 3134 */ 3145 - static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) 3135 + static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num) 3146 3136 { 3147 3137 return device->port_data[port_num].immutable.core_cap_flags & 3148 3138 RDMA_CORE_CAP_IB_CM; ··· 3160 3150 * Return: true if the port supports an iWARP CM (this does not guarantee that 3161 3151 * a CM is actually running however). 3162 3152 */ 3163 - static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) 3153 + static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num) 3164 3154 { 3165 3155 return device->port_data[port_num].immutable.core_cap_flags & 3166 3156 RDMA_CORE_CAP_IW_CM; ··· 3181 3171 * Administration interface. 
This does not imply that the SA service is 3182 3172 * running locally. 3183 3173 */ 3184 - static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 3174 + static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num) 3185 3175 { 3186 3176 return device->port_data[port_num].immutable.core_cap_flags & 3187 3177 RDMA_CORE_CAP_IB_SA; ··· 3204 3194 * overhead of registering/unregistering with the SM and tracking of the 3205 3195 * total number of queue pairs attached to the multicast group. 3206 3196 */ 3207 - static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 3197 + static inline bool rdma_cap_ib_mcast(const struct ib_device *device, 3198 + u32 port_num) 3208 3199 { 3209 3200 return rdma_cap_ib_sa(device, port_num); 3210 3201 } ··· 3223 3212 * Return: true if the port uses a GID address to identify devices on the 3224 3213 * network. 3225 3214 */ 3226 - static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 3215 + static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num) 3227 3216 { 3228 3217 return device->port_data[port_num].immutable.core_cap_flags & 3229 3218 RDMA_CORE_CAP_AF_IB; ··· 3245 3234 * addition of a Global Route Header built from our Ethernet Address 3246 3235 * Handle into our header list for connectionless packets. 3247 3236 */ 3248 - static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 3237 + static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num) 3249 3238 { 3250 3239 return device->port_data[port_num].immutable.core_cap_flags & 3251 3240 RDMA_CORE_CAP_ETH_AH; ··· 3260 3249 * Return: true if we are running on an OPA device which supports 3261 3250 * the extended OPA addressing. 
3262 3251 */ 3263 - static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num) 3252 + static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num) 3264 3253 { 3265 3254 return (device->port_data[port_num].immutable.core_cap_flags & 3266 3255 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH; ··· 3278 3267 * Return the max MAD size required by the Port. Will return 0 if the port 3279 3268 * does not support MADs 3280 3269 */ 3281 - static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 3270 + static inline size_t rdma_max_mad_size(const struct ib_device *device, 3271 + u32 port_num) 3282 3272 { 3283 3273 return device->port_data[port_num].immutable.max_mad_size; 3284 3274 } ··· 3298 3286 * its GIDs. 3299 3287 */ 3300 3288 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 3301 - u8 port_num) 3289 + u32 port_num) 3302 3290 { 3303 3291 return rdma_protocol_roce(device, port_num) && 3304 3292 device->ops.add_gid && device->ops.del_gid; ··· 3339 3327 * Return the MTU size supported by the port as an integer value. Will return 3340 3328 * -1 if enum value of mtu is not supported. 3341 3329 */ 3342 - static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port, 3330 + static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port, 3343 3331 int mtu) 3344 3332 { 3345 3333 if (rdma_core_cap_opa_port(device, port)) ··· 3356 3344 * 3357 3345 * Return the MTU size supported by the port as an integer value. 
3358 3346 */ 3359 - static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port, 3347 + static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port, 3360 3348 struct ib_port_attr *attr) 3361 3349 { 3362 3350 if (rdma_core_cap_opa_port(device, port)) ··· 3365 3353 return ib_mtu_enum_to_int(attr->max_mtu); 3366 3354 } 3367 3355 3368 - int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 3356 + int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port, 3369 3357 int state); 3370 - int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 3358 + int ib_get_vf_config(struct ib_device *device, int vf, u32 port, 3371 3359 struct ifla_vf_info *info); 3372 - int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 3360 + int ib_get_vf_stats(struct ib_device *device, int vf, u32 port, 3373 3361 struct ifla_vf_stats *stats); 3374 - int ib_get_vf_guid(struct ib_device *device, int vf, u8 port, 3362 + int ib_get_vf_guid(struct ib_device *device, int vf, u32 port, 3375 3363 struct ifla_vf_guid *node_guid, 3376 3364 struct ifla_vf_guid *port_guid); 3377 - int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 3365 + int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid, 3378 3366 int type); 3379 3367 3380 3368 int ib_query_pkey(struct ib_device *device, 3381 - u8 port_num, u16 index, u16 *pkey); 3369 + u32 port_num, u16 index, u16 *pkey); 3382 3370 3383 3371 int ib_modify_device(struct ib_device *device, 3384 3372 int device_modify_mask, 3385 3373 struct ib_device_modify *device_modify); 3386 3374 3387 3375 int ib_modify_port(struct ib_device *device, 3388 - u8 port_num, int port_modify_mask, 3376 + u32 port_num, int port_modify_mask, 3389 3377 struct ib_port_modify *port_modify); 3390 3378 3391 3379 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 3392 - u8 *port_num, u16 *index); 3380 + u32 *port_num, u16 *index); 3393 3381 3394 3382 int ib_find_pkey(struct ib_device 
*device, 3395 - u8 port_num, u16 pkey, u16 *index); 3383 + u32 port_num, u16 pkey, u16 *index); 3396 3384 3397 3385 enum ib_pd_flags { 3398 3386 /* ··· 3507 3495 * attributes which are initialized using ib_init_ah_attr_from_wc(). 3508 3496 * 3509 3497 */ 3510 - int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, 3498 + int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num, 3511 3499 const struct ib_wc *wc, const struct ib_grh *grh, 3512 3500 struct rdma_ah_attr *ah_attr); 3513 3501 ··· 3524 3512 * in all UD QP post sends. 3525 3513 */ 3526 3514 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 3527 - const struct ib_grh *grh, u8 port_num); 3515 + const struct ib_grh *grh, u32 port_num); 3528 3516 3529 3517 /** 3530 3518 * rdma_modify_ah - Modifies the address vector associated with an address ··· 4269 4257 enum rdma_driver_id driver_id); 4270 4258 struct ib_device *ib_device_get_by_name(const char *name, 4271 4259 enum rdma_driver_id driver_id); 4272 - struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 4260 + struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port, 4273 4261 u16 pkey, const union ib_gid *gid, 4274 4262 const struct sockaddr *addr); 4275 4263 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, 4276 4264 unsigned int port); 4277 - struct net_device *ib_device_netdev(struct ib_device *dev, u8 port); 4265 + struct net_device *ib_device_netdev(struct ib_device *dev, u32 port); 4278 4266 4279 4267 struct ib_wq *ib_create_wq(struct ib_pd *pd, 4280 4268 struct ib_wq_init_attr *init_attr); ··· 4308 4296 void ib_drain_sq(struct ib_qp *qp); 4309 4297 void ib_drain_qp(struct ib_qp *qp); 4310 4298 4311 - int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width); 4299 + int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, 4300 + u8 *width); 4312 4301 4313 4302 static inline u8 
*rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) 4314 4303 { ··· 4377 4364 return false; 4378 4365 } 4379 4366 4380 - static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num) 4367 + static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num) 4381 4368 { 4382 4369 attr->port_num = port_num; 4383 4370 } 4384 4371 4385 - static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr) 4372 + static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr) 4386 4373 { 4387 4374 return attr->port_num; 4388 4375 } ··· 4480 4467 * @port_num: Port number 4481 4468 */ 4482 4469 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev, 4483 - u8 port_num) 4470 + u32 port_num) 4484 4471 { 4485 4472 if (rdma_protocol_roce(dev, port_num)) 4486 4473 return RDMA_AH_ATTR_TYPE_ROCE; ··· 4552 4539 4553 4540 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs); 4554 4541 4555 - struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num, 4542 + struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num, 4556 4543 enum rdma_netdev_t type, const char *name, 4557 4544 unsigned char name_assign_type, 4558 4545 void (*setup)(struct net_device *)); 4559 4546 4560 - int rdma_init_netdev(struct ib_device *device, u8 port_num, 4547 + int rdma_init_netdev(struct ib_device *device, u32 port_num, 4561 4548 enum rdma_netdev_t type, const char *name, 4562 4549 unsigned char name_assign_type, 4563 4550 void (*setup)(struct net_device *),
+1 -1
include/rdma/rdma_cm.h
··· 107 107 struct rdma_route route; 108 108 enum rdma_ucm_port_space ps; 109 109 enum ib_qp_type qp_type; 110 - u8 port_num; 110 + u32 port_num; 111 111 }; 112 112 113 113 struct rdma_cm_id *
+8 -8
include/rdma/rdma_counter.h
··· 40 40 struct rdma_counter_mode mode; 41 41 struct mutex lock; 42 42 struct rdma_hw_stats *stats; 43 - u8 port; 43 + u32 port; 44 44 }; 45 45 46 46 void rdma_counter_init(struct ib_device *dev); 47 47 void rdma_counter_release(struct ib_device *dev); 48 - int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port, 48 + int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port, 49 49 enum rdma_nl_counter_mask mask, 50 50 struct netlink_ext_ack *extack); 51 - int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port); 51 + int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port); 52 52 int rdma_counter_unbind_qp(struct ib_qp *qp, bool force); 53 53 54 54 int rdma_counter_query_stats(struct rdma_counter *counter); 55 - u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index); 56 - int rdma_counter_bind_qpn(struct ib_device *dev, u8 port, 55 + u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index); 56 + int rdma_counter_bind_qpn(struct ib_device *dev, u32 port, 57 57 u32 qp_num, u32 counter_id); 58 - int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, 58 + int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port, 59 59 u32 qp_num, u32 *counter_id); 60 - int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port, 60 + int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port, 61 61 u32 qp_num, u32 counter_id); 62 - int rdma_counter_get_mode(struct ib_device *dev, u8 port, 62 + int rdma_counter_get_mode(struct ib_device *dev, u32 port, 63 63 enum rdma_nl_counter_mode *mode, 64 64 enum rdma_nl_counter_mask *mask); 65 65
+4 -4
include/rdma/rdma_vt.h
··· 309 309 /* 310 310 * Query driver for the state of the port. 311 311 */ 312 - int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num, 312 + int (*query_port_state)(struct rvt_dev_info *rdi, u32 port_num, 313 313 struct ib_port_attr *props); 314 314 315 315 /* 316 316 * Tell driver to shutdown a port 317 317 */ 318 - int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num); 318 + int (*shut_down_port)(struct rvt_dev_info *rdi, u32 port_num); 319 319 320 320 /* Tell driver to send a trap for changed port capabilities */ 321 - void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num); 321 + void (*cap_mask_chg)(struct rvt_dev_info *rdi, u32 port_num); 322 322 323 323 /* 324 324 * The following functions can be safely ignored completely. Any use of ··· 338 338 339 339 /* Let the driver pick the next queue pair number*/ 340 340 int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, 341 - enum ib_qp_type type, u8 port_num); 341 + enum ib_qp_type type, u32 port_num); 342 342 343 343 /* Determine if its safe or allowed to modify the qp */ 344 344 int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
+9 -9
include/rdma/rw.h
··· 42 42 }; 43 43 }; 44 44 45 - int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, 45 + int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, 46 46 struct scatterlist *sg, u32 sg_cnt, u32 sg_offset, 47 47 u64 remote_addr, u32 rkey, enum dma_data_direction dir); 48 - void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, 49 - struct scatterlist *sg, u32 sg_cnt, 50 - enum dma_data_direction dir); 48 + void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, 49 + u32 port_num, struct scatterlist *sg, u32 sg_cnt, 50 + enum dma_data_direction dir); 51 51 52 52 int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, 53 - u8 port_num, struct scatterlist *sg, u32 sg_cnt, 53 + u32 port_num, struct scatterlist *sg, u32 sg_cnt, 54 54 struct scatterlist *prot_sg, u32 prot_sg_cnt, 55 55 struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey, 56 56 enum dma_data_direction dir); 57 57 void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp, 58 - u8 port_num, struct scatterlist *sg, u32 sg_cnt, 58 + u32 port_num, struct scatterlist *sg, u32 sg_cnt, 59 59 struct scatterlist *prot_sg, u32 prot_sg_cnt, 60 60 enum dma_data_direction dir); 61 61 62 62 struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, 63 - u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr); 64 - int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, 63 + u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr); 64 + int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, 65 65 struct ib_cqe *cqe, struct ib_send_wr *chain_wr); 66 66 67 - unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num, 67 + unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num, 68 68 unsigned int maxpages); 69 69 void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr); 70 70 int 
rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);