Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/hfi1: Rework AIP and VNIC dummy netdev usage

All other users of the dummy netdevice embed the netdev in other
structures:

init_dummy_netdev(&mal->dummy_dev);
init_dummy_netdev(&eth->dummy_dev);
init_dummy_netdev(&ar->napi_dev);
init_dummy_netdev(&irq_grp->napi_ndev);
init_dummy_netdev(&wil->napi_ndev);
init_dummy_netdev(&trans_pcie->napi_dev);
init_dummy_netdev(&dev->napi_dev);
init_dummy_netdev(&bus->mux_dev);

The AIP and VNIC implementation turns that model inside out and uses a
kfree() to free what appears to be a netdev struct when in reality, it is
a struct that embodies the rx state as well as the dummy netdev used to
support napi_poll across disparate receive contexts. The relationship is
inferred by the odd allocation:

const int netdev_size = sizeof(*dd->dummy_netdev) +
sizeof(struct hfi1_netdev_priv);
<snip>
dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);

Correct the issue by:
- Correctly naming the alloc and free functions
- Renaming hfi1_netdev_priv to hfi1_netdev_rx
- Replacing dd dummy_netdev with a netdev_rx pointer
- Embedding the net_device in hfi1_netdev_rx
- Moving the init_dummy_netdev to the alloc routine
- Adjusting wrappers to fit the new model

Fixes: 6991abcb993c ("IB/hfi1: Add functions to receive accelerated ipoib packets")
Link: https://lore.kernel.org/r/1617026056-50483-11-git-send-email-dennis.dalessandro@cornelisnetworks.com
Reviewed-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

authored by

Mike Marciniszyn and committed by
Jason Gunthorpe
780278c2 9c8823e0

+105 -116
+3 -3
drivers/infiniband/hw/hfi1/chip.c
··· 15243 15243 (dd->revision >> CCE_REVISION_SW_SHIFT) 15244 15244 & CCE_REVISION_SW_MASK); 15245 15245 15246 - /* alloc netdev data */ 15247 - ret = hfi1_netdev_alloc(dd); 15246 + /* alloc VNIC/AIP rx data */ 15247 + ret = hfi1_alloc_rx(dd); 15248 15248 if (ret) 15249 15249 goto bail_cleanup; 15250 15250 ··· 15348 15348 hfi1_comp_vectors_clean_up(dd); 15349 15349 msix_clean_up_interrupts(dd); 15350 15350 bail_cleanup: 15351 - hfi1_netdev_free(dd); 15351 + hfi1_free_rx(dd); 15352 15352 hfi1_pcie_ddcleanup(dd); 15353 15353 bail_free: 15354 15354 hfi1_free_devdata(dd);
+2 -2
drivers/infiniband/hw/hfi1/hfi.h
··· 69 69 #include <rdma/ib_hdrs.h> 70 70 #include <rdma/opa_addr.h> 71 71 #include <linux/rhashtable.h> 72 - #include <linux/netdevice.h> 73 72 #include <rdma/rdma_vt.h> 74 73 75 74 #include "chip_registers.h" ··· 1059 1060 #define SERIAL_MAX 16 /* length of the serial number */ 1060 1061 1061 1062 typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64); 1063 + struct hfi1_netdev_rx; 1062 1064 struct hfi1_devdata { 1063 1065 struct hfi1_ibdev verbs_dev; /* must be first */ 1064 1066 /* pointers to related structs for this device */ ··· 1402 1402 /* Lock to protect IRQ SRC register access */ 1403 1403 spinlock_t irq_src_lock; 1404 1404 int vnic_num_vports; 1405 - struct net_device *dummy_netdev; 1405 + struct hfi1_netdev_rx *netdev_rx; 1406 1406 1407 1407 /* Keeps track of IPoIB RSM rule users */ 1408 1408 atomic_t ipoib_rsm_usr_num;
+1 -1
drivers/infiniband/hw/hfi1/init.c
··· 1767 1767 hfi1_unregister_ib_device(dd); 1768 1768 1769 1769 /* free netdev data */ 1770 - hfi1_netdev_free(dd); 1770 + hfi1_free_rx(dd); 1771 1771 1772 1772 /* 1773 1773 * Disable the IB link, disable interrupts on the device,
+13 -26
drivers/infiniband/hw/hfi1/netdev.h
··· 14 14 15 15 /** 16 16 * struct hfi1_netdev_rxq - Receive Queue for HFI 17 - * dummy netdev. Both IPoIB and VNIC netdevices will be working on 18 - * top of this device. 17 + * Both IPoIB and VNIC netdevices will be working on the rx abstraction. 19 18 * @napi: napi object 20 - * @priv: ptr to netdev_priv 19 + * @rx: ptr to netdev_rx 21 20 * @rcd: ptr to receive context data 22 21 */ 23 22 struct hfi1_netdev_rxq { 24 23 struct napi_struct napi; 25 - struct hfi1_netdev_priv *priv; 24 + struct hfi1_netdev_rx *rx; 26 25 struct hfi1_ctxtdata *rcd; 27 26 }; 28 27 ··· 35 36 #define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS 36 37 37 38 /** 38 - * struct hfi1_netdev_priv: data required to setup and run HFI netdev. 39 + * struct hfi1_netdev_rx: data required to setup and run HFI netdev. 40 + * @rx_napi: the dummy netdevice to support "polling" the receive contexts 39 41 * @dd: hfi1_devdata 40 42 * @rxq: pointer to dummy netdev receive queues. 41 43 * @num_rx_q: number of receive queues ··· 48 48 * @netdevs: atomic counter of netdevs using dummy netdev. 49 49 * When 0 receive queues will be freed. 
50 50 */ 51 - struct hfi1_netdev_priv { 51 + struct hfi1_netdev_rx { 52 + struct net_device rx_napi; 52 53 struct hfi1_devdata *dd; 53 54 struct hfi1_netdev_rxq *rxq; 54 55 int num_rx_q; ··· 62 61 }; 63 62 64 63 static inline 65 - struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev) 66 - { 67 - return (struct hfi1_netdev_priv *)&dev[1]; 68 - } 69 - 70 - static inline 71 64 int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd) 72 65 { 73 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev); 74 - 75 - return priv->num_rx_q; 66 + return dd->netdev_rx->num_rx_q; 76 67 } 77 68 78 69 static inline 79 70 struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt) 80 71 { 81 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev); 82 - 83 - return priv->rxq[ctxt].rcd; 72 + return dd->netdev_rx->rxq[ctxt].rcd; 84 73 } 85 74 86 75 static inline 87 76 int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd) 88 77 { 89 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev); 90 - 91 - return priv->rmt_start; 78 + return dd->netdev_rx->rmt_start; 92 79 } 93 80 94 81 static inline 95 82 void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx) 96 83 { 97 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev); 98 - 99 - priv->rmt_start = rmt_idx; 84 + dd->netdev_rx->rmt_start = rmt_idx; 100 85 } 101 86 102 87 u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts, ··· 92 105 void hfi1_netdev_disable_queues(struct hfi1_devdata *dd); 93 106 int hfi1_netdev_rx_init(struct hfi1_devdata *dd); 94 107 int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd); 95 - int hfi1_netdev_alloc(struct hfi1_devdata *dd); 96 - void hfi1_netdev_free(struct hfi1_devdata *dd); 108 + int hfi1_alloc_rx(struct hfi1_devdata *dd); 109 + void hfi1_free_rx(struct hfi1_devdata *dd); 97 110 int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data); 98 111 void 
*hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id); 99 112 void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
+86 -84
drivers/infiniband/hw/hfi1/netdev_rx.c
··· 17 17 #include <linux/etherdevice.h> 18 18 #include <rdma/ib_verbs.h> 19 19 20 - static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv, 20 + static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx, 21 21 struct hfi1_ctxtdata *uctxt) 22 22 { 23 23 unsigned int rcvctrl_ops; 24 - struct hfi1_devdata *dd = priv->dd; 24 + struct hfi1_devdata *dd = rx->dd; 25 25 int ret; 26 26 27 27 uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions; ··· 118 118 hfi1_free_ctxt(uctxt); 119 119 } 120 120 121 - static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv, 121 + static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx, 122 122 struct hfi1_ctxtdata **ctxt) 123 123 { 124 124 int rc; 125 - struct hfi1_devdata *dd = priv->dd; 125 + struct hfi1_devdata *dd = rx->dd; 126 126 127 127 rc = hfi1_netdev_allocate_ctxt(dd, ctxt); 128 128 if (rc) { ··· 130 130 return rc; 131 131 } 132 132 133 - rc = hfi1_netdev_setup_ctxt(priv, *ctxt); 133 + rc = hfi1_netdev_setup_ctxt(rx, *ctxt); 134 134 if (rc) { 135 135 dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc); 136 136 hfi1_netdev_deallocate_ctxt(dd, *ctxt); ··· 184 184 (u32)HFI1_MAX_NETDEV_CTXTS); 185 185 } 186 186 187 - static int hfi1_netdev_rxq_init(struct net_device *dev) 187 + static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx) 188 188 { 189 189 int i; 190 190 int rc; 191 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev); 192 - struct hfi1_devdata *dd = priv->dd; 191 + struct hfi1_devdata *dd = rx->dd; 192 + struct net_device *dev = &rx->rx_napi; 193 193 194 - priv->num_rx_q = dd->num_netdev_contexts; 195 - priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq), 196 - GFP_KERNEL, dd->node); 194 + rx->num_rx_q = dd->num_netdev_contexts; 195 + rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq), 196 + GFP_KERNEL, dd->node); 197 197 198 - if (!priv->rxq) { 198 + if (!rx->rxq) { 199 199 dd_dev_err(dd, "Unable to allocate netdev queue data\n"); 200 200 return (-ENOMEM); 201 201 } 
202 202 203 - for (i = 0; i < priv->num_rx_q; i++) { 204 - struct hfi1_netdev_rxq *rxq = &priv->rxq[i]; 203 + for (i = 0; i < rx->num_rx_q; i++) { 204 + struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; 205 205 206 - rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd); 206 + rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd); 207 207 if (rc) 208 208 goto bail_context_irq_failure; 209 209 210 210 hfi1_rcd_get(rxq->rcd); 211 - rxq->priv = priv; 211 + rxq->rx = rx; 212 212 rxq->rcd->napi = &rxq->napi; 213 213 dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n", 214 214 i, rxq->rcd->ctxt); ··· 228 228 bail_context_irq_failure: 229 229 dd_dev_err(dd, "Unable to allot receive context\n"); 230 230 for (; i >= 0; i--) { 231 - struct hfi1_netdev_rxq *rxq = &priv->rxq[i]; 231 + struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; 232 232 233 233 if (rxq->rcd) { 234 234 hfi1_netdev_deallocate_ctxt(dd, rxq->rcd); ··· 236 236 rxq->rcd = NULL; 237 237 } 238 238 } 239 - kfree(priv->rxq); 240 - priv->rxq = NULL; 239 + kfree(rx->rxq); 240 + rx->rxq = NULL; 241 241 242 242 return rc; 243 243 } 244 244 245 - static void hfi1_netdev_rxq_deinit(struct net_device *dev) 245 + static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx) 246 246 { 247 247 int i; 248 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev); 249 - struct hfi1_devdata *dd = priv->dd; 248 + struct hfi1_devdata *dd = rx->dd; 250 249 251 - for (i = 0; i < priv->num_rx_q; i++) { 252 - struct hfi1_netdev_rxq *rxq = &priv->rxq[i]; 250 + for (i = 0; i < rx->num_rx_q; i++) { 251 + struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; 253 252 254 253 netif_napi_del(&rxq->napi); 255 254 hfi1_netdev_deallocate_ctxt(dd, rxq->rcd); ··· 256 257 rxq->rcd = NULL; 257 258 } 258 259 259 - kfree(priv->rxq); 260 - priv->rxq = NULL; 261 - priv->num_rx_q = 0; 260 + kfree(rx->rxq); 261 + rx->rxq = NULL; 262 + rx->num_rx_q = 0; 262 263 } 263 264 264 - static void enable_queues(struct hfi1_netdev_priv *priv) 265 + static void enable_queues(struct hfi1_netdev_rx *rx) 
265 266 { 266 267 int i; 267 268 268 - for (i = 0; i < priv->num_rx_q; i++) { 269 - struct hfi1_netdev_rxq *rxq = &priv->rxq[i]; 269 + for (i = 0; i < rx->num_rx_q; i++) { 270 + struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; 270 271 271 - dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i, 272 + dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i, 272 273 rxq->rcd->ctxt); 273 274 napi_enable(&rxq->napi); 274 - hfi1_rcvctrl(priv->dd, 275 + hfi1_rcvctrl(rx->dd, 275 276 HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB, 276 277 rxq->rcd); 277 278 } 278 279 } 279 280 280 - static void disable_queues(struct hfi1_netdev_priv *priv) 281 + static void disable_queues(struct hfi1_netdev_rx *rx) 281 282 { 282 283 int i; 283 284 284 - msix_netdev_synchronize_irq(priv->dd); 285 + msix_netdev_synchronize_irq(rx->dd); 285 286 286 - for (i = 0; i < priv->num_rx_q; i++) { 287 - struct hfi1_netdev_rxq *rxq = &priv->rxq[i]; 287 + for (i = 0; i < rx->num_rx_q; i++) { 288 + struct hfi1_netdev_rxq *rxq = &rx->rxq[i]; 288 289 289 - dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i, 290 + dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i, 290 291 rxq->rcd->ctxt); 291 292 292 293 /* wait for napi if it was scheduled */ 293 - hfi1_rcvctrl(priv->dd, 294 + hfi1_rcvctrl(rx->dd, 294 295 HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS, 295 296 rxq->rcd); 296 297 napi_synchronize(&rxq->napi); ··· 307 308 */ 308 309 int hfi1_netdev_rx_init(struct hfi1_devdata *dd) 309 310 { 310 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev); 311 + struct hfi1_netdev_rx *rx = dd->netdev_rx; 311 312 int res; 312 313 313 - if (atomic_fetch_inc(&priv->netdevs)) 314 + if (atomic_fetch_inc(&rx->netdevs)) 314 315 return 0; 315 316 316 317 mutex_lock(&hfi1_mutex); 317 - init_dummy_netdev(dd->dummy_netdev); 318 - res = hfi1_netdev_rxq_init(dd->dummy_netdev); 318 + res = hfi1_netdev_rxq_init(rx); 319 319 mutex_unlock(&hfi1_mutex); 320 320 return res; 321 321 } ··· 
327 329 */ 328 330 int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd) 329 331 { 330 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev); 332 + struct hfi1_netdev_rx *rx = dd->netdev_rx; 331 333 332 334 /* destroy the RX queues only if it is the last netdev going away */ 333 - if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) { 335 + if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) { 334 336 mutex_lock(&hfi1_mutex); 335 - hfi1_netdev_rxq_deinit(dd->dummy_netdev); 337 + hfi1_netdev_rxq_deinit(rx); 336 338 mutex_unlock(&hfi1_mutex); 337 339 } 338 340 ··· 340 342 } 341 343 342 344 /** 343 - * hfi1_netdev_alloc - Allocates netdev and private data. It is required 344 - * because RMT index and MSI-X interrupt can be set only 345 - * during driver initialization. 346 - * 345 + * hfi1_alloc_rx - Allocates the rx support structure 347 346 * @dd: hfi1 dev data 347 + * 348 + * Allocate the rx structure to support gathering the receive 349 + * resources and the dummy netdev. 350 + * 351 + * Updates dd struct pointer upon success. 
352 + * 353 + * Return: 0 (success) -error on failure 354 + * 348 355 */ 349 - int hfi1_netdev_alloc(struct hfi1_devdata *dd) 356 + int hfi1_alloc_rx(struct hfi1_devdata *dd) 350 357 { 351 - struct hfi1_netdev_priv *priv; 352 - const int netdev_size = sizeof(*dd->dummy_netdev) + 353 - sizeof(struct hfi1_netdev_priv); 358 + struct hfi1_netdev_rx *rx; 354 359 355 - dd_dev_info(dd, "allocating netdev size %d\n", netdev_size); 356 - dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node); 360 + dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx)); 361 + rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node); 357 362 358 - if (!dd->dummy_netdev) 363 + if (!rx) 359 364 return -ENOMEM; 365 + rx->dd = dd; 366 + init_dummy_netdev(&rx->rx_napi); 360 367 361 - priv = hfi1_netdev_priv(dd->dummy_netdev); 362 - priv->dd = dd; 363 - xa_init(&priv->dev_tbl); 364 - atomic_set(&priv->enabled, 0); 365 - atomic_set(&priv->netdevs, 0); 368 + xa_init(&rx->dev_tbl); 369 + atomic_set(&rx->enabled, 0); 370 + atomic_set(&rx->netdevs, 0); 371 + dd->netdev_rx = rx; 366 372 367 373 return 0; 368 374 } 369 375 370 - void hfi1_netdev_free(struct hfi1_devdata *dd) 376 + void hfi1_free_rx(struct hfi1_devdata *dd) 371 377 { 372 - if (dd->dummy_netdev) { 373 - dd_dev_info(dd, "hfi1 netdev freed\n"); 374 - kfree(dd->dummy_netdev); 375 - dd->dummy_netdev = NULL; 378 + if (dd->netdev_rx) { 379 + dd_dev_info(dd, "hfi1 rx freed\n"); 380 + kfree(dd->netdev_rx); 381 + dd->netdev_rx = NULL; 376 382 } 377 383 } 378 384 ··· 391 389 */ 392 390 void hfi1_netdev_enable_queues(struct hfi1_devdata *dd) 393 391 { 394 - struct hfi1_netdev_priv *priv; 392 + struct hfi1_netdev_rx *rx; 395 393 396 - if (!dd->dummy_netdev) 394 + if (!dd->netdev_rx) 397 395 return; 398 396 399 - priv = hfi1_netdev_priv(dd->dummy_netdev); 400 - if (atomic_fetch_inc(&priv->enabled)) 397 + rx = dd->netdev_rx; 398 + if (atomic_fetch_inc(&rx->enabled)) 401 399 return; 402 400 403 401 mutex_lock(&hfi1_mutex); 404 - 
enable_queues(priv); 402 + enable_queues(rx); 405 403 mutex_unlock(&hfi1_mutex); 406 404 } 407 405 408 406 void hfi1_netdev_disable_queues(struct hfi1_devdata *dd) 409 407 { 410 - struct hfi1_netdev_priv *priv; 408 + struct hfi1_netdev_rx *rx; 411 409 412 - if (!dd->dummy_netdev) 410 + if (!dd->netdev_rx) 413 411 return; 414 412 415 - priv = hfi1_netdev_priv(dd->dummy_netdev); 416 - if (atomic_dec_if_positive(&priv->enabled)) 413 + rx = dd->netdev_rx; 414 + if (atomic_dec_if_positive(&rx->enabled)) 417 415 return; 418 416 419 417 mutex_lock(&hfi1_mutex); 420 - disable_queues(priv); 418 + disable_queues(rx); 421 419 mutex_unlock(&hfi1_mutex); 422 420 } 423 421 ··· 433 431 */ 434 432 int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data) 435 433 { 436 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev); 434 + struct hfi1_netdev_rx *rx = dd->netdev_rx; 437 435 438 - return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT); 436 + return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT); 439 437 } 440 438 441 439 /** ··· 447 445 */ 448 446 void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id) 449 447 { 450 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev); 448 + struct hfi1_netdev_rx *rx = dd->netdev_rx; 451 449 452 - return xa_erase(&priv->dev_tbl, id); 450 + return xa_erase(&rx->dev_tbl, id); 453 451 } 454 452 455 453 /** ··· 460 458 */ 461 459 void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id) 462 460 { 463 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev); 461 + struct hfi1_netdev_rx *rx = dd->netdev_rx; 464 462 465 - return xa_load(&priv->dev_tbl, id); 463 + return xa_load(&rx->dev_tbl, id); 466 464 } 467 465 468 466 /** ··· 473 471 */ 474 472 void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id) 475 473 { 476 - struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev); 474 + struct hfi1_netdev_rx *rx = dd->netdev_rx; 477 475 unsigned long index = 
*start_id; 478 476 void *ret; 479 477 480 - ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT); 478 + ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT); 481 479 *start_id = (int)index; 482 480 return ret; 483 481 }