Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: flow_offload: fix flow_indr_dev_unregister path

If the representor is removed, then identify the indirect flow_blocks
that need to be removed by the release callback and the port representor
structure. To identify the port representor structure, a new
indr.cb_priv field needs to be introduced. The flow_block also needs to
be removed from the driver list from the cleanup path.

Fixes: 1fac52da5942 ("net: flow_offload: consolidate indirect flow_block infrastructure")

Signed-off-by: wenxu <wenxu@ucloud.cn>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by wenxu and committed by David S. Miller
(commit a1db2178, parent 66f1939a)

+27 -18
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
··· 1911 1911 block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb, 1912 1912 cb_priv, cb_priv, 1913 1913 bnxt_tc_setup_indr_rel, f, 1914 - netdev, data, cleanup); 1914 + netdev, data, bp, cleanup); 1915 1915 if (IS_ERR(block_cb)) { 1916 1916 list_del(&cb_priv->list); 1917 1917 kfree(cb_priv); ··· 2079 2079 return; 2080 2080 2081 2081 flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp, 2082 - bnxt_tc_setup_indr_block_cb); 2082 + bnxt_tc_setup_indr_rel); 2083 2083 rhashtable_destroy(&tc_info->flow_table); 2084 2084 rhashtable_destroy(&tc_info->l2_table); 2085 2085 rhashtable_destroy(&tc_info->decap_l2_table);
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
··· 442 442 443 443 block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv, 444 444 mlx5e_rep_indr_block_unbind, 445 - f, netdev, data, cleanup); 445 + f, netdev, data, rpriv, 446 + cleanup); 446 447 if (IS_ERR(block_cb)) { 447 448 list_del(&indr_priv->list); 448 449 kfree(indr_priv); ··· 504 503 void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) 505 504 { 506 505 flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv, 507 - mlx5e_rep_indr_setup_tc_cb); 506 + mlx5e_rep_indr_block_unbind); 508 507 } 509 508 510 509 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+1 -1
drivers/net/ethernet/netronome/nfp/flower/main.c
··· 861 861 flush_work(&app_priv->cmsg_work); 862 862 863 863 flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app, 864 - nfp_flower_setup_indr_block_cb); 864 + nfp_flower_setup_indr_tc_release); 865 865 866 866 if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) 867 867 nfp_flower_qos_cleanup(app);
+1 -2
drivers/net/ethernet/netronome/nfp/flower/main.h
··· 462 462 enum tc_setup_type type, void *type_data, 463 463 void *data, 464 464 void (*cleanup)(struct flow_block_cb *block_cb)); 465 - int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data, 466 - void *cb_priv); 465 + void nfp_flower_setup_indr_tc_release(void *cb_priv); 467 466 468 467 void 469 468 __nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
+4 -4
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 1619 1619 return NULL; 1620 1620 } 1621 1621 1622 - int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, 1623 - void *type_data, void *cb_priv) 1622 + static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, 1623 + void *type_data, void *cb_priv) 1624 1624 { 1625 1625 struct nfp_flower_indr_block_cb_priv *priv = cb_priv; 1626 1626 struct flow_cls_offload *flower = type_data; ··· 1637 1637 } 1638 1638 } 1639 1639 1640 - static void nfp_flower_setup_indr_tc_release(void *cb_priv) 1640 + void nfp_flower_setup_indr_tc_release(void *cb_priv) 1641 1641 { 1642 1642 struct nfp_flower_indr_block_cb_priv *priv = cb_priv; 1643 1643 ··· 1680 1680 block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb, 1681 1681 cb_priv, cb_priv, 1682 1682 nfp_flower_setup_indr_tc_release, 1683 - f, netdev, data, cleanup); 1683 + f, netdev, data, app, cleanup); 1684 1684 if (IS_ERR(block_cb)) { 1685 1685 list_del(&cb_priv->list); 1686 1686 kfree(cb_priv);
+3 -1
include/net/flow_offload.h
··· 450 450 struct net_device *dev; 451 451 enum flow_block_binder_type binder_type; 452 452 void *data; 453 + void *cb_priv; 453 454 void (*cleanup)(struct flow_block_cb *block_cb); 454 455 }; 455 456 ··· 473 472 void (*release)(void *cb_priv), 474 473 struct flow_block_offload *bo, 475 474 struct net_device *dev, void *data, 475 + void *indr_cb_priv, 476 476 void (*cleanup)(struct flow_block_cb *block_cb)); 477 477 void flow_block_cb_free(struct flow_block_cb *block_cb); 478 478 ··· 553 551 554 552 int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv); 555 553 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, 556 - flow_setup_cb_t *setup_cb); 554 + void (*release)(void *cb_priv)); 557 555 int flow_indr_dev_setup_offload(struct net_device *dev, 558 556 enum tc_setup_type type, void *data, 559 557 struct flow_block_offload *bo,
+10 -6
net/core/flow_offload.c
··· 372 372 } 373 373 EXPORT_SYMBOL(flow_indr_dev_register); 374 374 375 - static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv, 375 + static void __flow_block_indr_cleanup(void (*release)(void *cb_priv), 376 + void *cb_priv, 376 377 struct list_head *cleanup_list) 377 378 { 378 379 struct flow_block_cb *this, *next; 379 380 380 381 list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) { 381 - if (this->cb == setup_cb && 382 - this->cb_priv == cb_priv) { 382 + if (this->release == release && 383 + this->indr.cb_priv == cb_priv) { 383 384 list_move(&this->indr.list, cleanup_list); 384 385 return; 385 386 } ··· 398 397 } 399 398 400 399 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, 401 - flow_setup_cb_t *setup_cb) 400 + void (*release)(void *cb_priv)) 402 401 { 403 402 struct flow_indr_dev *this, *next, *indr_dev = NULL; 404 403 LIST_HEAD(cleanup_list); ··· 419 418 return; 420 419 } 421 420 422 - __flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list); 421 + __flow_block_indr_cleanup(release, cb_priv, &cleanup_list); 423 422 mutex_unlock(&flow_indr_block_lock); 424 423 425 424 flow_block_indr_notify(&cleanup_list); ··· 430 429 static void flow_block_indr_init(struct flow_block_cb *flow_block, 431 430 struct flow_block_offload *bo, 432 431 struct net_device *dev, void *data, 432 + void *cb_priv, 433 433 void (*cleanup)(struct flow_block_cb *block_cb)) 434 434 { 435 435 flow_block->indr.binder_type = bo->binder_type; 436 436 flow_block->indr.data = data; 437 + flow_block->indr.cb_priv = cb_priv; 437 438 flow_block->indr.dev = dev; 438 439 flow_block->indr.cleanup = cleanup; 439 440 } ··· 445 442 void (*release)(void *cb_priv), 446 443 struct flow_block_offload *bo, 447 444 struct net_device *dev, void *data, 445 + void *indr_cb_priv, 448 446 void (*cleanup)(struct flow_block_cb *block_cb)) 449 447 { 450 448 struct flow_block_cb *block_cb; ··· 454 450 if (IS_ERR(block_cb)) 455 451 goto out; 456 
452 457 - flow_block_indr_init(block_cb, bo, dev, data, cleanup); 453 + flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup); 458 454 list_add(&block_cb->indr.list, &flow_block_indr_list); 459 455 460 456 out:
+1
net/netfilter/nf_flow_table_offload.c
··· 950 950 nf_flow_table_gc_cleanup(flowtable, dev); 951 951 down_write(&flowtable->flow_block_lock); 952 952 list_del(&block_cb->list); 953 + list_del(&block_cb->driver_list); 953 954 flow_block_cb_free(block_cb); 954 955 up_write(&flowtable->flow_block_lock); 955 956 }
+1
net/netfilter/nf_tables_offload.c
··· 296 296 nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND, 297 297 basechain, &extack); 298 298 mutex_lock(&net->nft.commit_mutex); 299 + list_del(&block_cb->driver_list); 299 300 list_move(&block_cb->list, &bo.cb_list); 300 301 nft_flow_offload_unbind(&bo, basechain); 301 302 mutex_unlock(&net->nft.commit_mutex);
+1
net/sched/cls_api.c
··· 652 652 &block->flow_block, tcf_block_shared(block), 653 653 &extack); 654 654 down_write(&block->cb_lock); 655 + list_del(&block_cb->driver_list); 655 656 list_move(&block_cb->list, &bo.cb_list); 656 657 up_write(&block->cb_lock); 657 658 rtnl_lock();