Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

net: sched: Pass qdisc reference in struct flow_block_offload

Previously, shared blocks were only relevant for the pseudo-qdiscs ingress
and clsact. Recently, a qevent facility was introduced, which makes it
possible to bind blocks to well-defined slots of a qdisc instance. RED in
particular got two qevents: early_drop and mark. Drivers that wish to
offload these blocks will be sent the usual notification, and need to know
which qdisc the block is related to.

To that end, extend flow_block_offload with a "sch" pointer, and initialize
it as appropriate. This prompts changes in the indirect block facility,
which now tracks the scheduler in addition to the netdevice. Update the
signatures of several functions similarly.

Signed-off-by: Petr Machata <petrm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Petr Machata, committed by David S. Miller
c40f4e50 e1d82f7a
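
For driver authors, the visible change is the extra "struct Qdisc *" argument threaded through the indirect block path: the qdisc that triggered the bind is now passed to the driver callback and recorded in flow_block_indr for later cleanup. Below is a minimal sketch of a callback matching the updated flow_indr_block_bind_cb_t signature; everything named "example_*" is a hypothetical stand-in rather than code from this commit, and the NULL check is needed because callers without qdisc context (the netfilter paths in this patch) pass NULL for "sch".

/* Hypothetical consumer of the updated flow_indr_block_bind_cb_t
 * signature. Only the signature and the "sch may be NULL" convention
 * come from this commit; the body is an illustrative sketch.
 */
static int example_setup_indr_cb(struct net_device *netdev,
                                 struct Qdisc *sch, void *cb_priv,
                                 enum tc_setup_type type, void *type_data,
                                 void *data,
                                 void (*cleanup)(struct flow_block_cb *block_cb))
{
        struct flow_block_offload *f = type_data;

        if (type != TC_SETUP_BLOCK)
                return -EOPNOTSUPP;

        /* sch is NULL when the bind does not originate from a qdisc,
         * e.g. for the netfilter callers in this patch.
         */
        if (sch)
                netdev_dbg(netdev, "block bind from qdisc %08x, binder type %d\n",
                           sch->handle, f->binder_type);

        /* A real driver would now allocate its flow_block_cb via
         * flow_indr_block_cb_alloc(), passing sch through so that
         * flow_block_indr_init() records it for the cleanup path:
         *
         *   flow_indr_block_cb_alloc(cb, cb_ident, cb_priv, release,
         *                            f, netdev, sch, data, indr_cb_priv,
         *                            cleanup);
         */
        return 0;
}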

9 files changed, +38 -32
+4 -5
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
···
         kfree(priv);
 }

-static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
+static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp,
                                     struct flow_block_offload *f, void *data,
                                     void (*cleanup)(struct flow_block_cb *block_cb))
 {
···
         block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
                                             cb_priv, cb_priv,
                                             bnxt_tc_setup_indr_rel, f,
-                                            netdev, data, bp, cleanup);
+                                            netdev, sch, data, bp, cleanup);
         if (IS_ERR(block_cb)) {
                 list_del(&cb_priv->list);
                 kfree(cb_priv);
···
         return netif_is_vxlan(netdev);
 }

-static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
                                  enum tc_setup_type type, void *type_data,
                                  void *data,
                                  void (*cleanup)(struct flow_block_cb *block_cb))
···
         switch (type) {
         case TC_SETUP_BLOCK:
-                return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data,
-                                                cleanup);
+                return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup);
         default:
                 break;
         }
+5 -5
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
···
 static LIST_HEAD(mlx5e_block_cb_list);

 static int
-mlx5e_rep_indr_setup_block(struct net_device *netdev,
+mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
                            struct mlx5e_rep_priv *rpriv,
                            struct flow_block_offload *f,
                            flow_setup_cb_t *setup_cb,
···

         block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
                                             mlx5e_rep_indr_block_unbind,
-                                            f, netdev, data, rpriv,
+                                            f, netdev, sch, data, rpriv,
                                             cleanup);
         if (IS_ERR(block_cb)) {
                 list_del(&indr_priv->list);
···
 }

 static
-int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
+int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
                             enum tc_setup_type type, void *type_data,
                             void *data,
                             void (*cleanup)(struct flow_block_cb *block_cb))
 {
         switch (type) {
         case TC_SETUP_BLOCK:
-                return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+                return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
                                                   mlx5e_rep_indr_setup_tc_cb,
                                                   data, cleanup);
         case TC_SETUP_FT:
-                return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+                return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
                                                   mlx5e_rep_indr_setup_ft_cb,
                                                   data, cleanup);
         default:
+1 -1
drivers/net/ethernet/netronome/nfp/flower/main.h
···
 int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
                                  struct tc_cls_matchall_offload *flow);
 void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
-int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
                                 enum tc_setup_type type, void *type_data,
                                 void *data,
                                 void (*cleanup)(struct flow_block_cb *block_cb));
+4 -4
drivers/net/ethernet/netronome/nfp/flower/offload.c
···
 }

 static int
-nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
+nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
                                struct flow_block_offload *f, void *data,
                                void (*cleanup)(struct flow_block_cb *block_cb))
 {
···
         block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
                                             cb_priv, cb_priv,
                                             nfp_flower_setup_indr_tc_release,
-                                            f, netdev, data, app, cleanup);
+                                            f, netdev, sch, data, app, cleanup);
         if (IS_ERR(block_cb)) {
                 list_del(&cb_priv->list);
                 kfree(cb_priv);
···
 }

 int
-nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
                             enum tc_setup_type type, void *type_data,
                             void *data,
                             void (*cleanup)(struct flow_block_cb *block_cb))
···
         switch (type) {
         case TC_SETUP_BLOCK:
-                return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
+                return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
                                                       type_data, data, cleanup);
         default:
                 return -EOPNOTSUPP;
+6 -3
include/net/flow_offload.h
···
         struct list_head cb_list;
         struct list_head *driver_block_list;
         struct netlink_ext_ack *extack;
+        struct Qdisc *sch;
 };

 enum tc_setup_type;
···
 struct flow_block_indr {
         struct list_head list;
         struct net_device *dev;
+        struct Qdisc *sch;
         enum flow_block_binder_type binder_type;
         void *data;
         void *cb_priv;
···
                                                void *cb_ident, void *cb_priv,
                                                void (*release)(void *cb_priv),
                                                struct flow_block_offload *bo,
-                                               struct net_device *dev, void *data,
+                                               struct net_device *dev,
+                                               struct Qdisc *sch, void *data,
                                                void *indr_cb_priv,
                                                void (*cleanup)(struct flow_block_cb *block_cb));
 void flow_block_cb_free(struct flow_block_cb *block_cb);
···
         INIT_LIST_HEAD(&flow_block->cb_list);
 }

-typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
+typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
                                       enum tc_setup_type type, void *type_data,
                                       void *data,
                                       void (*cleanup)(struct flow_block_cb *block_cb));
···
 int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
                               void (*release)(void *cb_priv));
-int flow_indr_dev_setup_offload(struct net_device *dev,
+int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
                                 enum tc_setup_type type, void *data,
                                 struct flow_block_offload *bo,
                                 void (*cleanup)(struct flow_block_cb *block_cb));
+7 -5
net/core/flow_offload.c
···

 static void flow_block_indr_init(struct flow_block_cb *flow_block,
                                  struct flow_block_offload *bo,
-                                 struct net_device *dev, void *data,
+                                 struct net_device *dev, struct Qdisc *sch, void *data,
                                  void *cb_priv,
                                  void (*cleanup)(struct flow_block_cb *block_cb))
 {
···
         flow_block->indr.data = data;
         flow_block->indr.cb_priv = cb_priv;
         flow_block->indr.dev = dev;
+        flow_block->indr.sch = sch;
         flow_block->indr.cleanup = cleanup;
 }

···
                                                void *cb_ident, void *cb_priv,
                                                void (*release)(void *cb_priv),
                                                struct flow_block_offload *bo,
-                                               struct net_device *dev, void *data,
+                                               struct net_device *dev,
+                                               struct Qdisc *sch, void *data,
                                                void *indr_cb_priv,
                                                void (*cleanup)(struct flow_block_cb *block_cb))
 {
···
         if (IS_ERR(block_cb))
                 goto out;

-        flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup);
+        flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
         list_add(&block_cb->indr.list, &flow_block_indr_list);

 out:
···
 }
 EXPORT_SYMBOL(flow_indr_block_cb_alloc);

-int flow_indr_dev_setup_offload(struct net_device *dev,
+int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
                                 enum tc_setup_type type, void *data,
                                 struct flow_block_offload *bo,
                                 void (*cleanup)(struct flow_block_cb *block_cb))
···
         mutex_lock(&flow_indr_block_lock);
         list_for_each_entry(this, &flow_block_indr_dev_list, list)
-                this->cb(dev, this->cb_priv, type, bo, data, cleanup);
+                this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);

         mutex_unlock(&flow_indr_block_lock);
+1 -1
net/netfilter/nf_flow_table_offload.c
···
         nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
                                          extack);

-        return flow_indr_dev_setup_offload(dev, TC_SETUP_FT, flowtable, bo,
+        return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo,
                                            nf_flow_table_indr_cleanup);
 }

+1 -1
net/netfilter/nf_tables_offload.c
···

         nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);

-        err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, basechain, &bo,
+        err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
                                           nft_indr_block_cleanup);
         if (err < 0)
                 return err;
+9 -7
net/sched/cls_api.c
···
                                struct flow_block_offload *bo);

 static void tcf_block_offload_init(struct flow_block_offload *bo,
-                                   struct net_device *dev,
+                                   struct net_device *dev, struct Qdisc *sch,
                                    enum flow_block_command command,
                                    enum flow_block_binder_type binder_type,
                                    struct flow_block *flow_block,
···
         bo->block = flow_block;
         bo->block_shared = shared;
         bo->extack = extack;
+        bo->sch = sch;
         INIT_LIST_HEAD(&bo->cb_list);
 }
···
 {
         struct tcf_block *block = block_cb->indr.data;
         struct net_device *dev = block_cb->indr.dev;
+        struct Qdisc *sch = block_cb->indr.sch;
         struct netlink_ext_ack extack = {};
         struct flow_block_offload bo;

-        tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND,
+        tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
                                block_cb->indr.binder_type,
                                &block->flow_block, tcf_block_shared(block),
                                &extack);
···
 }

 static int tcf_block_offload_cmd(struct tcf_block *block,
-                                 struct net_device *dev,
+                                 struct net_device *dev, struct Qdisc *sch,
                                  struct tcf_block_ext_info *ei,
                                  enum flow_block_command command,
                                  struct netlink_ext_ack *extack)
 {
         struct flow_block_offload bo = {};

-        tcf_block_offload_init(&bo, dev, command, ei->binder_type,
+        tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
                                &block->flow_block, tcf_block_shared(block),
                                extack);
···
                 return tcf_block_setup(block, &bo);
         }

-        flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo,
+        flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
                                     tc_block_indr_cleanup);
         tcf_block_setup(block, &bo);
···
                 goto err_unlock;
         }

-        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
+        err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
         if (err == -EOPNOTSUPP)
                 goto no_offload_dev_inc;
         if (err)
···
         int err;

         down_write(&block->cb_lock);
-        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
+        err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
         if (err == -EOPNOTSUPP)
                 goto no_offload_dev_dec;
         up_write(&block->cb_lock);