Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: core: rename indirect block ingress cb function

With indirect blocks, a driver can register for callbacks from a device
that it does not 'own', for example, a tunnel device. When registering to
or unregistering from a new device, a callback is triggered to generate
a bind/unbind event. This, in turn, allows the driver to receive any
existing rules or to properly clean up installed rules.

When first added, it was assumed that all indirect block registrations
would be for ingress offloads. However, the NFP driver can, in some
instances, support clsact qdisc binds for egress offload.

Change the name of the indirect block callback command in flow_offload to
remove the 'ingress' identifier from it. While this does not change
functionality, a follow up patch will implement a more generic
callback than those currently supporting only ingress offload.

Fixes: 4d12ba42787b ("nfp: flower: allow offloading of matches on 'internal' ports")
Signed-off-by: John Hurley <john.hurley@netronome.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

John Hurley and committed by
David S. Miller
dbad3408 e0b60903

+34 -36
+7 -8
include/net/flow_offload.h
··· 380 380 typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, 381 381 enum tc_setup_type type, void *type_data); 382 382 383 - typedef void flow_indr_block_ing_cmd_t(struct net_device *dev, 384 - flow_indr_block_bind_cb_t *cb, 385 - void *cb_priv, 386 - enum flow_block_command command); 383 + typedef void flow_indr_block_cmd_t(struct net_device *dev, 384 + flow_indr_block_bind_cb_t *cb, void *cb_priv, 385 + enum flow_block_command command); 387 386 388 - struct flow_indr_block_ing_entry { 389 - flow_indr_block_ing_cmd_t *cb; 387 + struct flow_indr_block_entry { 388 + flow_indr_block_cmd_t *cb; 390 389 struct list_head list; 391 390 }; 392 391 393 - void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry); 392 + void flow_indr_add_block_cb(struct flow_indr_block_entry *entry); 394 393 395 - void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry); 394 + void flow_indr_del_block_cb(struct flow_indr_block_entry *entry); 396 395 397 396 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, 398 397 flow_indr_block_bind_cb_t *cb,
+22 -23
net/core/flow_offload.c
··· 283 283 } 284 284 EXPORT_SYMBOL(flow_block_cb_setup_simple); 285 285 286 - static LIST_HEAD(block_ing_cb_list); 286 + static LIST_HEAD(block_cb_list); 287 287 288 288 static struct rhashtable indr_setup_block_ht; 289 289 ··· 391 391 kfree(indr_block_cb); 392 392 } 393 393 394 - static DEFINE_MUTEX(flow_indr_block_ing_cb_lock); 394 + static DEFINE_MUTEX(flow_indr_block_cb_lock); 395 395 396 - static void flow_block_ing_cmd(struct net_device *dev, 397 - flow_indr_block_bind_cb_t *cb, 398 - void *cb_priv, 399 - enum flow_block_command command) 396 + static void flow_block_cmd(struct net_device *dev, 397 + flow_indr_block_bind_cb_t *cb, void *cb_priv, 398 + enum flow_block_command command) 400 399 { 401 - struct flow_indr_block_ing_entry *entry; 400 + struct flow_indr_block_entry *entry; 402 401 403 - mutex_lock(&flow_indr_block_ing_cb_lock); 404 - list_for_each_entry(entry, &block_ing_cb_list, list) { 402 + mutex_lock(&flow_indr_block_cb_lock); 403 + list_for_each_entry(entry, &block_cb_list, list) { 405 404 entry->cb(dev, cb, cb_priv, command); 406 405 } 407 - mutex_unlock(&flow_indr_block_ing_cb_lock); 406 + mutex_unlock(&flow_indr_block_cb_lock); 408 407 } 409 408 410 409 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, ··· 423 424 if (err) 424 425 goto err_dev_put; 425 426 426 - flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, 427 - FLOW_BLOCK_BIND); 427 + flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, 428 + FLOW_BLOCK_BIND); 428 429 429 430 return 0; 430 431 ··· 463 464 if (!indr_block_cb) 464 465 return; 465 466 466 - flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, 467 - FLOW_BLOCK_UNBIND); 467 + flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv, 468 + FLOW_BLOCK_UNBIND); 468 469 469 470 flow_indr_block_cb_del(indr_block_cb); 470 471 flow_indr_block_dev_put(indr_dev); ··· 498 499 } 499 500 EXPORT_SYMBOL_GPL(flow_indr_block_call); 500 501 501 - void 
flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry) 502 + void flow_indr_add_block_cb(struct flow_indr_block_entry *entry) 502 503 { 503 - mutex_lock(&flow_indr_block_ing_cb_lock); 504 - list_add_tail(&entry->list, &block_ing_cb_list); 505 - mutex_unlock(&flow_indr_block_ing_cb_lock); 504 + mutex_lock(&flow_indr_block_cb_lock); 505 + list_add_tail(&entry->list, &block_cb_list); 506 + mutex_unlock(&flow_indr_block_cb_lock); 506 507 } 507 - EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb); 508 + EXPORT_SYMBOL_GPL(flow_indr_add_block_cb); 508 509 509 - void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry) 510 + void flow_indr_del_block_cb(struct flow_indr_block_entry *entry) 510 511 { 511 - mutex_lock(&flow_indr_block_ing_cb_lock); 512 + mutex_lock(&flow_indr_block_cb_lock); 512 513 list_del(&entry->list); 513 - mutex_unlock(&flow_indr_block_ing_cb_lock); 514 + mutex_unlock(&flow_indr_block_cb_lock); 514 515 } 515 - EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb); 516 + EXPORT_SYMBOL_GPL(flow_indr_del_block_cb); 516 517 517 518 static int __init init_flow_indr_rhashtable(void) 518 519 {
+3 -3
net/netfilter/nf_tables_offload.c
··· 588 588 return NOTIFY_DONE; 589 589 } 590 590 591 - static struct flow_indr_block_ing_entry block_ing_entry = { 591 + static struct flow_indr_block_entry block_ing_entry = { 592 592 .cb = nft_indr_block_cb, 593 593 .list = LIST_HEAD_INIT(block_ing_entry.list), 594 594 }; ··· 605 605 if (err < 0) 606 606 return err; 607 607 608 - flow_indr_add_block_ing_cb(&block_ing_entry); 608 + flow_indr_add_block_cb(&block_ing_entry); 609 609 610 610 return 0; 611 611 } 612 612 613 613 void nft_offload_exit(void) 614 614 { 615 - flow_indr_del_block_ing_cb(&block_ing_entry); 615 + flow_indr_del_block_cb(&block_ing_entry); 616 616 unregister_netdevice_notifier(&nft_offload_netdev_notifier); 617 617 }
+2 -2
net/sched/cls_api.c
··· 3626 3626 .size = sizeof(struct tcf_net), 3627 3627 }; 3628 3628 3629 - static struct flow_indr_block_ing_entry block_ing_entry = { 3629 + static struct flow_indr_block_entry block_ing_entry = { 3630 3630 .cb = tc_indr_block_get_and_ing_cmd, 3631 3631 .list = LIST_HEAD_INIT(block_ing_entry.list), 3632 3632 }; ··· 3643 3643 if (err) 3644 3644 goto err_register_pernet_subsys; 3645 3645 3646 - flow_indr_add_block_ing_cb(&block_ing_entry); 3646 + flow_indr_add_block_cb(&block_ing_entry); 3647 3647 3648 3648 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 3649 3649 RTNL_FLAG_DOIT_UNLOCKED);