// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_wrapper.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	netdev_put(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
	[TCA_MIRRED_BLOCKID]	= NLA_POLICY_MIN(NLA_U32, 1),
};

static struct tc_action_ops act_mirred_ops;

static void tcf_mirred_replace_dev(struct tcf_mirred *m,
				   struct net_device *ndev)
{
	struct net_device *odev;

	odev = rcu_replace_pointer(m->tcfm_dev, ndev,
				   lockdep_is_held(&m->tcf_lock));
	netdev_put(odev, &m->tcfm_dev_tracker);
}
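
/* Control path: tcf_mirred_init() below parses the netlink attributes.
 * TCA_MIRRED_PARMS carries struct tc_mirred (action index, eaction and
 * target ifindex); TCA_MIRRED_BLOCKID may name a TC block instead of a
 * single device, but never both at once.
 *
 * Illustrative user space usage via iproute2's tc (exact syntax may vary
 * with the iproute2 version):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol all matchall \
 *		action mirred egress redirect dev eth1
 */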
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	if (tb[TCA_MIRRED_BLOCKID] && parm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot specify Block ID and dev simultaneously");
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);

		return -EINVAL;
	}

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex && !tb[TCA_MIRRED_BLOCKID]) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack,
					   "Must specify device or block");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		struct net_device *ndev;

		ndev = dev_get_by_index(net, parm->ifindex);
		if (!ndev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(ndev);
		tcf_mirred_replace_dev(m, ndev);
		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
		m->tcfm_mac_header_xmit = mac_header_xmit;
		m->tcfm_blockid = 0;
	} else if (tb[TCA_MIRRED_BLOCKID]) {
		tcf_mirred_replace_dev(m, NULL);
		m->tcfm_mac_header_xmit = false;
		m->tcfm_blockid = nla_get_u32(tb[TCA_MIRRED_BLOCKID]);
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static int
tcf_mirred_forward(bool at_ingress, bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
	else if (!at_ingress)
		err = netif_rx(skb);
	else
		err = netif_receive_skb(skb);

	return err;
}
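
/* Deliver one skb to one target device.  Delivery goes through one of the
 * three paths selected in tcf_mirred_forward() above: dev_queue_xmit() for
 * egress targets, netif_rx() when re-injecting to ingress from the egress
 * path, or netif_receive_skb() when already running at ingress.
 */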
static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
			     struct net_device *dev,
			     const bool m_mac_header_xmit, int m_eaction,
			     int retval)
{
	struct sk_buff *skb_to_send = skb;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	bool dont_clone;
	int mac_len;
	bool at_nh;
	int err;

	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto err_cant_do;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	at_ingress = skb_at_tc_ingress(skb);
	if (dev == skb->dev && want_ingress == at_ingress) {
		pr_notice_once("tc mirred: Loop (%s:%s --> %s:%s)\n",
			       netdev_name(skb->dev),
			       at_ingress ? "ingress" : "egress",
			       netdev_name(dev),
			       want_ingress ? "ingress" : "egress");
		goto err_cant_do;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	dont_clone = skb_at_tc_ingress(skb) && is_redirect &&
		tcf_mirred_can_reinsert(retval);
	if (!dont_clone) {
		skb_to_send = skb_clone(skb, GFP_ATOMIC);
		if (!skb_to_send)
			goto err_cant_do;
	}

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb_to_send);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb_to_send);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = at_ingress ? skb->mac_len :
			  skb_network_offset(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb_to_send, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb_to_send, mac_len);
		}
	}

	skb_to_send->skb_iif = skb->dev->ifindex;
	skb_to_send->dev = dev;

	if (is_redirect) {
		if (skb == skb_to_send)
			retval = TC_ACT_CONSUMED;

		skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);

		err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
	} else {
		err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
	}
	if (err)
		tcf_action_inc_overlimit_qstats(&m->common);

	return retval;

err_cant_do:
	if (is_redirect)
		retval = TC_ACT_SHOT;
	tcf_action_inc_overlimit_qstats(&m->common);
	return retval;
}
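
/* Block-cast: when the action targets a TC block rather than a single
 * device, the skb is sent to every port bound to the block except the one
 * it arrived on.  For a redirect, all ports but the last one visited get a
 * mirror copy and only the last gets the actual redirect, so the original
 * skb is consumed exactly once.
 */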
static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m,
			       struct tcf_block *block, int m_eaction,
			       const u32 exception_ifindex, int retval)
{
	struct net_device *dev_prev = NULL;
	struct net_device *dev = NULL;
	unsigned long index;
	int mirred_eaction;

	mirred_eaction = tcf_mirred_act_wants_ingress(m_eaction) ?
		TCA_INGRESS_MIRROR : TCA_EGRESS_MIRROR;

	xa_for_each(&block->ports, index, dev) {
		if (index == exception_ifindex)
			continue;

		if (!dev_prev)
			goto assign_prev;

		tcf_mirred_to_dev(skb, m, dev_prev,
				  dev_is_mac_header_xmit(dev),
				  mirred_eaction, retval);
assign_prev:
		dev_prev = dev;
	}

	if (dev_prev)
		return tcf_mirred_to_dev(skb, m, dev_prev,
					 dev_is_mac_header_xmit(dev_prev),
					 m_eaction, retval);

	return retval;
}

static int tcf_blockcast_mirror(struct sk_buff *skb, struct tcf_mirred *m,
				struct tcf_block *block, int m_eaction,
				const u32 exception_ifindex, int retval)
{
	struct net_device *dev = NULL;
	unsigned long index;

	xa_for_each(&block->ports, index, dev) {
		if (index == exception_ifindex)
			continue;

		tcf_mirred_to_dev(skb, m, dev,
				  dev_is_mac_header_xmit(dev),
				  m_eaction, retval);
	}

	return retval;
}

static int tcf_blockcast(struct sk_buff *skb, struct tcf_mirred *m,
			 const u32 blockid, struct tcf_result *res,
			 int retval)
{
	const u32 exception_ifindex = skb->dev->ifindex;
	struct tcf_block *block;
	bool is_redirect;
	int m_eaction;

	m_eaction = READ_ONCE(m->tcfm_eaction);
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);

	/* we are already under rcu protection, so can call block lookup
	 * directly.
	 */
	block = tcf_block_lookup(dev_net(skb->dev), blockid);
	if (!block || xa_empty(&block->ports)) {
		tcf_action_inc_overlimit_qstats(&m->common);
		return retval;
	}

	if (is_redirect)
		return tcf_blockcast_redir(skb, m, block, m_eaction,
					   exception_ifindex, retval);

	/* If it's not redirect, it is mirror */
	return tcf_blockcast_mirror(skb, m, block, m_eaction, exception_ifindex,
				    retval);
}

TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	int retval = READ_ONCE(m->tcf_action);
	struct netdev_xmit *xmit;
	bool m_mac_header_xmit;
	struct net_device *dev;
	int i, m_eaction;
	u32 blockid;

#ifdef CONFIG_PREEMPT_RT
	xmit = &current->net_xmit;
#else
	xmit = this_cpu_ptr(&softnet_data.xmit);
#endif
	if (unlikely(xmit->sched_mirred_nest >= MIRRED_NEST_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	blockid = READ_ONCE(m->tcfm_blockid);
	if (blockid)
		return tcf_blockcast(skb, m, blockid, res, retval);

	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		tcf_action_inc_overlimit_qstats(&m->common);
		return retval;
	}
	for (i = 0; i < xmit->sched_mirred_nest; i++) {
		if (xmit->sched_mirred_dev[i] != dev)
			continue;
		pr_notice_once("tc mirred: loop on device %s\n",
			       netdev_name(dev));
		tcf_action_inc_overlimit_qstats(&m->common);
		return retval;
	}

	xmit->sched_mirred_dev[xmit->sched_mirred_nest++] = dev;

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);

	retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
				   retval);
	xmit->sched_mirred_nest--;

	return retval;
}
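
/* Folds counters reported by drivers that offloaded this action in
 * hardware into the software stats.
 */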
static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;
	u32 blockid;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	blockid = m->tcfm_blockid;
	if (blockid && nla_put_u32(skb, TCA_MIRRED_BLOCKID, blockid))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
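
/* When a target device unregisters, drop our reference and clear tcfm_dev
 * so the fast path sees a NULL target instead of a dangling pointer.
 */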
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				netdev_put(dev, &m->tcfm_dev_tracker);
				/* Note : no rcu grace period necessary, as
				 * net_device are already rcu protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
}
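
/* Hardware offload translation: with bind set, fill a flow_action_entry
 * (including the target device) for rule installation; otherwise only the
 * flow_offload_action id is needed, e.g. for stats queries.
 */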
static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_mirred_egress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT;
		else if (is_tcf_mirred_egress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED;
		else if (is_tcf_mirred_ingress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
		else if (is_tcf_mirred_ingress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_mirred_ops = {
	.kind = "mirred",
	.id = TCA_ID_MIRRED,
	.owner = THIS_MODULE,
	.act = tcf_mirred_act,
	.stats_update = tcf_stats_update,
	.dump = tcf_mirred_dump,
	.cleanup = tcf_mirred_release,
	.init = tcf_mirred_init,
	.get_fill_size = tcf_mirred_get_fill_size,
	.offload_act_setup = tcf_mirred_offload_act_setup,
	.size = sizeof(struct tcf_mirred),
	.get_dev = tcf_mirred_get_dev,
};
MODULE_ALIAS_NET_ACT("mirred");

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_mirred_ops.net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id = &act_mirred_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);