Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/sched: act_mirred: Allow mirred to block

So far the mirred action has only supported mirroring or redirecting
packets to a target netdev: a matching packet is redirected or mirrored
to the specified network device.

In this patch we enable mirred to mirror to a tc block as well.
IOW, the new syntax looks as follows:
... mirred <ingress | egress> <mirror | redirect> [index INDEX] < <blockid BLOCKID> | <dev <devname>> >

Examples of mirroring or redirecting to a tc block:
$ tc filter add block 22 protocol ip pref 25 \
flower dst_ip 192.168.0.0/16 action mirred egress mirror blockid 22

$ tc filter add block 22 protocol ip pref 25 \
flower dst_ip 10.10.10.10/32 action mirred egress redirect blockid 22

Co-developed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
Co-developed-by: Pedro Tammela <pctammela@mojatatu.com>
Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
Signed-off-by: Victor Nogueira <victor@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Victor Nogueira and committed by David S. Miller.
42f39036 415e38bf

+119 -2
+1
include/net/tc_act/tc_mirred.h
··· 8 8 struct tcf_mirred { 9 9 struct tc_action common; 10 10 int tcfm_eaction; 11 + u32 tcfm_blockid; 11 12 bool tcfm_mac_header_xmit; 12 13 struct net_device __rcu *tcfm_dev; 13 14 netdevice_tracker tcfm_dev_tracker;
+1
include/uapi/linux/tc_act/tc_mirred.h
··· 21 21 TCA_MIRRED_TM, 22 22 TCA_MIRRED_PARMS, 23 23 TCA_MIRRED_PAD, 24 + TCA_MIRRED_BLOCKID, 24 25 __TCA_MIRRED_MAX 25 26 }; 26 27 #define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1)
+117 -2
net/sched/act_mirred.c
··· 85 85 86 86 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = { 87 87 [TCA_MIRRED_PARMS] = { .len = sizeof(struct tc_mirred) }, 88 + [TCA_MIRRED_BLOCKID] = NLA_POLICY_MIN(NLA_U32, 1), 88 89 }; 89 90 90 91 static struct tc_action_ops act_mirred_ops; ··· 137 136 if (exists && bind) 138 137 return 0; 139 138 139 + if (tb[TCA_MIRRED_BLOCKID] && parm->ifindex) { 140 + NL_SET_ERR_MSG_MOD(extack, 141 + "Cannot specify Block ID and dev simultaneously"); 142 + if (exists) 143 + tcf_idr_release(*a, bind); 144 + else 145 + tcf_idr_cleanup(tn, index); 146 + 147 + return -EINVAL; 148 + } 149 + 140 150 switch (parm->eaction) { 141 151 case TCA_EGRESS_MIRROR: 142 152 case TCA_EGRESS_REDIR: ··· 164 152 } 165 153 166 154 if (!exists) { 167 - if (!parm->ifindex) { 155 + if (!parm->ifindex && !tb[TCA_MIRRED_BLOCKID]) { 168 156 tcf_idr_cleanup(tn, index); 169 - NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist"); 157 + NL_SET_ERR_MSG_MOD(extack, 158 + "Must specify device or block"); 170 159 return -EINVAL; 171 160 } 172 161 ret = tcf_idr_create_from_flags(tn, index, est, a, ··· 205 192 tcf_mirred_replace_dev(m, ndev); 206 193 netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC); 207 194 m->tcfm_mac_header_xmit = mac_header_xmit; 195 + m->tcfm_blockid = 0; 196 + } else if (tb[TCA_MIRRED_BLOCKID]) { 197 + tcf_mirred_replace_dev(m, NULL); 198 + m->tcfm_mac_header_xmit = false; 199 + m->tcfm_blockid = nla_get_u32(tb[TCA_MIRRED_BLOCKID]); 208 200 } 209 201 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); 210 202 m->tcfm_eaction = parm->eaction; ··· 334 316 return retval; 335 317 } 336 318 319 + static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m, 320 + struct tcf_block *block, int m_eaction, 321 + const u32 exception_ifindex, int retval) 322 + { 323 + struct net_device *dev_prev = NULL; 324 + struct net_device *dev = NULL; 325 + unsigned long index; 326 + int mirred_eaction; 327 + 328 + mirred_eaction = 
tcf_mirred_act_wants_ingress(m_eaction) ? 329 + TCA_INGRESS_MIRROR : TCA_EGRESS_MIRROR; 330 + 331 + xa_for_each(&block->ports, index, dev) { 332 + if (index == exception_ifindex) 333 + continue; 334 + 335 + if (!dev_prev) 336 + goto assign_prev; 337 + 338 + tcf_mirred_to_dev(skb, m, dev_prev, 339 + dev_is_mac_header_xmit(dev), 340 + mirred_eaction, retval); 341 + assign_prev: 342 + dev_prev = dev; 343 + } 344 + 345 + if (dev_prev) 346 + return tcf_mirred_to_dev(skb, m, dev_prev, 347 + dev_is_mac_header_xmit(dev_prev), 348 + m_eaction, retval); 349 + 350 + return retval; 351 + } 352 + 353 + static int tcf_blockcast_mirror(struct sk_buff *skb, struct tcf_mirred *m, 354 + struct tcf_block *block, int m_eaction, 355 + const u32 exception_ifindex, int retval) 356 + { 357 + struct net_device *dev = NULL; 358 + unsigned long index; 359 + 360 + xa_for_each(&block->ports, index, dev) { 361 + if (index == exception_ifindex) 362 + continue; 363 + 364 + tcf_mirred_to_dev(skb, m, dev, 365 + dev_is_mac_header_xmit(dev), 366 + m_eaction, retval); 367 + } 368 + 369 + return retval; 370 + } 371 + 372 + static int tcf_blockcast(struct sk_buff *skb, struct tcf_mirred *m, 373 + const u32 blockid, struct tcf_result *res, 374 + int retval) 375 + { 376 + const u32 exception_ifindex = skb->dev->ifindex; 377 + struct tcf_block *block; 378 + bool is_redirect; 379 + int m_eaction; 380 + 381 + m_eaction = READ_ONCE(m->tcfm_eaction); 382 + is_redirect = tcf_mirred_is_act_redirect(m_eaction); 383 + 384 + /* we are already under rcu protection, so can call block lookup 385 + * directly. 
386 + */ 387 + block = tcf_block_lookup(dev_net(skb->dev), blockid); 388 + if (!block || xa_empty(&block->ports)) { 389 + tcf_action_inc_overlimit_qstats(&m->common); 390 + return retval; 391 + } 392 + 393 + if (is_redirect) 394 + return tcf_blockcast_redir(skb, m, block, m_eaction, 395 + exception_ifindex, retval); 396 + 397 + /* If it's not redirect, it is mirror */ 398 + return tcf_blockcast_mirror(skb, m, block, m_eaction, exception_ifindex, 399 + retval); 400 + } 401 + 337 402 TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb, 338 403 const struct tc_action *a, 339 404 struct tcf_result *res) ··· 427 326 bool m_mac_header_xmit; 428 327 struct net_device *dev; 429 328 int m_eaction; 329 + u32 blockid; 430 330 431 331 nest_level = __this_cpu_inc_return(mirred_nest_level); 432 332 if (unlikely(nest_level > MIRRED_NEST_LIMIT)) { ··· 439 337 440 338 tcf_lastuse_update(&m->tcf_tm); 441 339 tcf_action_update_bstats(&m->common, skb); 340 + 341 + blockid = READ_ONCE(m->tcfm_blockid); 342 + if (blockid) { 343 + retval = tcf_blockcast(skb, m, blockid, res, retval); 344 + goto dec_nest_level; 345 + } 442 346 443 347 dev = rcu_dereference_bh(m->tcfm_dev); 444 348 if (unlikely(!dev)) { ··· 487 379 }; 488 380 struct net_device *dev; 489 381 struct tcf_t t; 382 + u32 blockid; 490 383 491 384 spin_lock_bh(&m->tcf_lock); 492 385 opt.action = m->tcf_action; ··· 497 388 opt.ifindex = dev->ifindex; 498 389 499 390 if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt)) 391 + goto nla_put_failure; 392 + 393 + blockid = m->tcfm_blockid; 394 + if (blockid && nla_put_u32(skb, TCA_MIRRED_BLOCKID, blockid)) 500 395 goto nla_put_failure; 501 396 502 397 tcf_tm_dump(&t, &m->tcf_tm); ··· 533 420 * net_device are already rcu protected. 534 421 */ 535 422 RCU_INIT_POINTER(m->tcfm_dev, NULL); 423 + } else if (m->tcfm_blockid) { 424 + m->tcfm_blockid = 0; 536 425 } 537 426 spin_unlock_bh(&m->tcf_lock); 538 427 }