Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/sched: act_mirred: Create function tcf_mirred_to_dev and improve readability

As a preparation for adding block ID to mirred, separate the part of
mirred that redirects/mirrors to a dev into a specific function so that
it can be called by blockcast for each dev.

Also improve readability, e.g. rename use_reinsert to dont_clone and skb2
to skb_to_send.

Co-developed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
Co-developed-by: Pedro Tammela <pctammela@mojatatu.com>
Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
Signed-off-by: Victor Nogueira <victor@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Victor Nogueira and committed by
David S. Miller
16085e48 a7042cf8

+71 -56
+71 -56
net/sched/act_mirred.c
··· 225 225 return err; 226 226 } 227 227 228 - TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb, 229 - const struct tc_action *a, 230 - struct tcf_result *res) 228 + static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m, 229 + struct net_device *dev, 230 + const bool m_mac_header_xmit, int m_eaction, 231 + int retval) 231 232 { 232 - struct tcf_mirred *m = to_mirred(a); 233 - struct sk_buff *skb2 = skb; 234 - bool m_mac_header_xmit; 235 - struct net_device *dev; 236 - unsigned int nest_level; 237 - int retval, err = 0; 238 - bool use_reinsert; 233 + struct sk_buff *skb_to_send = skb; 239 234 bool want_ingress; 240 235 bool is_redirect; 241 236 bool expects_nh; 242 237 bool at_ingress; 243 - int m_eaction; 238 + bool dont_clone; 244 239 int mac_len; 245 240 bool at_nh; 241 + int err; 246 242 247 - nest_level = __this_cpu_inc_return(mirred_nest_level); 248 - if (unlikely(nest_level > MIRRED_NEST_LIMIT)) { 249 - net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n", 250 - netdev_name(skb->dev)); 251 - __this_cpu_dec(mirred_nest_level); 252 - return TC_ACT_SHOT; 253 - } 254 - 255 - tcf_lastuse_update(&m->tcf_tm); 256 - tcf_action_update_bstats(&m->common, skb); 257 - 258 - m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit); 259 - m_eaction = READ_ONCE(m->tcfm_eaction); 260 - retval = READ_ONCE(m->tcf_action); 261 - dev = rcu_dereference_bh(m->tcfm_dev); 262 - if (unlikely(!dev)) { 263 - pr_notice_once("tc mirred: target device is gone\n"); 264 - goto out; 265 - } 266 - 243 + is_redirect = tcf_mirred_is_act_redirect(m_eaction); 267 244 if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) { 268 245 net_notice_ratelimited("tc mirred to Houston: device %s is down\n", 269 246 dev->name); 247 + err = -ENODEV; 270 248 goto out; 271 249 } 272 250 ··· 252 274 * since we can't easily detect the clsact caller, skip clone only for 253 275 * ingress - that covers the TC S/W datapath. 
254 276 */ 255 - is_redirect = tcf_mirred_is_act_redirect(m_eaction); 256 277 at_ingress = skb_at_tc_ingress(skb); 257 - use_reinsert = at_ingress && is_redirect && 258 - tcf_mirred_can_reinsert(retval); 259 - if (!use_reinsert) { 260 - skb2 = skb_clone(skb, GFP_ATOMIC); 261 - if (!skb2) 278 + dont_clone = skb_at_tc_ingress(skb) && is_redirect && 279 + tcf_mirred_can_reinsert(retval); 280 + if (!dont_clone) { 281 + skb_to_send = skb_clone(skb, GFP_ATOMIC); 282 + if (!skb_to_send) { 283 + err = -ENOMEM; 262 284 goto out; 285 + } 263 286 } 264 287 265 288 want_ingress = tcf_mirred_act_wants_ingress(m_eaction); 266 289 267 290 /* All mirred/redirected skbs should clear previous ct info */ 268 - nf_reset_ct(skb2); 291 + nf_reset_ct(skb_to_send); 269 292 if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */ 270 - skb_dst_drop(skb2); 293 + skb_dst_drop(skb_to_send); 271 294 272 295 expects_nh = want_ingress || !m_mac_header_xmit; 273 296 at_nh = skb->data == skb_network_header(skb); 274 297 if (at_nh != expects_nh) { 275 - mac_len = skb_at_tc_ingress(skb) ? skb->mac_len : 298 + mac_len = at_ingress ? 
skb->mac_len : 276 299 skb_network_offset(skb); 277 300 if (expects_nh) { 278 301 /* target device/action expect data at nh */ 279 - skb_pull_rcsum(skb2, mac_len); 302 + skb_pull_rcsum(skb_to_send, mac_len); 280 303 } else { 281 304 /* target device/action expect data at mac */ 282 - skb_push_rcsum(skb2, mac_len); 305 + skb_push_rcsum(skb_to_send, mac_len); 283 306 } 284 307 } 285 308 286 - skb2->skb_iif = skb->dev->ifindex; 287 - skb2->dev = dev; 309 + skb_to_send->skb_iif = skb->dev->ifindex; 310 + skb_to_send->dev = dev; 288 311 289 - /* mirror is always swallowed */ 290 312 if (is_redirect) { 291 - skb_set_redirected(skb2, skb2->tc_at_ingress); 313 + if (skb == skb_to_send) 314 + retval = TC_ACT_CONSUMED; 292 315 293 - /* let's the caller reinsert the packet, if possible */ 294 - if (use_reinsert) { 295 - err = tcf_mirred_forward(want_ingress, skb); 296 - if (err) 297 - tcf_action_inc_overlimit_qstats(&m->common); 298 - __this_cpu_dec(mirred_nest_level); 299 - return TC_ACT_CONSUMED; 300 - } 316 + skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress); 317 + 318 + err = tcf_mirred_forward(want_ingress, skb_to_send); 319 + } else { 320 + err = tcf_mirred_forward(want_ingress, skb_to_send); 301 321 } 302 322 303 - err = tcf_mirred_forward(want_ingress, skb2); 304 323 if (err) { 305 324 out: 306 325 tcf_action_inc_overlimit_qstats(&m->common); 307 - if (tcf_mirred_is_act_redirect(m_eaction)) 326 + if (is_redirect) 308 327 retval = TC_ACT_SHOT; 309 328 } 329 + 330 + return retval; 331 + } 332 + 333 + TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb, 334 + const struct tc_action *a, 335 + struct tcf_result *res) 336 + { 337 + struct tcf_mirred *m = to_mirred(a); 338 + int retval = READ_ONCE(m->tcf_action); 339 + unsigned int nest_level; 340 + bool m_mac_header_xmit; 341 + struct net_device *dev; 342 + int m_eaction; 343 + 344 + nest_level = __this_cpu_inc_return(mirred_nest_level); 345 + if (unlikely(nest_level > MIRRED_NEST_LIMIT)) { 346 + 
net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n", 347 + netdev_name(skb->dev)); 348 + retval = TC_ACT_SHOT; 349 + goto dec_nest_level; 350 + } 351 + 352 + tcf_lastuse_update(&m->tcf_tm); 353 + tcf_action_update_bstats(&m->common, skb); 354 + 355 + dev = rcu_dereference_bh(m->tcfm_dev); 356 + if (unlikely(!dev)) { 357 + pr_notice_once("tc mirred: target device is gone\n"); 358 + tcf_action_inc_overlimit_qstats(&m->common); 359 + goto dec_nest_level; 360 + } 361 + 362 + m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit); 363 + m_eaction = READ_ONCE(m->tcfm_eaction); 364 + 365 + retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction, 366 + retval); 367 + 368 + dec_nest_level: 310 369 __this_cpu_dec(mirred_nest_level); 311 370 312 371 return retval;