Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

gro: remove rcu_read_lock/rcu_read_unlock from gro_complete handlers

All gro_complete() handlers are called from napi_gro_complete()
while rcu_read_lock() is already held.

There is no point in stacking more rcu_read_lock() calls.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Eric Dumazet and committed by
Jakub Kicinski
627b94f7 fc1ca334

+8 -32
-3
drivers/net/geneve.c
··· 545 545 gh_len = geneve_hlen(gh); 546 546 type = gh->proto_type; 547 547 548 - rcu_read_lock(); 549 548 ptype = gro_find_complete_by_type(type); 550 549 if (ptype) 551 550 err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); 552 - 553 - rcu_read_unlock(); 554 551 555 552 skb_set_inner_mac_header(skb, nhoff + gh_len); 556 553
-2
net/8021q/vlan_core.c
··· 513 513 struct packet_offload *ptype; 514 514 int err = -ENOENT; 515 515 516 - rcu_read_lock(); 517 516 ptype = gro_find_complete_by_type(type); 518 517 if (ptype) 519 518 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, 520 519 ipv6_gro_complete, inet_gro_complete, 521 520 skb, nhoff + sizeof(*vhdr)); 522 521 523 - rcu_read_unlock(); 524 522 return err; 525 523 } 526 524
-2
net/ethernet/eth.c
··· 466 466 if (skb->encapsulation) 467 467 skb_set_inner_mac_header(skb, nhoff); 468 468 469 - rcu_read_lock(); 470 469 ptype = gro_find_complete_by_type(type); 471 470 if (ptype != NULL) 472 471 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete, 473 472 ipv6_gro_complete, inet_gro_complete, 474 473 skb, nhoff + sizeof(*eh)); 475 474 476 - rcu_read_unlock(); 477 475 return err; 478 476 } 479 477 EXPORT_SYMBOL(eth_gro_complete);
+2 -5
net/ipv4/af_inet.c
··· 1612 1612 csum_replace2(&iph->check, iph->tot_len, newlen); 1613 1613 iph->tot_len = newlen; 1614 1614 1615 - rcu_read_lock(); 1616 1615 ops = rcu_dereference(inet_offloads[proto]); 1617 1616 if (WARN_ON(!ops || !ops->callbacks.gro_complete)) 1618 - goto out_unlock; 1617 + goto out; 1619 1618 1620 1619 /* Only need to add sizeof(*iph) to get to the next hdr below 1621 1620 * because any hdr with option will have been flushed in ··· 1624 1625 tcp4_gro_complete, udp4_gro_complete, 1625 1626 skb, nhoff + sizeof(*iph)); 1626 1627 1627 - out_unlock: 1628 - rcu_read_unlock(); 1629 - 1628 + out: 1630 1629 return err; 1631 1630 } 1632 1631
+4 -9
net/ipv4/fou.c
··· 266 266 const struct net_offload *ops; 267 267 int err = -ENOSYS; 268 268 269 - rcu_read_lock(); 270 269 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; 271 270 ops = rcu_dereference(offloads[proto]); 272 271 if (WARN_ON(!ops || !ops->callbacks.gro_complete)) 273 - goto out_unlock; 272 + goto out; 274 273 275 274 err = ops->callbacks.gro_complete(skb, nhoff); 276 275 277 276 skb_set_inner_mac_header(skb, nhoff); 278 277 279 - out_unlock: 280 - rcu_read_unlock(); 281 - 278 + out: 282 279 return err; 283 280 } 284 281 ··· 477 480 return err; 478 481 } 479 482 480 - rcu_read_lock(); 481 483 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; 482 484 ops = rcu_dereference(offloads[proto]); 483 485 if (WARN_ON(!ops || !ops->callbacks.gro_complete)) 484 - goto out_unlock; 486 + goto out; 485 487 486 488 err = ops->callbacks.gro_complete(skb, nhoff + guehlen); 487 489 488 490 skb_set_inner_mac_header(skb, nhoff + guehlen); 489 491 490 - out_unlock: 491 - rcu_read_unlock(); 492 + out: 492 493 return err; 493 494 } 494 495
-3
net/ipv4/gre_offload.c
··· 253 253 if (greh->flags & GRE_CSUM) 254 254 grehlen += GRE_HEADER_SECTION; 255 255 256 - rcu_read_lock(); 257 256 ptype = gro_find_complete_by_type(type); 258 257 if (ptype) 259 258 err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); 260 - 261 - rcu_read_unlock(); 262 259 263 260 skb_set_inner_mac_header(skb, nhoff + grehlen); 264 261
-2
net/ipv4/udp_offload.c
··· 667 667 668 668 uh->len = newlen; 669 669 670 - rcu_read_lock(); 671 670 sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb, 672 671 udp4_lib_lookup_skb, skb, uh->source, uh->dest); 673 672 if (sk && udp_sk(sk)->gro_complete) { ··· 687 688 } else { 688 689 err = udp_gro_complete_segment(skb); 689 690 } 690 - rcu_read_unlock(); 691 691 692 692 if (skb->remcsum_offload) 693 693 skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
+2 -6
net/ipv6/ip6_offload.c
··· 327 327 328 328 iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); 329 329 330 - rcu_read_lock(); 331 - 332 330 nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops); 333 331 if (WARN_ON(!ops || !ops->callbacks.gro_complete)) 334 - goto out_unlock; 332 + goto out; 335 333 336 334 err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete, 337 335 udp6_gro_complete, skb, nhoff); 338 336 339 - out_unlock: 340 - rcu_read_unlock(); 341 - 337 + out: 342 338 return err; 343 339 } 344 340