Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: Remove code duplication between offload structures

Move the offload callbacks into its own structure.

Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Vlad Yasevich; committed by David S. Miller.
f191a1d1 c6b641a4

+66 -54
+7 -3
include/linux/netdevice.h
··· 1515 1515 struct list_head list; 1516 1516 }; 1517 1517 1518 - struct packet_offload { 1519 - __be16 type; /* This is really htons(ether_type). */ 1518 + struct offload_callbacks { 1520 1519 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 1521 1520 netdev_features_t features); 1522 1521 int (*gso_send_check)(struct sk_buff *skb); 1523 1522 struct sk_buff **(*gro_receive)(struct sk_buff **head, 1524 1523 struct sk_buff *skb); 1525 1524 int (*gro_complete)(struct sk_buff *skb); 1526 - struct list_head list; 1525 + }; 1526 + 1527 + struct packet_offload { 1528 + __be16 type; /* This is really htons(ether_type). */ 1529 + struct offload_callbacks callbacks; 1530 + struct list_head list; 1527 1531 }; 1528 1532 1529 1533 #include <linux/notifier.h>
+3 -7
include/net/protocol.h
··· 29 29 #if IS_ENABLED(CONFIG_IPV6) 30 30 #include <linux/ipv6.h> 31 31 #endif 32 + #include <linux/netdevice.h> 32 33 33 34 /* This is one larger than the largest protocol value that can be 34 35 * found in an ipv4 or ipv6 header. Since in both cases the protocol ··· 64 63 #endif 65 64 66 65 struct net_offload { 67 - int (*gso_send_check)(struct sk_buff *skb); 68 - struct sk_buff *(*gso_segment)(struct sk_buff *skb, 69 - netdev_features_t features); 70 - struct sk_buff **(*gro_receive)(struct sk_buff **head, 71 - struct sk_buff *skb); 72 - int (*gro_complete)(struct sk_buff *skb); 73 - unsigned int flags; /* Flags used by IPv6 for now */ 66 + struct offload_callbacks callbacks; 67 + unsigned int flags; /* Flags used by IPv6 for now */ 74 68 }; 75 69 /* This should be set for any extension header which is compatible with GSO. */ 76 70 #define INET6_PROTO_GSO_EXTHDR 0x1
+7 -7
net/core/dev.c
··· 2102 2102 2103 2103 rcu_read_lock(); 2104 2104 list_for_each_entry_rcu(ptype, &offload_base, list) { 2105 - if (ptype->type == type && ptype->gso_segment) { 2105 + if (ptype->type == type && ptype->callbacks.gso_segment) { 2106 2106 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 2107 - err = ptype->gso_send_check(skb); 2107 + err = ptype->callbacks.gso_send_check(skb); 2108 2108 segs = ERR_PTR(err); 2109 2109 if (err || skb_gso_ok(skb, features)) 2110 2110 break; 2111 2111 __skb_push(skb, (skb->data - 2112 2112 skb_network_header(skb))); 2113 2113 } 2114 - segs = ptype->gso_segment(skb, features); 2114 + segs = ptype->callbacks.gso_segment(skb, features); 2115 2115 break; 2116 2116 } 2117 2117 } ··· 3533 3533 3534 3534 rcu_read_lock(); 3535 3535 list_for_each_entry_rcu(ptype, head, list) { 3536 - if (ptype->type != type || !ptype->gro_complete) 3536 + if (ptype->type != type || !ptype->callbacks.gro_complete) 3537 3537 continue; 3538 3538 3539 - err = ptype->gro_complete(skb); 3539 + err = ptype->callbacks.gro_complete(skb); 3540 3540 break; 3541 3541 } 3542 3542 rcu_read_unlock(); ··· 3598 3598 3599 3599 rcu_read_lock(); 3600 3600 list_for_each_entry_rcu(ptype, head, list) { 3601 - if (ptype->type != type || !ptype->gro_receive) 3601 + if (ptype->type != type || !ptype->callbacks.gro_receive) 3602 3602 continue; 3603 3603 3604 3604 skb_set_network_header(skb, skb_gro_offset(skb)); ··· 3608 3608 NAPI_GRO_CB(skb)->flush = 0; 3609 3609 NAPI_GRO_CB(skb)->free = 0; 3610 3610 3611 - pp = ptype->gro_receive(&napi->gro_list, skb); 3611 + pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); 3612 3612 break; 3613 3613 } 3614 3614 rcu_read_unlock();
+24 -18
net/ipv4/af_inet.c
··· 1276 1276 1277 1277 rcu_read_lock(); 1278 1278 ops = rcu_dereference(inet_offloads[proto]); 1279 - if (likely(ops && ops->gso_send_check)) 1280 - err = ops->gso_send_check(skb); 1279 + if (likely(ops && ops->callbacks.gso_send_check)) 1280 + err = ops->callbacks.gso_send_check(skb); 1281 1281 rcu_read_unlock(); 1282 1282 1283 1283 out: ··· 1326 1326 1327 1327 rcu_read_lock(); 1328 1328 ops = rcu_dereference(inet_offloads[proto]); 1329 - if (likely(ops && ops->gso_segment)) 1330 - segs = ops->gso_segment(skb, features); 1329 + if (likely(ops && ops->callbacks.gso_segment)) 1330 + segs = ops->callbacks.gso_segment(skb, features); 1331 1331 rcu_read_unlock(); 1332 1332 1333 1333 if (!segs || IS_ERR(segs)) ··· 1379 1379 1380 1380 rcu_read_lock(); 1381 1381 ops = rcu_dereference(inet_offloads[proto]); 1382 - if (!ops || !ops->gro_receive) 1382 + if (!ops || !ops->callbacks.gro_receive) 1383 1383 goto out_unlock; 1384 1384 1385 1385 if (*(u8 *)iph != 0x45) ··· 1420 1420 skb_gro_pull(skb, sizeof(*iph)); 1421 1421 skb_set_transport_header(skb, skb_gro_offset(skb)); 1422 1422 1423 - pp = ops->gro_receive(head, skb); 1423 + pp = ops->callbacks.gro_receive(head, skb); 1424 1424 1425 1425 out_unlock: 1426 1426 rcu_read_unlock(); ··· 1444 1444 1445 1445 rcu_read_lock(); 1446 1446 ops = rcu_dereference(inet_offloads[proto]); 1447 - if (WARN_ON(!ops || !ops->gro_complete)) 1447 + if (WARN_ON(!ops || !ops->callbacks.gro_complete)) 1448 1448 goto out_unlock; 1449 1449 1450 - err = ops->gro_complete(skb); 1450 + err = ops->callbacks.gro_complete(skb); 1451 1451 1452 1452 out_unlock: 1453 1453 rcu_read_unlock(); ··· 1563 1563 }; 1564 1564 1565 1565 static const struct net_offload tcp_offload = { 1566 - .gso_send_check = tcp_v4_gso_send_check, 1567 - .gso_segment = tcp_tso_segment, 1568 - .gro_receive = tcp4_gro_receive, 1569 - .gro_complete = tcp4_gro_complete, 1566 + .callbacks = { 1567 + .gso_send_check = tcp_v4_gso_send_check, 1568 + .gso_segment = tcp_tso_segment, 1569 + .gro_receive = tcp4_gro_receive, 1570 + .gro_complete = tcp4_gro_complete, 1571 + }, 1570 1572 }; 1572 1574 static const struct net_protocol udp_protocol = { ··· 1579 1577 }; 1580 1578 1581 1579 static const struct net_offload udp_offload = { 1582 - .gso_send_check = udp4_ufo_send_check, 1583 - .gso_segment = udp4_ufo_fragment, 1580 + .callbacks = { 1581 + .gso_send_check = udp4_ufo_send_check, 1582 + .gso_segment = udp4_ufo_fragment, 1583 + }, 1584 1584 }; 1586 1586 static const struct net_protocol icmp_protocol = { ··· 1671 1667 1672 1668 static struct packet_offload ip_packet_offload __read_mostly = { 1673 1669 .type = cpu_to_be16(ETH_P_IP), 1674 - .gso_send_check = inet_gso_send_check, 1675 - .gso_segment = inet_gso_segment, 1676 - .gro_receive = inet_gro_receive, 1677 - .gro_complete = inet_gro_complete, 1670 + .callbacks = { 1671 + .gso_send_check = inet_gso_send_check, 1672 + .gso_segment = inet_gso_segment, 1673 + .gro_receive = inet_gro_receive, 1674 + .gro_complete = inet_gro_complete, 1675 + }, 1678 1676 }; 1680 1678 static int __init ipv4_offload_init(void)
+15 -13
net/ipv6/ip6_offload.c
··· 70 70 ops = rcu_dereference(inet6_offloads[ 71 71 ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); 72 72 73 - if (likely(ops && ops->gso_send_check)) { 73 + if (likely(ops && ops->callbacks.gso_send_check)) { 74 74 skb_reset_transport_header(skb); 75 - err = ops->gso_send_check(skb); 75 + err = ops->callbacks.gso_send_check(skb); 76 76 } 77 77 rcu_read_unlock(); 78 78 ··· 113 113 proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); 114 114 rcu_read_lock(); 115 115 ops = rcu_dereference(inet6_offloads[proto]); 116 - if (likely(ops && ops->gso_segment)) { 116 + if (likely(ops && ops->callbacks.gso_segment)) { 117 117 skb_reset_transport_header(skb); 118 - segs = ops->gso_segment(skb, features); 118 + segs = ops->callbacks.gso_segment(skb, features); 119 119 } 120 120 rcu_read_unlock(); 121 121 ··· 173 173 rcu_read_lock(); 174 174 proto = iph->nexthdr; 175 175 ops = rcu_dereference(inet6_offloads[proto]); 176 - if (!ops || !ops->gro_receive) { 176 + if (!ops || !ops->callbacks.gro_receive) { 177 177 __pskb_pull(skb, skb_gro_offset(skb)); 178 178 proto = ipv6_gso_pull_exthdrs(skb, proto); 179 179 skb_gro_pull(skb, -skb_transport_offset(skb)); ··· 181 181 __skb_push(skb, skb_gro_offset(skb)); 182 182 183 183 ops = rcu_dereference(inet6_offloads[proto]); 184 - if (!ops || !ops->gro_receive) 184 + if (!ops || !ops->callbacks.gro_receive) 185 185 goto out_unlock; 186 186 187 187 iph = ipv6_hdr(skb); ··· 220 220 csum = skb->csum; 221 221 skb_postpull_rcsum(skb, iph, skb_network_header_len(skb)); 222 222 223 - pp = ops->gro_receive(head, skb); 223 + pp = ops->callbacks.gro_receive(head, skb); 224 224 225 225 skb->csum = csum; 226 226 ··· 244 244 245 245 rcu_read_lock(); 246 246 ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]); 247 - if (WARN_ON(!ops || !ops->gro_complete)) 247 + if (WARN_ON(!ops || !ops->callbacks.gro_complete)) 248 248 goto out_unlock; 249 249 250 - err = ops->gro_complete(skb); 250 + err = ops->callbacks.gro_complete(skb); 251 251 252 252 out_unlock: 253 253 rcu_read_unlock(); ··· 257 257 258 258 static struct packet_offload ipv6_packet_offload __read_mostly = { 259 259 .type = cpu_to_be16(ETH_P_IPV6), 260 - .gso_send_check = ipv6_gso_send_check, 261 - .gso_segment = ipv6_gso_segment, 262 - .gro_receive = ipv6_gro_receive, 263 - .gro_complete = ipv6_gro_complete, 260 + .callbacks = { 261 + .gso_send_check = ipv6_gso_send_check, 262 + .gso_segment = ipv6_gso_segment, 263 + .gro_receive = ipv6_gro_receive, 264 + .gro_complete = ipv6_gro_complete, 265 + }, 264 266 }; 266 268 static int __init ipv6_offload_init(void)
+6 -4
net/ipv6/tcpv6_offload.c
··· 81 81 } 82 82 83 83 static const struct net_offload tcpv6_offload = { 84 - .gso_send_check = tcp_v6_gso_send_check, 85 - .gso_segment = tcp_tso_segment, 86 - .gro_receive = tcp6_gro_receive, 87 - .gro_complete = tcp6_gro_complete, 84 + .callbacks = { 85 + .gso_send_check = tcp_v6_gso_send_check, 86 + .gso_segment = tcp_tso_segment, 87 + .gro_receive = tcp6_gro_receive, 88 + .gro_complete = tcp6_gro_complete, 89 + }, 88 90 }; 89 91 90 92 int __init tcpv6_offload_init(void)
+4 -2
net/ipv6/udp_offload.c
··· 107 107 return segs; 108 108 } 109 109 static const struct net_offload udpv6_offload = { 110 - .gso_send_check = udp6_ufo_send_check, 111 - .gso_segment = udp6_ufo_fragment, 110 + .callbacks = { 111 + .gso_send_check = udp6_ufo_send_check, 112 + .gso_segment = udp6_ufo_fragment, 113 + }, 112 114 }; 113 115 114 116 int __init udp_offload_init(void)