Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: unify the pcpu_tstats and br_cpu_netstats as one

They are the same, so unify them as one: pcpu_sw_netstats.

Define pcpu_sw_netstats in netdevice.h, remove pcpu_tstats
from if_tunnel.h and remove br_cpu_netstats from br_private.h

Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Li RongQing <roy.qing.li@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Li RongQing and committed by
David S. Miller
8f84985f 653864d9

+60 -66
+6 -5
drivers/net/vxlan.c
··· 1081 1081 struct iphdr *oip = NULL; 1082 1082 struct ipv6hdr *oip6 = NULL; 1083 1083 struct vxlan_dev *vxlan; 1084 - struct pcpu_tstats *stats; 1084 + struct pcpu_sw_netstats *stats; 1085 1085 union vxlan_addr saddr; 1086 1086 __u32 vni; 1087 1087 int err = 0; ··· 1587 1587 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 1588 1588 struct vxlan_dev *dst_vxlan) 1589 1589 { 1590 - struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 1591 - struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); 1590 + struct pcpu_sw_netstats *tx_stats, *rx_stats; 1592 1591 union vxlan_addr loopback; 1593 1592 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; 1594 1593 1594 + tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 1595 + rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); 1595 1596 skb->pkt_type = PACKET_HOST; 1596 1597 skb->encapsulation = 0; 1597 1598 skb->dev = dst_vxlan->dev; ··· 1898 1897 struct vxlan_sock *vs; 1899 1898 int i; 1900 1899 1901 - dev->tstats = alloc_percpu(struct pcpu_tstats); 1900 + dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 1902 1901 if (!dev->tstats) 1903 1902 return -ENOMEM; 1904 1903 1905 1904 for_each_possible_cpu(i) { 1906 - struct pcpu_tstats *vxlan_stats; 1905 + struct pcpu_sw_netstats *vxlan_stats; 1907 1906 vxlan_stats = per_cpu_ptr(dev->tstats, i); 1908 1907 u64_stats_init(&vxlan_stats->syncp); 1909 1908 }
-9
include/linux/if_tunnel.h
··· 13 13 #define for_each_ip_tunnel_rcu(pos, start) \ 14 14 for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next)) 15 15 16 - /* often modified stats are per cpu, other are shared (netdev->stats) */ 17 - struct pcpu_tstats { 18 - u64 rx_packets; 19 - u64 rx_bytes; 20 - u64 tx_packets; 21 - u64 tx_bytes; 22 - struct u64_stats_sync syncp; 23 - }; 24 - 25 16 #endif /* _IF_TUNNEL_H_ */
+10 -1
include/linux/netdevice.h
··· 1409 1409 union { 1410 1410 void *ml_priv; 1411 1411 struct pcpu_lstats __percpu *lstats; /* loopback stats */ 1412 - struct pcpu_tstats __percpu *tstats; /* tunnel stats */ 1412 + struct pcpu_sw_netstats __percpu *tstats; 1413 1413 struct pcpu_dstats __percpu *dstats; /* dummy stats */ 1414 1414 struct pcpu_vstats __percpu *vstats; /* veth stats */ 1415 1415 }; ··· 1683 1683 __be16 type; /* This is really htons(ether_type). */ 1684 1684 struct offload_callbacks callbacks; 1685 1685 struct list_head list; 1686 + }; 1687 + 1688 + /* often modified stats are per cpu, other are shared (netdev->stats) */ 1689 + struct pcpu_sw_netstats { 1690 + u64 rx_packets; 1691 + u64 rx_bytes; 1692 + u64 tx_packets; 1693 + u64 tx_bytes; 1694 + struct u64_stats_sync syncp; 1686 1695 }; 1687 1696 1688 1697 #include <linux/notifier.h>
+1 -1
include/net/ip6_tunnel.h
··· 79 79 err = ip6_local_out(skb); 80 80 81 81 if (net_xmit_eval(err) == 0) { 82 - struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats); 82 + struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 83 83 u64_stats_update_begin(&tstats->syncp); 84 84 tstats->tx_bytes += pkt_len; 85 85 tstats->tx_packets++;
+2 -2
include/net/ip_tunnels.h
··· 162 162 163 163 static inline void iptunnel_xmit_stats(int err, 164 164 struct net_device_stats *err_stats, 165 - struct pcpu_tstats __percpu *stats) 165 + struct pcpu_sw_netstats __percpu *stats) 166 166 { 167 167 if (err > 0) { 168 - struct pcpu_tstats *tstats = this_cpu_ptr(stats); 168 + struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats); 169 169 170 170 u64_stats_update_begin(&tstats->syncp); 171 171 tstats->tx_bytes += err;
+5 -5
net/bridge/br_device.c
··· 32 32 const unsigned char *dest = skb->data; 33 33 struct net_bridge_fdb_entry *dst; 34 34 struct net_bridge_mdb_entry *mdst; 35 - struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); 35 + struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats); 36 36 u16 vid = 0; 37 37 38 38 rcu_read_lock(); ··· 90 90 struct net_bridge *br = netdev_priv(dev); 91 91 int i; 92 92 93 - br->stats = alloc_percpu(struct br_cpu_netstats); 93 + br->stats = alloc_percpu(struct pcpu_sw_netstats); 94 94 if (!br->stats) 95 95 return -ENOMEM; 96 96 97 97 for_each_possible_cpu(i) { 98 - struct br_cpu_netstats *br_dev_stats; 98 + struct pcpu_sw_netstats *br_dev_stats; 99 99 br_dev_stats = per_cpu_ptr(br->stats, i); 100 100 u64_stats_init(&br_dev_stats->syncp); 101 101 } ··· 135 135 struct rtnl_link_stats64 *stats) 136 136 { 137 137 struct net_bridge *br = netdev_priv(dev); 138 - struct br_cpu_netstats tmp, sum = { 0 }; 138 + struct pcpu_sw_netstats tmp, sum = { 0 }; 139 139 unsigned int cpu; 140 140 141 141 for_each_possible_cpu(cpu) { 142 142 unsigned int start; 143 - const struct br_cpu_netstats *bstats 143 + const struct pcpu_sw_netstats *bstats 144 144 = per_cpu_ptr(br->stats, cpu); 145 145 do { 146 146 start = u64_stats_fetch_begin_bh(&bstats->syncp);
+1 -1
net/bridge/br_input.c
··· 28 28 { 29 29 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; 30 30 struct net_bridge *br = netdev_priv(brdev); 31 - struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); 31 + struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats); 32 32 33 33 u64_stats_update_begin(&brstats->syncp); 34 34 brstats->rx_packets++;
+1 -9
net/bridge/br_private.h
··· 210 210 rtnl_dereference(dev->rx_handler_data) : NULL; 211 211 } 212 212 213 - struct br_cpu_netstats { 214 - u64 rx_packets; 215 - u64 rx_bytes; 216 - u64 tx_packets; 217 - u64 tx_bytes; 218 - struct u64_stats_sync syncp; 219 - }; 220 - 221 213 struct net_bridge 222 214 { 223 215 spinlock_t lock; 224 216 struct list_head port_list; 225 217 struct net_device *dev; 226 218 227 - struct br_cpu_netstats __percpu *stats; 219 + struct pcpu_sw_netstats __percpu *stats; 228 220 spinlock_t hash_lock; 229 221 struct hlist_head hash[BR_HASH_SIZE]; 230 222 #ifdef CONFIG_BRIDGE_NETFILTER
+5 -4
net/ipv4/ip_tunnel.c
··· 132 132 int i; 133 133 134 134 for_each_possible_cpu(i) { 135 - const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 135 + const struct pcpu_sw_netstats *tstats = 136 + per_cpu_ptr(dev->tstats, i); 136 137 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 137 138 unsigned int start; 138 139 ··· 461 460 int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, 462 461 const struct tnl_ptk_info *tpi, bool log_ecn_error) 463 462 { 464 - struct pcpu_tstats *tstats; 463 + struct pcpu_sw_netstats *tstats; 465 464 const struct iphdr *iph = ip_hdr(skb); 466 465 int err; 467 466 ··· 1050 1049 int i, err; 1051 1050 1052 1051 dev->destructor = ip_tunnel_dev_free; 1053 - dev->tstats = alloc_percpu(struct pcpu_tstats); 1052 + dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 1054 1053 if (!dev->tstats) 1055 1054 return -ENOMEM; 1056 1055 1057 1056 for_each_possible_cpu(i) { 1058 - struct pcpu_tstats *ipt_stats; 1057 + struct pcpu_sw_netstats *ipt_stats; 1059 1058 ipt_stats = per_cpu_ptr(dev->tstats, i); 1060 1059 u64_stats_init(&ipt_stats->syncp); 1061 1060 }
+1 -1
net/ipv4/ip_vti.c
··· 60 60 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 61 61 iph->saddr, iph->daddr, 0); 62 62 if (tunnel != NULL) { 63 - struct pcpu_tstats *tstats; 63 + struct pcpu_sw_netstats *tstats; 64 64 u32 oldmark = skb->mark; 65 65 int ret; 66 66
+5 -5
net/ipv6/ip6_gre.c
··· 498 498 &ipv6h->saddr, &ipv6h->daddr, key, 499 499 gre_proto); 500 500 if (tunnel) { 501 - struct pcpu_tstats *tstats; 501 + struct pcpu_sw_netstats *tstats; 502 502 503 503 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 504 504 goto drop; ··· 1265 1265 if (ipv6_addr_any(&tunnel->parms.raddr)) 1266 1266 dev->header_ops = &ip6gre_header_ops; 1267 1267 1268 - dev->tstats = alloc_percpu(struct pcpu_tstats); 1268 + dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 1269 1269 if (!dev->tstats) 1270 1270 return -ENOMEM; 1271 1271 1272 1272 for_each_possible_cpu(i) { 1273 - struct pcpu_tstats *ip6gre_tunnel_stats; 1273 + struct pcpu_sw_netstats *ip6gre_tunnel_stats; 1274 1274 ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i); 1275 1275 u64_stats_init(&ip6gre_tunnel_stats->syncp); 1276 1276 } ··· 1466 1466 1467 1467 ip6gre_tnl_link_config(tunnel, 1); 1468 1468 1469 - dev->tstats = alloc_percpu(struct pcpu_tstats); 1469 + dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 1470 1470 if (!dev->tstats) 1471 1471 return -ENOMEM; 1472 1472 1473 1473 for_each_possible_cpu(i) { 1474 - struct pcpu_tstats *ip6gre_tap_stats; 1474 + struct pcpu_sw_netstats *ip6gre_tap_stats; 1475 1475 ip6gre_tap_stats = per_cpu_ptr(dev->tstats, i); 1476 1476 u64_stats_init(&ip6gre_tap_stats->syncp); 1477 1477 }
+6 -6
net/ipv6/ip6_tunnel.c
··· 29 29 #include <linux/if.h> 30 30 #include <linux/in.h> 31 31 #include <linux/ip.h> 32 - #include <linux/if_tunnel.h> 33 32 #include <linux/net.h> 34 33 #include <linux/in6.h> 35 34 #include <linux/netdevice.h> ··· 101 102 102 103 static struct net_device_stats *ip6_get_stats(struct net_device *dev) 103 104 { 104 - struct pcpu_tstats sum = { 0 }; 105 + struct pcpu_sw_netstats sum = { 0 }; 105 106 int i; 106 107 107 108 for_each_possible_cpu(i) { 108 - const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 109 + const struct pcpu_sw_netstats *tstats = 110 + per_cpu_ptr(dev->tstats, i); 109 111 110 112 sum.rx_packets += tstats->rx_packets; 111 113 sum.rx_bytes += tstats->rx_bytes; ··· 784 784 785 785 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, 786 786 &ipv6h->daddr)) != NULL) { 787 - struct pcpu_tstats *tstats; 787 + struct pcpu_sw_netstats *tstats; 788 788 789 789 if (t->parms.proto != ipproto && t->parms.proto != 0) { 790 790 rcu_read_unlock(); ··· 1497 1497 1498 1498 t->dev = dev; 1499 1499 t->net = dev_net(dev); 1500 - dev->tstats = alloc_percpu(struct pcpu_tstats); 1500 + dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 1501 1501 if (!dev->tstats) 1502 1502 return -ENOMEM; 1503 1503 1504 1504 for_each_possible_cpu(i) { 1505 - struct pcpu_tstats *ip6_tnl_stats; 1505 + struct pcpu_sw_netstats *ip6_tnl_stats; 1506 1506 ip6_tnl_stats = per_cpu_ptr(dev->tstats, i); 1507 1507 u64_stats_init(&ip6_tnl_stats->syncp); 1508 1508 }
+5 -5
net/ipv6/ip6_vti.c
··· 24 24 #include <linux/if.h> 25 25 #include <linux/in.h> 26 26 #include <linux/ip.h> 27 - #include <linux/if_tunnel.h> 28 27 #include <linux/net.h> 29 28 #include <linux/in6.h> 30 29 #include <linux/netdevice.h> ··· 76 77 77 78 static struct net_device_stats *vti6_get_stats(struct net_device *dev) 78 79 { 79 - struct pcpu_tstats sum = { 0 }; 80 + struct pcpu_sw_netstats sum = { 0 }; 80 81 int i; 81 82 82 83 for_each_possible_cpu(i) { 83 - const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 84 + const struct pcpu_sw_netstats *tstats = 85 + per_cpu_ptr(dev->tstats, i); 84 86 85 87 sum.rx_packets += tstats->rx_packets; 86 88 sum.rx_bytes += tstats->rx_bytes; ··· 312 312 313 313 if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, 314 314 &ipv6h->daddr)) != NULL) { 315 - struct pcpu_tstats *tstats; 315 + struct pcpu_sw_netstats *tstats; 316 316 317 317 if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) { 318 318 rcu_read_unlock(); ··· 753 753 754 754 t->dev = dev; 755 755 t->net = dev_net(dev); 756 - dev->tstats = alloc_percpu(struct pcpu_tstats); 756 + dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 757 757 if (!dev->tstats) 758 758 return -ENOMEM; 759 759 return 0;
+5 -5
net/ipv6/sit.c
··· 671 671 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, 672 672 iph->saddr, iph->daddr); 673 673 if (tunnel != NULL) { 674 - struct pcpu_tstats *tstats; 674 + struct pcpu_sw_netstats *tstats; 675 675 676 676 if (tunnel->parms.iph.protocol != IPPROTO_IPV6 && 677 677 tunnel->parms.iph.protocol != 0) ··· 1361 1361 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); 1362 1362 1363 1363 ipip6_tunnel_bind_dev(dev); 1364 - dev->tstats = alloc_percpu(struct pcpu_tstats); 1364 + dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 1365 1365 if (!dev->tstats) 1366 1366 return -ENOMEM; 1367 1367 1368 1368 for_each_possible_cpu(i) { 1369 - struct pcpu_tstats *ipip6_tunnel_stats; 1369 + struct pcpu_sw_netstats *ipip6_tunnel_stats; 1370 1370 ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i); 1371 1371 u64_stats_init(&ipip6_tunnel_stats->syncp); 1372 1372 } ··· 1391 1391 iph->ihl = 5; 1392 1392 iph->ttl = 64; 1393 1393 1394 - dev->tstats = alloc_percpu(struct pcpu_tstats); 1394 + dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 1395 1395 if (!dev->tstats) 1396 1396 return -ENOMEM; 1397 1397 1398 1398 for_each_possible_cpu(i) { 1399 - struct pcpu_tstats *ipip6_fb_stats; 1399 + struct pcpu_sw_netstats *ipip6_fb_stats; 1400 1400 ipip6_fb_stats = per_cpu_ptr(dev->tstats, i); 1401 1401 u64_stats_init(&ipip6_fb_stats->syncp); 1402 1402 }
+6 -6
net/openvswitch/vport.c
··· 136 136 vport->ops = ops; 137 137 INIT_HLIST_NODE(&vport->dp_hash_node); 138 138 139 - vport->percpu_stats = alloc_percpu(struct pcpu_tstats); 139 + vport->percpu_stats = alloc_percpu(struct pcpu_sw_netstats); 140 140 if (!vport->percpu_stats) { 141 141 kfree(vport); 142 142 return ERR_PTR(-ENOMEM); 143 143 } 144 144 145 145 for_each_possible_cpu(i) { 146 - struct pcpu_tstats *vport_stats; 146 + struct pcpu_sw_netstats *vport_stats; 147 147 vport_stats = per_cpu_ptr(vport->percpu_stats, i); 148 148 u64_stats_init(&vport_stats->syncp); 149 149 } ··· 275 275 spin_unlock_bh(&vport->stats_lock); 276 276 277 277 for_each_possible_cpu(i) { 278 - const struct pcpu_tstats *percpu_stats; 279 - struct pcpu_tstats local_stats; 278 + const struct pcpu_sw_netstats *percpu_stats; 279 + struct pcpu_sw_netstats local_stats; 280 280 unsigned int start; 281 281 282 282 percpu_stats = per_cpu_ptr(vport->percpu_stats, i); ··· 344 344 void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, 345 345 struct ovs_key_ipv4_tunnel *tun_key) 346 346 { 347 - struct pcpu_tstats *stats; 347 + struct pcpu_sw_netstats *stats; 348 348 349 349 stats = this_cpu_ptr(vport->percpu_stats); 350 350 u64_stats_update_begin(&stats->syncp); ··· 370 370 int sent = vport->ops->send(vport, skb); 371 371 372 372 if (likely(sent > 0)) { 373 - struct pcpu_tstats *stats; 373 + struct pcpu_sw_netstats *stats; 374 374 375 375 stats = this_cpu_ptr(vport->percpu_stats); 376 376
+1 -1
net/openvswitch/vport.h
··· 87 87 struct hlist_node dp_hash_node; 88 88 const struct vport_ops *ops; 89 89 90 - struct pcpu_tstats __percpu *percpu_stats; 90 + struct pcpu_sw_netstats __percpu *percpu_stats; 91 91 92 92 spinlock_t stats_lock; 93 93 struct vport_err_stats err_stats;