Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: Explicitly initialize u64_stats_sync structures for lockdep

In order to enable lockdep on seqcount/seqlock structures, we
must explicitly initialize any locks.

The u64_stats_sync structure uses a seqcount, and thus we need
to introduce a u64_stats_init() function and use it to initialize
the structure.

This unfortunately adds a lot of fairly trivial initialization code
to a number of drivers. But the benefit of ensuring correctness makes
this worthwhile.

Because these changes are required for lockdep to be enabled, and the
changes are quite trivial, I've not yet split this patch out into 30-some
separate patches, as I figured it would be better to get the various
maintainers' thoughts on how to best merge this change along with
the seqcount lockdep enablement.

Feedback would be appreciated!

Signed-off-by: John Stultz <john.stultz@linaro.org>
Acked-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
Cc: James Morris <jmorris@namei.org>
Cc: Jesse Gross <jesse@nicira.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Mirko Lindner <mlindner@marvell.com>
Cc: Patrick McHardy <kaber@trash.net>
Cc: Roger Luethi <rl@hellgate.ch>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Simon Horman <horms@verge.net.au>
Cc: Stephen Hemminger <stephen@networkplumber.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: Wensong Zhang <wensong@linux-vs.org>
Cc: netdev@vger.kernel.org
Link: http://lkml.kernel.org/r/1381186321-4906-2-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

John Stultz and committed by
Ingo Molnar
827da44c 32cf7c3c

+253 -6
+6
drivers/net/dummy.c
··· 88 88 89 89 static int dummy_dev_init(struct net_device *dev) 90 90 { 91 + int i; 91 92 dev->dstats = alloc_percpu(struct pcpu_dstats); 92 93 if (!dev->dstats) 93 94 return -ENOMEM; 94 95 96 + for_each_possible_cpu(i) { 97 + struct pcpu_dstats *dstats; 98 + dstats = per_cpu_ptr(dev->dstats, i); 99 + u64_stats_init(&dstats->syncp); 100 + } 95 101 return 0; 96 102 } 97 103
+4
drivers/net/ethernet/emulex/benet/be_main.c
··· 2047 2047 if (status) 2048 2048 return status; 2049 2049 2050 + u64_stats_init(&txo->stats.sync); 2051 + u64_stats_init(&txo->stats.sync_compl); 2052 + 2050 2053 /* If num_evt_qs is less than num_tx_qs, then more than 2051 2054 * one txq share an eq 2052 2055 */ ··· 2111 2108 if (rc) 2112 2109 return rc; 2113 2110 2111 + u64_stats_init(&rxo->stats.sync); 2114 2112 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q; 2115 2113 rc = be_cmd_cq_create(adapter, cq, eq, false, 3); 2116 2114 if (rc)
+5
drivers/net/ethernet/intel/igb/igb_main.c
··· 1223 1223 ring->count = adapter->tx_ring_count; 1224 1224 ring->queue_index = txr_idx; 1225 1225 1226 + u64_stats_init(&ring->tx_syncp); 1227 + u64_stats_init(&ring->tx_syncp2); 1228 + 1226 1229 /* assign ring to adapter */ 1227 1230 adapter->tx_ring[txr_idx] = ring; 1228 1231 ··· 1258 1255 /* apply Rx specific ring traits */ 1259 1256 ring->count = adapter->rx_ring_count; 1260 1257 ring->queue_index = rxr_idx; 1258 + 1259 + u64_stats_init(&ring->rx_syncp); 1261 1260 1262 1261 /* assign ring to adapter */ 1263 1262 adapter->rx_ring[rxr_idx] = ring;
+4
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 4867 4867 if (!tx_ring->tx_buffer_info) 4868 4868 goto err; 4869 4869 4870 + u64_stats_init(&tx_ring->syncp); 4871 + 4870 4872 /* round up to nearest 4K */ 4871 4873 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 4872 4874 tx_ring->size = ALIGN(tx_ring->size, 4096); ··· 4950 4948 rx_ring->rx_buffer_info = vzalloc(size); 4951 4949 if (!rx_ring->rx_buffer_info) 4952 4950 goto err; 4951 + 4952 + u64_stats_init(&rx_ring->syncp); 4953 4953 4954 4954 /* Round up to nearest 4K */ 4955 4955 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+3
drivers/net/ethernet/marvell/mvneta.c
··· 2792 2792 2793 2793 pp = netdev_priv(dev); 2794 2794 2795 + u64_stats_init(&pp->tx_stats.syncp); 2796 + u64_stats_init(&pp->rx_stats.syncp); 2797 + 2795 2798 pp->weight = MVNETA_RX_POLL_WEIGHT; 2796 2799 pp->phy_node = phy_node; 2797 2800 pp->phy_interface = phy_mode;
+3
drivers/net/ethernet/marvell/sky2.c
··· 4763 4763 sky2->hw = hw; 4764 4764 sky2->msg_enable = netif_msg_init(debug, default_msg); 4765 4765 4766 + u64_stats_init(&sky2->tx_stats.syncp); 4767 + u64_stats_init(&sky2->rx_stats.syncp); 4768 + 4766 4769 /* Auto speed and flow control */ 4767 4770 sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE; 4768 4771 if (hw->chip_id != CHIP_ID_YUKON_XL)
+4
drivers/net/ethernet/neterion/vxge/vxge-main.c
··· 2072 2072 vdev->config.tx_steering_type; 2073 2073 vpath->fifo.ndev = vdev->ndev; 2074 2074 vpath->fifo.pdev = vdev->pdev; 2075 + 2076 + u64_stats_init(&vpath->fifo.stats.syncp); 2077 + u64_stats_init(&vpath->ring.stats.syncp); 2078 + 2075 2079 if (vdev->config.tx_steering_type) 2076 2080 vpath->fifo.txq = 2077 2081 netdev_get_tx_queue(vdev->ndev, i);
+2
drivers/net/ethernet/nvidia/forcedeth.c
··· 5619 5619 spin_lock_init(&np->lock); 5620 5620 spin_lock_init(&np->hwstats_lock); 5621 5621 SET_NETDEV_DEV(dev, &pci_dev->dev); 5622 + u64_stats_init(&np->swstats_rx_syncp); 5623 + u64_stats_init(&np->swstats_tx_syncp); 5622 5624 5623 5625 init_timer(&np->oom_kick); 5624 5626 np->oom_kick.data = (unsigned long) dev;
+3
drivers/net/ethernet/realtek/8139too.c
··· 791 791 792 792 pci_set_master (pdev); 793 793 794 + u64_stats_init(&tp->rx_stats.syncp); 795 + u64_stats_init(&tp->tx_stats.syncp); 796 + 794 797 retry: 795 798 /* PIO bar register comes first. */ 796 799 bar = !use_io;
+2
drivers/net/ethernet/tile/tilepro.c
··· 1008 1008 info->egress_timer.data = (long)info; 1009 1009 info->egress_timer.function = tile_net_handle_egress_timer; 1010 1010 1011 + u64_stats_init(&info->stats.syncp); 1012 + 1011 1013 priv->cpu[my_cpu] = info; 1012 1014 1013 1015 /*
+3
drivers/net/ethernet/via/via-rhine.c
··· 987 987 988 988 rp->base = ioaddr; 989 989 990 + u64_stats_init(&rp->tx_stats.syncp); 991 + u64_stats_init(&rp->rx_stats.syncp); 992 + 990 993 /* Get chip registers into a sane state */ 991 994 rhine_power_init(dev); 992 995 rhine_hw_init(dev, pioaddr);
+5
drivers/net/ifb.c
··· 265 265 static int __init ifb_init_one(int index) 266 266 { 267 267 struct net_device *dev_ifb; 268 + struct ifb_private *dp; 268 269 int err; 269 270 270 271 dev_ifb = alloc_netdev(sizeof(struct ifb_private), ··· 273 272 274 273 if (!dev_ifb) 275 274 return -ENOMEM; 275 + 276 + dp = netdev_priv(dev_ifb); 277 + u64_stats_init(&dp->rsync); 278 + u64_stats_init(&dp->tsync); 276 279 277 280 dev_ifb->rtnl_link_ops = &ifb_link_ops; 278 281 err = register_netdevice(dev_ifb);
+6
drivers/net/loopback.c
··· 137 137 138 138 static int loopback_dev_init(struct net_device *dev) 139 139 { 140 + int i; 140 141 dev->lstats = alloc_percpu(struct pcpu_lstats); 141 142 if (!dev->lstats) 142 143 return -ENOMEM; 143 144 145 + for_each_possible_cpu(i) { 146 + struct pcpu_lstats *lb_stats; 147 + lb_stats = per_cpu_ptr(dev->lstats, i); 148 + u64_stats_init(&lb_stats->syncp); 149 + } 144 150 return 0; 145 151 } 146 152
+7
drivers/net/macvlan.c
··· 501 501 { 502 502 struct macvlan_dev *vlan = netdev_priv(dev); 503 503 const struct net_device *lowerdev = vlan->lowerdev; 504 + int i; 504 505 505 506 dev->state = (dev->state & ~MACVLAN_STATE_MASK) | 506 507 (lowerdev->state & MACVLAN_STATE_MASK); ··· 516 515 vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats); 517 516 if (!vlan->pcpu_stats) 518 517 return -ENOMEM; 518 + 519 + for_each_possible_cpu(i) { 520 + struct macvlan_pcpu_stats *mvlstats; 521 + mvlstats = per_cpu_ptr(vlan->pcpu_stats, i); 522 + u64_stats_init(&mvlstats->syncp); 523 + } 519 524 520 525 return 0; 521 526 }
+8
drivers/net/nlmon.c
··· 47 47 48 48 static int nlmon_dev_init(struct net_device *dev) 49 49 { 50 + int i; 51 + 50 52 dev->lstats = alloc_percpu(struct pcpu_lstats); 53 + 54 + for_each_possible_cpu(i) { 55 + struct pcpu_lstats *nlmstats; 56 + nlmstats = per_cpu_ptr(dev->lstats, i); 57 + u64_stats_init(&nlmstats->syncp); 58 + } 51 59 52 60 return dev->lstats == NULL ? -ENOMEM : 0; 53 61 }
+6
drivers/net/team/team.c
··· 1540 1540 if (!team->pcpu_stats) 1541 1541 return -ENOMEM; 1542 1542 1543 + for_each_possible_cpu(i) { 1544 + struct team_pcpu_stats *team_stats; 1545 + team_stats = per_cpu_ptr(team->pcpu_stats, i); 1546 + u64_stats_init(&team_stats->syncp); 1547 + } 1548 + 1543 1549 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) 1544 1550 INIT_HLIST_HEAD(&team->en_port_hlist[i]); 1545 1551 INIT_LIST_HEAD(&team->port_list);
+8 -1
drivers/net/team/team_mode_loadbalance.c
··· 570 570 { 571 571 struct lb_priv *lb_priv = get_lb_priv(team); 572 572 lb_select_tx_port_func_t *func; 573 - int err; 573 + int i, err; 574 574 575 575 /* set default tx port selector */ 576 576 func = lb_select_tx_port_get_func("hash"); ··· 587 587 err = -ENOMEM; 588 588 goto err_alloc_pcpu_stats; 589 589 } 590 + 591 + for_each_possible_cpu(i) { 592 + struct lb_pcpu_stats *team_lb_stats; 593 + team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); 594 + u64_stats_init(&team_lb_stats->syncp); 595 + } 596 + 590 597 591 598 INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh); 592 599
+8
drivers/net/veth.c
··· 230 230 231 231 static int veth_dev_init(struct net_device *dev) 232 232 { 233 + int i; 234 + 233 235 dev->vstats = alloc_percpu(struct pcpu_vstats); 234 236 if (!dev->vstats) 235 237 return -ENOMEM; 238 + 239 + for_each_possible_cpu(i) { 240 + struct pcpu_vstats *veth_stats; 241 + veth_stats = per_cpu_ptr(dev->vstats, i); 242 + u64_stats_init(&veth_stats->syncp); 243 + } 236 244 237 245 return 0; 238 246 }
+8
drivers/net/virtio_net.c
··· 1569 1569 if (vi->stats == NULL) 1570 1570 goto free; 1571 1571 1572 + for_each_possible_cpu(i) { 1573 + struct virtnet_stats *virtnet_stats; 1574 + virtnet_stats = per_cpu_ptr(vi->stats, i); 1575 + u64_stats_init(&virtnet_stats->tx_syncp); 1576 + u64_stats_init(&virtnet_stats->rx_syncp); 1577 + } 1578 + 1579 + 1572 1580 vi->vq_index = alloc_percpu(int); 1573 1581 if (vi->vq_index == NULL) 1574 1582 goto free_stats;
+8
drivers/net/vxlan.c
··· 1884 1884 struct vxlan_dev *vxlan = netdev_priv(dev); 1885 1885 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); 1886 1886 struct vxlan_sock *vs; 1887 + int i; 1887 1888 1888 1889 dev->tstats = alloc_percpu(struct pcpu_tstats); 1889 1890 if (!dev->tstats) 1890 1891 return -ENOMEM; 1892 + 1893 + for_each_possible_cpu(i) { 1894 + struct pcpu_tstats *vxlan_stats; 1895 + vxlan_stats = per_cpu_ptr(dev->tstats, i); 1896 + u64_stats_init(&vxlan_stats->syncp); 1897 + } 1898 + 1891 1899 1892 1900 spin_lock(&vn->sock_lock); 1893 1901 vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
+6
drivers/net/xen-netfront.c
··· 1338 1338 if (np->stats == NULL) 1339 1339 goto exit; 1340 1340 1341 + for_each_possible_cpu(i) { 1342 + struct netfront_stats *xen_nf_stats; 1343 + xen_nf_stats = per_cpu_ptr(np->stats, i); 1344 + u64_stats_init(&xen_nf_stats->syncp); 1345 + } 1346 + 1341 1347 /* Initialise tx_skbs as a free chain containing every entry. */ 1342 1348 np->tx_skb_freelist = 0; 1343 1349 for (i = 0; i < NET_TX_RING_SIZE; i++) {
+7
include/linux/u64_stats_sync.h
··· 67 67 #endif 68 68 }; 69 69 70 + 71 + #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) 72 + # define u64_stats_init(syncp) seqcount_init(syncp.seq) 73 + #else 74 + # define u64_stats_init(syncp) do { } while (0) 75 + #endif 76 + 70 77 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) 71 78 { 72 79 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+8 -1
net/8021q/vlan_dev.c
··· 558 558 static int vlan_dev_init(struct net_device *dev) 559 559 { 560 560 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; 561 - int subclass = 0; 561 + int subclass = 0, i; 562 562 563 563 netif_carrier_off(dev); 564 564 ··· 611 611 vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats); 612 612 if (!vlan_dev_priv(dev)->vlan_pcpu_stats) 613 613 return -ENOMEM; 614 + 615 + for_each_possible_cpu(i) { 616 + struct vlan_pcpu_stats *vlan_stat; 617 + vlan_stat = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); 618 + u64_stats_init(&vlan_stat->syncp); 619 + } 620 + 614 621 615 622 return 0; 616 623 }
+7
net/bridge/br_device.c
··· 88 88 static int br_dev_init(struct net_device *dev) 89 89 { 90 90 struct net_bridge *br = netdev_priv(dev); 91 + int i; 91 92 92 93 br->stats = alloc_percpu(struct br_cpu_netstats); 93 94 if (!br->stats) 94 95 return -ENOMEM; 96 + 97 + for_each_possible_cpu(i) { 98 + struct br_cpu_netstats *br_dev_stats; 99 + br_dev_stats = per_cpu_ptr(br->stats, i); 100 + u64_stats_init(&br_dev_stats->syncp); 101 + } 95 102 96 103 return 0; 97 104 }
+14
net/ipv4/af_inet.c
··· 1518 1518 ptr[0] = __alloc_percpu(mibsize, align); 1519 1519 if (!ptr[0]) 1520 1520 return -ENOMEM; 1521 + 1521 1522 #if SNMP_ARRAY_SZ == 2 1522 1523 ptr[1] = __alloc_percpu(mibsize, align); 1523 1524 if (!ptr[1]) { ··· 1562 1561 1563 1562 static __net_init int ipv4_mib_init_net(struct net *net) 1564 1563 { 1564 + int i; 1565 + 1565 1566 if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics, 1566 1567 sizeof(struct tcp_mib), 1567 1568 __alignof__(struct tcp_mib)) < 0) ··· 1572 1569 sizeof(struct ipstats_mib), 1573 1570 __alignof__(struct ipstats_mib)) < 0) 1574 1571 goto err_ip_mib; 1572 + 1573 + for_each_possible_cpu(i) { 1574 + struct ipstats_mib *af_inet_stats; 1575 + af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i); 1576 + u64_stats_init(&af_inet_stats->syncp); 1577 + #if SNMP_ARRAY_SZ == 2 1578 + af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i); 1579 + u64_stats_init(&af_inet_stats->syncp); 1580 + #endif 1581 + } 1582 + 1575 1583 if (snmp_mib_init((void __percpu **)net->mib.net_statistics, 1576 1584 sizeof(struct linux_mib), 1577 1585 __alignof__(struct linux_mib)) < 0)
+7 -1
net/ipv4/ip_tunnel.c
··· 976 976 { 977 977 struct ip_tunnel *tunnel = netdev_priv(dev); 978 978 struct iphdr *iph = &tunnel->parms.iph; 979 - int err; 979 + int i, err; 980 980 981 981 dev->destructor = ip_tunnel_dev_free; 982 982 dev->tstats = alloc_percpu(struct pcpu_tstats); 983 983 if (!dev->tstats) 984 984 return -ENOMEM; 985 + 986 + for_each_possible_cpu(i) { 987 + struct pcpu_tstats *ipt_stats; 988 + ipt_stats = per_cpu_ptr(dev->tstats, i); 989 + u64_stats_init(&ipt_stats->syncp); 990 + } 985 991 986 992 err = gro_cells_init(&tunnel->gro_cells, dev); 987 993 if (err) {
+14
net/ipv6/addrconf.c
··· 281 281 282 282 static int snmp6_alloc_dev(struct inet6_dev *idev) 283 283 { 284 + int i; 285 + 284 286 if (snmp_mib_init((void __percpu **)idev->stats.ipv6, 285 287 sizeof(struct ipstats_mib), 286 288 __alignof__(struct ipstats_mib)) < 0) 287 289 goto err_ip; 290 + 291 + for_each_possible_cpu(i) { 292 + struct ipstats_mib *addrconf_stats; 293 + addrconf_stats = per_cpu_ptr(idev->stats.ipv6[0], i); 294 + u64_stats_init(&addrconf_stats->syncp); 295 + #if SNMP_ARRAY_SZ == 2 296 + addrconf_stats = per_cpu_ptr(idev->stats.ipv6[1], i); 297 + u64_stats_init(&addrconf_stats->syncp); 298 + #endif 299 + } 300 + 301 + 288 302 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device), 289 303 GFP_KERNEL); 290 304 if (!idev->stats.icmpv6dev)
+14
net/ipv6/af_inet6.c
··· 719 719 720 720 static int __net_init ipv6_init_mibs(struct net *net) 721 721 { 722 + int i; 723 + 722 724 if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6, 723 725 sizeof(struct udp_mib), 724 726 __alignof__(struct udp_mib)) < 0) ··· 733 731 sizeof(struct ipstats_mib), 734 732 __alignof__(struct ipstats_mib)) < 0) 735 733 goto err_ip_mib; 734 + 735 + for_each_possible_cpu(i) { 736 + struct ipstats_mib *af_inet6_stats; 737 + af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[0], i); 738 + u64_stats_init(&af_inet6_stats->syncp); 739 + #if SNMP_ARRAY_SZ == 2 740 + af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[1], i); 741 + u64_stats_init(&af_inet6_stats->syncp); 742 + #endif 743 + } 744 + 745 + 736 746 if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics, 737 747 sizeof(struct icmpv6_mib), 738 748 __alignof__(struct icmpv6_mib)) < 0)
+15
net/ipv6/ip6_gre.c
··· 1252 1252 static int ip6gre_tunnel_init(struct net_device *dev) 1253 1253 { 1254 1254 struct ip6_tnl *tunnel; 1255 + int i; 1255 1256 1256 1257 tunnel = netdev_priv(dev); 1257 1258 ··· 1269 1268 dev->tstats = alloc_percpu(struct pcpu_tstats); 1270 1269 if (!dev->tstats) 1271 1270 return -ENOMEM; 1271 + 1272 + for_each_possible_cpu(i) { 1273 + struct pcpu_tstats *ip6gre_tunnel_stats; 1274 + ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i); 1275 + u64_stats_init(&ip6gre_tunnel_stats->syncp); 1276 + } 1277 + 1272 1278 1273 1279 return 0; 1274 1280 } ··· 1457 1449 static int ip6gre_tap_init(struct net_device *dev) 1458 1450 { 1459 1451 struct ip6_tnl *tunnel; 1452 + int i; 1460 1453 1461 1454 tunnel = netdev_priv(dev); 1462 1455 ··· 1470 1461 dev->tstats = alloc_percpu(struct pcpu_tstats); 1471 1462 if (!dev->tstats) 1472 1463 return -ENOMEM; 1464 + 1465 + for_each_possible_cpu(i) { 1466 + struct pcpu_tstats *ip6gre_tap_stats; 1467 + ip6gre_tap_stats = per_cpu_ptr(dev->tstats, i); 1468 + u64_stats_init(&ip6gre_tap_stats->syncp); 1469 + } 1473 1470 1474 1471 return 0; 1475 1472 }
+7
net/ipv6/ip6_tunnel.c
··· 1494 1494 ip6_tnl_dev_init_gen(struct net_device *dev) 1495 1495 { 1496 1496 struct ip6_tnl *t = netdev_priv(dev); 1497 + int i; 1497 1498 1498 1499 t->dev = dev; 1499 1500 t->net = dev_net(dev); 1500 1501 dev->tstats = alloc_percpu(struct pcpu_tstats); 1501 1502 if (!dev->tstats) 1502 1503 return -ENOMEM; 1504 + 1505 + for_each_possible_cpu(i) { 1506 + struct pcpu_tstats *ip6_tnl_stats; 1507 + ip6_tnl_stats = per_cpu_ptr(dev->tstats, i); 1508 + u64_stats_init(&ip6_tnl_stats->syncp); 1509 + } 1503 1510 return 0; 1504 1511 } 1505 1512
+15
net/ipv6/sit.c
··· 1310 1310 static int ipip6_tunnel_init(struct net_device *dev) 1311 1311 { 1312 1312 struct ip_tunnel *tunnel = netdev_priv(dev); 1313 + int i; 1313 1314 1314 1315 tunnel->dev = dev; 1315 1316 tunnel->net = dev_net(dev); ··· 1323 1322 if (!dev->tstats) 1324 1323 return -ENOMEM; 1325 1324 1325 + for_each_possible_cpu(i) { 1326 + struct pcpu_tstats *ipip6_tunnel_stats; 1327 + ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i); 1328 + u64_stats_init(&ipip6_tunnel_stats->syncp); 1329 + } 1330 + 1326 1331 return 0; 1327 1332 } 1328 1333 ··· 1338 1331 struct iphdr *iph = &tunnel->parms.iph; 1339 1332 struct net *net = dev_net(dev); 1340 1333 struct sit_net *sitn = net_generic(net, sit_net_id); 1334 + int i; 1341 1335 1342 1336 tunnel->dev = dev; 1343 1337 tunnel->net = dev_net(dev); ··· 1352 1344 dev->tstats = alloc_percpu(struct pcpu_tstats); 1353 1345 if (!dev->tstats) 1354 1346 return -ENOMEM; 1347 + 1348 + for_each_possible_cpu(i) { 1349 + struct pcpu_tstats *ipip6_fb_stats; 1350 + ipip6_fb_stats = per_cpu_ptr(dev->tstats, i); 1351 + u64_stats_init(&ipip6_fb_stats->syncp); 1352 + } 1353 + 1355 1354 dev_hold(dev); 1356 1355 rcu_assign_pointer(sitn->tunnels_wc[0], tunnel); 1357 1356 return 0;
+22 -3
net/netfilter/ipvs/ip_vs_ctl.c
··· 842 842 struct ip_vs_dest **dest_p) 843 843 { 844 844 struct ip_vs_dest *dest; 845 - unsigned int atype; 845 + unsigned int atype, i; 846 846 847 847 EnterFunction(2); 848 848 ··· 868 868 dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); 869 869 if (!dest->stats.cpustats) 870 870 goto err_alloc; 871 + 872 + for_each_possible_cpu(i) { 873 + struct ip_vs_cpu_stats *ip_vs_dest_stats; 874 + ip_vs_dest_stats = per_cpu_ptr(dest->stats.cpustats, i); 875 + u64_stats_init(&ip_vs_dest_stats->syncp); 876 + } 871 877 872 878 dest->af = svc->af; 873 879 dest->protocol = svc->protocol; ··· 1140 1134 ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, 1141 1135 struct ip_vs_service **svc_p) 1142 1136 { 1143 - int ret = 0; 1137 + int ret = 0, i; 1144 1138 struct ip_vs_scheduler *sched = NULL; 1145 1139 struct ip_vs_pe *pe = NULL; 1146 1140 struct ip_vs_service *svc = NULL; ··· 1189 1183 ret = -ENOMEM; 1190 1184 goto out_err; 1191 1185 } 1186 + 1187 + for_each_possible_cpu(i) { 1188 + struct ip_vs_cpu_stats *ip_vs_stats; 1189 + ip_vs_stats = per_cpu_ptr(svc->stats.cpustats, i); 1190 + u64_stats_init(&ip_vs_stats->syncp); 1191 + } 1192 + 1192 1193 1193 1194 /* I'm the first user of the service */ 1194 1195 atomic_set(&svc->refcnt, 0); ··· 3793 3780 3794 3781 int __net_init ip_vs_control_net_init(struct net *net) 3795 3782 { 3796 - int idx; 3783 + int i, idx; 3797 3784 struct netns_ipvs *ipvs = net_ipvs(net); 3798 3785 3799 3786 /* Initialize rs_table */ ··· 3811 3798 ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); 3812 3799 if (!ipvs->tot_stats.cpustats) 3813 3800 return -ENOMEM; 3801 + 3802 + for_each_possible_cpu(i) { 3803 + struct ip_vs_cpu_stats *ipvs_tot_stats; 3804 + ipvs_tot_stats = per_cpu_ptr(ipvs->tot_stats.cpustats, i); 3805 + u64_stats_init(&ipvs_tot_stats->syncp); 3806 + } 3814 3807 3815 3808 spin_lock_init(&ipvs->tot_stats.lock); 3816 3809
+6
net/openvswitch/datapath.c
··· 1698 1698 goto err_destroy_table; 1699 1699 } 1700 1700 1701 + for_each_possible_cpu(i) { 1702 + struct dp_stats_percpu *dpath_stats; 1703 + dpath_stats = per_cpu_ptr(dp->stats_percpu, i); 1704 + u64_stats_init(&dpath_stats->sync); 1705 + } 1706 + 1701 1707 dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head), 1702 1708 GFP_KERNEL); 1703 1709 if (!dp->ports) {
+8
net/openvswitch/vport.c
··· 118 118 { 119 119 struct vport *vport; 120 120 size_t alloc_size; 121 + int i; 121 122 122 123 alloc_size = sizeof(struct vport); 123 124 if (priv_size) { ··· 141 140 kfree(vport); 142 141 return ERR_PTR(-ENOMEM); 143 142 } 143 + 144 + for_each_possible_cpu(i) { 145 + struct pcpu_tstats *vport_stats; 146 + vport_stats = per_cpu_ptr(vport->percpu_stats, i); 147 + u64_stats_init(&vport_stats->syncp); 148 + } 149 + 144 150 145 151 spin_lock_init(&vport->stats_lock); 146 152