Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: Replace u64_stats_fetch_begin_bh with u64_stats_fetch_begin_irq

Replace the bh safe variant with the hard irq safe variant.

We need a hard irq safe variant to deal with netpoll transmitting
packets from hard irq context, and we need it in most if not all of
the places using the bh safe variant.

Except on 32bit uni-processor the code is exactly the same so don't
bother with a bh variant, just have a hard irq safe variant that
everyone can use.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Eric W. Biederman and committed by
David S. Miller
57a7744e 85dcce7a

+132 -132
+4 -4
block/blk-cgroup.h
··· 435 435 uint64_t v; 436 436 437 437 do { 438 - start = u64_stats_fetch_begin_bh(&stat->syncp); 438 + start = u64_stats_fetch_begin_irq(&stat->syncp); 439 439 v = stat->cnt; 440 - } while (u64_stats_fetch_retry_bh(&stat->syncp, start)); 440 + } while (u64_stats_fetch_retry_irq(&stat->syncp, start)); 441 441 442 442 return v; 443 443 } ··· 508 508 struct blkg_rwstat tmp; 509 509 510 510 do { 511 - start = u64_stats_fetch_begin_bh(&rwstat->syncp); 511 + start = u64_stats_fetch_begin_irq(&rwstat->syncp); 512 512 tmp = *rwstat; 513 - } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start)); 513 + } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start)); 514 514 515 515 return tmp; 516 516 }
+2 -2
drivers/net/dummy.c
··· 63 63 64 64 dstats = per_cpu_ptr(dev->dstats, i); 65 65 do { 66 - start = u64_stats_fetch_begin_bh(&dstats->syncp); 66 + start = u64_stats_fetch_begin_irq(&dstats->syncp); 67 67 tbytes = dstats->tx_bytes; 68 68 tpackets = dstats->tx_packets; 69 - } while (u64_stats_fetch_retry_bh(&dstats->syncp, start)); 69 + } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); 70 70 stats->tx_bytes += tbytes; 71 71 stats->tx_packets += tpackets; 72 72 }
+4 -4
drivers/net/ethernet/broadcom/b44.c
··· 1685 1685 unsigned int start; 1686 1686 1687 1687 do { 1688 - start = u64_stats_fetch_begin_bh(&hwstat->syncp); 1688 + start = u64_stats_fetch_begin_irq(&hwstat->syncp); 1689 1689 1690 1690 /* Convert HW stats into rtnl_link_stats64 stats. */ 1691 1691 nstat->rx_packets = hwstat->rx_pkts; ··· 1719 1719 /* Carrier lost counter seems to be broken for some devices */ 1720 1720 nstat->tx_carrier_errors = hwstat->tx_carrier_lost; 1721 1721 #endif 1722 - } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start)); 1722 + } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); 1723 1723 1724 1724 return nstat; 1725 1725 } ··· 2073 2073 do { 2074 2074 data_src = &hwstat->tx_good_octets; 2075 2075 data_dst = data; 2076 - start = u64_stats_fetch_begin_bh(&hwstat->syncp); 2076 + start = u64_stats_fetch_begin_irq(&hwstat->syncp); 2077 2077 2078 2078 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) 2079 2079 *data_dst++ = *data_src++; 2080 2080 2081 - } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start)); 2081 + } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); 2082 2082 } 2083 2083 2084 2084 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+6 -6
drivers/net/ethernet/emulex/benet/be_ethtool.c
··· 357 357 struct be_rx_stats *stats = rx_stats(rxo); 358 358 359 359 do { 360 - start = u64_stats_fetch_begin_bh(&stats->sync); 360 + start = u64_stats_fetch_begin_irq(&stats->sync); 361 361 data[base] = stats->rx_bytes; 362 362 data[base + 1] = stats->rx_pkts; 363 - } while (u64_stats_fetch_retry_bh(&stats->sync, start)); 363 + } while (u64_stats_fetch_retry_irq(&stats->sync, start)); 364 364 365 365 for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) { 366 366 p = (u8 *)stats + et_rx_stats[i].offset; ··· 373 373 struct be_tx_stats *stats = tx_stats(txo); 374 374 375 375 do { 376 - start = u64_stats_fetch_begin_bh(&stats->sync_compl); 376 + start = u64_stats_fetch_begin_irq(&stats->sync_compl); 377 377 data[base] = stats->tx_compl; 378 - } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start)); 378 + } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start)); 379 379 380 380 do { 381 - start = u64_stats_fetch_begin_bh(&stats->sync); 381 + start = u64_stats_fetch_begin_irq(&stats->sync); 382 382 for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) { 383 383 p = (u8 *)stats + et_tx_stats[i].offset; 384 384 data[base + i] = 385 385 (et_tx_stats[i].size == sizeof(u64)) ? 386 386 *(u64 *)p : *(u32 *)p; 387 387 } 388 - } while (u64_stats_fetch_retry_bh(&stats->sync, start)); 388 + } while (u64_stats_fetch_retry_irq(&stats->sync, start)); 389 389 base += ETHTOOL_TXSTATS_NUM; 390 390 } 391 391 }
+8 -8
drivers/net/ethernet/emulex/benet/be_main.c
··· 591 591 for_all_rx_queues(adapter, rxo, i) { 592 592 const struct be_rx_stats *rx_stats = rx_stats(rxo); 593 593 do { 594 - start = u64_stats_fetch_begin_bh(&rx_stats->sync); 594 + start = u64_stats_fetch_begin_irq(&rx_stats->sync); 595 595 pkts = rx_stats(rxo)->rx_pkts; 596 596 bytes = rx_stats(rxo)->rx_bytes; 597 - } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start)); 597 + } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start)); 598 598 stats->rx_packets += pkts; 599 599 stats->rx_bytes += bytes; 600 600 stats->multicast += rx_stats(rxo)->rx_mcast_pkts; ··· 605 605 for_all_tx_queues(adapter, txo, i) { 606 606 const struct be_tx_stats *tx_stats = tx_stats(txo); 607 607 do { 608 - start = u64_stats_fetch_begin_bh(&tx_stats->sync); 608 + start = u64_stats_fetch_begin_irq(&tx_stats->sync); 609 609 pkts = tx_stats(txo)->tx_pkts; 610 610 bytes = tx_stats(txo)->tx_bytes; 611 - } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start)); 611 + } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start)); 612 612 stats->tx_packets += pkts; 613 613 stats->tx_bytes += bytes; 614 614 } ··· 1408 1408 1409 1409 rxo = &adapter->rx_obj[eqo->idx]; 1410 1410 do { 1411 - start = u64_stats_fetch_begin_bh(&rxo->stats.sync); 1411 + start = u64_stats_fetch_begin_irq(&rxo->stats.sync); 1412 1412 rx_pkts = rxo->stats.rx_pkts; 1413 - } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start)); 1413 + } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start)); 1414 1414 1415 1415 txo = &adapter->tx_obj[eqo->idx]; 1416 1416 do { 1417 - start = u64_stats_fetch_begin_bh(&txo->stats.sync); 1417 + start = u64_stats_fetch_begin_irq(&txo->stats.sync); 1418 1418 tx_pkts = txo->stats.tx_reqs; 1419 - } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start)); 1419 + } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start)); 1420 1420 1421 1421 1422 1422 /* Skip, if wrapped around or first calculation */
+4 -4
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
··· 653 653 654 654 /* process Tx ring statistics */ 655 655 do { 656 - start = u64_stats_fetch_begin_bh(&tx_ring->syncp); 656 + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); 657 657 data[i] = tx_ring->stats.packets; 658 658 data[i + 1] = tx_ring->stats.bytes; 659 - } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); 659 + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); 660 660 661 661 /* Rx ring is the 2nd half of the queue pair */ 662 662 rx_ring = &tx_ring[1]; 663 663 do { 664 - start = u64_stats_fetch_begin_bh(&rx_ring->syncp); 664 + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); 665 665 data[i + 2] = rx_ring->stats.packets; 666 666 data[i + 3] = rx_ring->stats.bytes; 667 - } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); 667 + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); 668 668 } 669 669 rcu_read_unlock(); 670 670 if (vsi == pf->vsi[pf->lan_vsi]) {
+8 -8
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 376 376 continue; 377 377 378 378 do { 379 - start = u64_stats_fetch_begin_bh(&tx_ring->syncp); 379 + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); 380 380 packets = tx_ring->stats.packets; 381 381 bytes = tx_ring->stats.bytes; 382 - } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); 382 + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); 383 383 384 384 stats->tx_packets += packets; 385 385 stats->tx_bytes += bytes; 386 386 rx_ring = &tx_ring[1]; 387 387 388 388 do { 389 - start = u64_stats_fetch_begin_bh(&rx_ring->syncp); 389 + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); 390 390 packets = rx_ring->stats.packets; 391 391 bytes = rx_ring->stats.bytes; 392 - } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); 392 + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); 393 393 394 394 stats->rx_packets += packets; 395 395 stats->rx_bytes += bytes; ··· 770 770 p = ACCESS_ONCE(vsi->tx_rings[q]); 771 771 772 772 do { 773 - start = u64_stats_fetch_begin_bh(&p->syncp); 773 + start = u64_stats_fetch_begin_irq(&p->syncp); 774 774 packets = p->stats.packets; 775 775 bytes = p->stats.bytes; 776 - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 776 + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 777 777 tx_b += bytes; 778 778 tx_p += packets; 779 779 tx_restart += p->tx_stats.restart_queue; ··· 782 782 /* Rx queue is part of the same block as Tx queue */ 783 783 p = &p[1]; 784 784 do { 785 - start = u64_stats_fetch_begin_bh(&p->syncp); 785 + start = u64_stats_fetch_begin_irq(&p->syncp); 786 786 packets = p->stats.packets; 787 787 bytes = p->stats.bytes; 788 - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 788 + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 789 789 rx_b += bytes; 790 790 rx_p += packets; 791 791 rx_buf += p->rx_stats.alloc_buff_failed;
+6 -6
drivers/net/ethernet/intel/igb/igb_ethtool.c
··· 2273 2273 2274 2274 ring = adapter->tx_ring[j]; 2275 2275 do { 2276 - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); 2276 + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); 2277 2277 data[i] = ring->tx_stats.packets; 2278 2278 data[i+1] = ring->tx_stats.bytes; 2279 2279 data[i+2] = ring->tx_stats.restart_queue; 2280 - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); 2280 + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); 2281 2281 do { 2282 - start = u64_stats_fetch_begin_bh(&ring->tx_syncp2); 2282 + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); 2283 2283 restart2 = ring->tx_stats.restart_queue2; 2284 - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start)); 2284 + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); 2285 2285 data[i+2] += restart2; 2286 2286 2287 2287 i += IGB_TX_QUEUE_STATS_LEN; ··· 2289 2289 for (j = 0; j < adapter->num_rx_queues; j++) { 2290 2290 ring = adapter->rx_ring[j]; 2291 2291 do { 2292 - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); 2292 + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); 2293 2293 data[i] = ring->rx_stats.packets; 2294 2294 data[i+1] = ring->rx_stats.bytes; 2295 2295 data[i+2] = ring->rx_stats.drops; 2296 2296 data[i+3] = ring->rx_stats.csum_err; 2297 2297 data[i+4] = ring->rx_stats.alloc_failed; 2298 - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); 2298 + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 2299 2299 i += IGB_RX_QUEUE_STATS_LEN; 2300 2300 } 2301 2301 spin_unlock(&adapter->stats64_lock);
+4 -4
drivers/net/ethernet/intel/igb/igb_main.c
··· 5168 5168 } 5169 5169 5170 5170 do { 5171 - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); 5171 + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); 5172 5172 _bytes = ring->rx_stats.bytes; 5173 5173 _packets = ring->rx_stats.packets; 5174 - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); 5174 + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 5175 5175 bytes += _bytes; 5176 5176 packets += _packets; 5177 5177 } ··· 5184 5184 for (i = 0; i < adapter->num_tx_queues; i++) { 5185 5185 struct igb_ring *ring = adapter->tx_ring[i]; 5186 5186 do { 5187 - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); 5187 + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); 5188 5188 _bytes = ring->tx_stats.bytes; 5189 5189 _packets = ring->tx_stats.packets; 5190 - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); 5190 + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); 5191 5191 bytes += _bytes; 5192 5192 packets += _packets; 5193 5193 }
+4 -4
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
··· 1128 1128 } 1129 1129 1130 1130 do { 1131 - start = u64_stats_fetch_begin_bh(&ring->syncp); 1131 + start = u64_stats_fetch_begin_irq(&ring->syncp); 1132 1132 data[i] = ring->stats.packets; 1133 1133 data[i+1] = ring->stats.bytes; 1134 - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1134 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1135 1135 i += 2; 1136 1136 #ifdef BP_EXTENDED_STATS 1137 1137 data[i] = ring->stats.yields; ··· 1156 1156 } 1157 1157 1158 1158 do { 1159 - start = u64_stats_fetch_begin_bh(&ring->syncp); 1159 + start = u64_stats_fetch_begin_irq(&ring->syncp); 1160 1160 data[i] = ring->stats.packets; 1161 1161 data[i+1] = ring->stats.bytes; 1162 - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 1162 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 1163 1163 i += 2; 1164 1164 #ifdef BP_EXTENDED_STATS 1165 1165 data[i] = ring->stats.yields;
+4 -4
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 7293 7293 7294 7294 if (ring) { 7295 7295 do { 7296 - start = u64_stats_fetch_begin_bh(&ring->syncp); 7296 + start = u64_stats_fetch_begin_irq(&ring->syncp); 7297 7297 packets = ring->stats.packets; 7298 7298 bytes = ring->stats.bytes; 7299 - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 7299 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 7300 7300 stats->rx_packets += packets; 7301 7301 stats->rx_bytes += bytes; 7302 7302 } ··· 7309 7309 7310 7310 if (ring) { 7311 7311 do { 7312 - start = u64_stats_fetch_begin_bh(&ring->syncp); 7312 + start = u64_stats_fetch_begin_irq(&ring->syncp); 7313 7313 packets = ring->stats.packets; 7314 7314 bytes = ring->stats.bytes; 7315 - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 7315 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 7316 7316 stats->tx_packets += packets; 7317 7317 stats->tx_bytes += bytes; 7318 7318 }
+4 -4
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 3337 3337 for (i = 0; i < adapter->num_rx_queues; i++) { 3338 3338 ring = adapter->rx_ring[i]; 3339 3339 do { 3340 - start = u64_stats_fetch_begin_bh(&ring->syncp); 3340 + start = u64_stats_fetch_begin_irq(&ring->syncp); 3341 3341 bytes = ring->stats.bytes; 3342 3342 packets = ring->stats.packets; 3343 - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3343 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 3344 3344 stats->rx_bytes += bytes; 3345 3345 stats->rx_packets += packets; 3346 3346 } ··· 3348 3348 for (i = 0; i < adapter->num_tx_queues; i++) { 3349 3349 ring = adapter->tx_ring[i]; 3350 3350 do { 3351 - start = u64_stats_fetch_begin_bh(&ring->syncp); 3351 + start = u64_stats_fetch_begin_irq(&ring->syncp); 3352 3352 bytes = ring->stats.bytes; 3353 3353 packets = ring->stats.packets; 3354 - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3354 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 3355 3355 stats->tx_bytes += bytes; 3356 3356 stats->tx_packets += packets; 3357 3357 }
+2 -2
drivers/net/ethernet/marvell/mvneta.c
··· 508 508 509 509 cpu_stats = per_cpu_ptr(pp->stats, cpu); 510 510 do { 511 - start = u64_stats_fetch_begin_bh(&cpu_stats->syncp); 511 + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 512 512 rx_packets = cpu_stats->rx_packets; 513 513 rx_bytes = cpu_stats->rx_bytes; 514 514 tx_packets = cpu_stats->tx_packets; 515 515 tx_bytes = cpu_stats->tx_bytes; 516 - } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start)); 516 + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 517 517 518 518 stats->rx_packets += rx_packets; 519 519 stats->rx_bytes += rx_bytes;
+4 -4
drivers/net/ethernet/marvell/sky2.c
··· 3908 3908 u64 _bytes, _packets; 3909 3909 3910 3910 do { 3911 - start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp); 3911 + start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp); 3912 3912 _bytes = sky2->rx_stats.bytes; 3913 3913 _packets = sky2->rx_stats.packets; 3914 - } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start)); 3914 + } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start)); 3915 3915 3916 3916 stats->rx_packets = _packets; 3917 3917 stats->rx_bytes = _bytes; 3918 3918 3919 3919 do { 3920 - start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp); 3920 + start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp); 3921 3921 _bytes = sky2->tx_stats.bytes; 3922 3922 _packets = sky2->tx_stats.packets; 3923 - } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start)); 3923 + } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start)); 3924 3924 3925 3925 stats->tx_packets = _packets; 3926 3926 stats->tx_bytes = _bytes;
+4 -4
drivers/net/ethernet/neterion/vxge/vxge-main.c
··· 3134 3134 u64 packets, bytes, multicast; 3135 3135 3136 3136 do { 3137 - start = u64_stats_fetch_begin_bh(&rxstats->syncp); 3137 + start = u64_stats_fetch_begin_irq(&rxstats->syncp); 3138 3138 3139 3139 packets = rxstats->rx_frms; 3140 3140 multicast = rxstats->rx_mcast; 3141 3141 bytes = rxstats->rx_bytes; 3142 - } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start)); 3142 + } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start)); 3143 3143 3144 3144 net_stats->rx_packets += packets; 3145 3145 net_stats->rx_bytes += bytes; ··· 3149 3149 net_stats->rx_dropped += rxstats->rx_dropped; 3150 3150 3151 3151 do { 3152 - start = u64_stats_fetch_begin_bh(&txstats->syncp); 3152 + start = u64_stats_fetch_begin_irq(&txstats->syncp); 3153 3153 3154 3154 packets = txstats->tx_frms; 3155 3155 bytes = txstats->tx_bytes; 3156 - } while (u64_stats_fetch_retry_bh(&txstats->syncp, start)); 3156 + } while (u64_stats_fetch_retry_irq(&txstats->syncp, start)); 3157 3157 3158 3158 net_stats->tx_packets += packets; 3159 3159 net_stats->tx_bytes += bytes;
+4 -4
drivers/net/ethernet/nvidia/forcedeth.c
··· 1753 1753 1754 1754 /* software stats */ 1755 1755 do { 1756 - syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp); 1756 + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); 1757 1757 storage->rx_packets = np->stat_rx_packets; 1758 1758 storage->rx_bytes = np->stat_rx_bytes; 1759 1759 storage->rx_dropped = np->stat_rx_dropped; 1760 1760 storage->rx_missed_errors = np->stat_rx_missed_errors; 1761 - } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start)); 1761 + } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); 1762 1762 1763 1763 do { 1764 - syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp); 1764 + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); 1765 1765 storage->tx_packets = np->stat_tx_packets; 1766 1766 storage->tx_bytes = np->stat_tx_bytes; 1767 1767 storage->tx_dropped = np->stat_tx_dropped; 1768 - } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start)); 1768 + } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); 1769 1769 1770 1770 /* If the nic supports hw counters then retrieve latest values */ 1771 1771 if (np->driver_data & DEV_HAS_STATISTICS_V123) {
+4 -4
drivers/net/ethernet/realtek/8139too.c
··· 2522 2522 netdev_stats_to_stats64(stats, &dev->stats); 2523 2523 2524 2524 do { 2525 - start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); 2525 + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); 2526 2526 stats->rx_packets = tp->rx_stats.packets; 2527 2527 stats->rx_bytes = tp->rx_stats.bytes; 2528 - } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); 2528 + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); 2529 2529 2530 2530 do { 2531 - start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); 2531 + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); 2532 2532 stats->tx_packets = tp->tx_stats.packets; 2533 2533 stats->tx_bytes = tp->tx_stats.bytes; 2534 - } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); 2534 + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); 2535 2535 2536 2536 return stats; 2537 2537 }
+4 -4
drivers/net/ethernet/realtek/r8169.c
··· 6590 6590 rtl8169_rx_missed(dev, ioaddr); 6591 6591 6592 6592 do { 6593 - start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); 6593 + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); 6594 6594 stats->rx_packets = tp->rx_stats.packets; 6595 6595 stats->rx_bytes = tp->rx_stats.bytes; 6596 - } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); 6596 + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); 6597 6597 6598 6598 6599 6599 do { 6600 - start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); 6600 + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); 6601 6601 stats->tx_packets = tp->tx_stats.packets; 6602 6602 stats->tx_bytes = tp->tx_stats.bytes; 6603 - } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); 6603 + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); 6604 6604 6605 6605 stats->rx_dropped = dev->stats.rx_dropped; 6606 6606 stats->tx_dropped = dev->stats.tx_dropped;
+2 -2
drivers/net/ethernet/tile/tilepro.c
··· 2068 2068 cpu_stats = &priv->cpu[i]->stats; 2069 2069 2070 2070 do { 2071 - start = u64_stats_fetch_begin_bh(&cpu_stats->syncp); 2071 + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 2072 2072 trx_packets = cpu_stats->rx_packets; 2073 2073 ttx_packets = cpu_stats->tx_packets; 2074 2074 trx_bytes = cpu_stats->rx_bytes; 2075 2075 ttx_bytes = cpu_stats->tx_bytes; 2076 2076 trx_errors = cpu_stats->rx_errors; 2077 2077 trx_dropped = cpu_stats->rx_dropped; 2078 - } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start)); 2078 + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 2079 2079 2080 2080 rx_packets += trx_packets; 2081 2081 tx_packets += ttx_packets;
+4 -4
drivers/net/ethernet/via/via-rhine.c
··· 2070 2070 netdev_stats_to_stats64(stats, &dev->stats); 2071 2071 2072 2072 do { 2073 - start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp); 2073 + start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); 2074 2074 stats->rx_packets = rp->rx_stats.packets; 2075 2075 stats->rx_bytes = rp->rx_stats.bytes; 2076 - } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start)); 2076 + } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); 2077 2077 2078 2078 do { 2079 - start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp); 2079 + start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); 2080 2080 stats->tx_packets = rp->tx_stats.packets; 2081 2081 stats->tx_bytes = rp->tx_stats.bytes; 2082 - } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start)); 2082 + } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); 2083 2083 2084 2084 return stats; 2085 2085 }
+4 -4
drivers/net/ifb.c
··· 136 136 unsigned int start; 137 137 138 138 do { 139 - start = u64_stats_fetch_begin_bh(&dp->rsync); 139 + start = u64_stats_fetch_begin_irq(&dp->rsync); 140 140 stats->rx_packets = dp->rx_packets; 141 141 stats->rx_bytes = dp->rx_bytes; 142 - } while (u64_stats_fetch_retry_bh(&dp->rsync, start)); 142 + } while (u64_stats_fetch_retry_irq(&dp->rsync, start)); 143 143 144 144 do { 145 - start = u64_stats_fetch_begin_bh(&dp->tsync); 145 + start = u64_stats_fetch_begin_irq(&dp->tsync); 146 146 147 147 stats->tx_packets = dp->tx_packets; 148 148 stats->tx_bytes = dp->tx_bytes; 149 149 150 - } while (u64_stats_fetch_retry_bh(&dp->tsync, start)); 150 + } while (u64_stats_fetch_retry_irq(&dp->tsync, start)); 151 151 152 152 stats->rx_dropped = dev->stats.rx_dropped; 153 153 stats->tx_dropped = dev->stats.tx_dropped;
+2 -2
drivers/net/loopback.c
··· 111 111 112 112 lb_stats = per_cpu_ptr(dev->lstats, i); 113 113 do { 114 - start = u64_stats_fetch_begin_bh(&lb_stats->syncp); 114 + start = u64_stats_fetch_begin_irq(&lb_stats->syncp); 115 115 tbytes = lb_stats->bytes; 116 116 tpackets = lb_stats->packets; 117 - } while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start)); 117 + } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start)); 118 118 bytes += tbytes; 119 119 packets += tpackets; 120 120 }
+2 -2
drivers/net/macvlan.c
··· 582 582 for_each_possible_cpu(i) { 583 583 p = per_cpu_ptr(vlan->pcpu_stats, i); 584 584 do { 585 - start = u64_stats_fetch_begin_bh(&p->syncp); 585 + start = u64_stats_fetch_begin_irq(&p->syncp); 586 586 rx_packets = p->rx_packets; 587 587 rx_bytes = p->rx_bytes; 588 588 rx_multicast = p->rx_multicast; 589 589 tx_packets = p->tx_packets; 590 590 tx_bytes = p->tx_bytes; 591 - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 591 + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 592 592 593 593 stats->rx_packets += rx_packets; 594 594 stats->rx_bytes += rx_bytes;
+2 -2
drivers/net/nlmon.c
··· 90 90 nl_stats = per_cpu_ptr(dev->lstats, i); 91 91 92 92 do { 93 - start = u64_stats_fetch_begin_bh(&nl_stats->syncp); 93 + start = u64_stats_fetch_begin_irq(&nl_stats->syncp); 94 94 tbytes = nl_stats->bytes; 95 95 tpackets = nl_stats->packets; 96 - } while (u64_stats_fetch_retry_bh(&nl_stats->syncp, start)); 96 + } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start)); 97 97 98 98 packets += tpackets; 99 99 bytes += tbytes;
+2 -2
drivers/net/team/team.c
··· 1761 1761 for_each_possible_cpu(i) { 1762 1762 p = per_cpu_ptr(team->pcpu_stats, i); 1763 1763 do { 1764 - start = u64_stats_fetch_begin_bh(&p->syncp); 1764 + start = u64_stats_fetch_begin_irq(&p->syncp); 1765 1765 rx_packets = p->rx_packets; 1766 1766 rx_bytes = p->rx_bytes; 1767 1767 rx_multicast = p->rx_multicast; 1768 1768 tx_packets = p->tx_packets; 1769 1769 tx_bytes = p->tx_bytes; 1770 - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 1770 + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 1771 1771 1772 1772 stats->rx_packets += rx_packets; 1773 1773 stats->rx_bytes += rx_bytes;
+2 -2
drivers/net/team/team_mode_loadbalance.c
··· 432 432 struct lb_stats tmp; 433 433 434 434 do { 435 - start = u64_stats_fetch_begin_bh(syncp); 435 + start = u64_stats_fetch_begin_irq(syncp); 436 436 tmp.tx_bytes = cpu_stats->tx_bytes; 437 - } while (u64_stats_fetch_retry_bh(syncp, start)); 437 + } while (u64_stats_fetch_retry_irq(syncp, start)); 438 438 acc_stats->tx_bytes += tmp.tx_bytes; 439 439 } 440 440
+2 -2
drivers/net/veth.c
··· 156 156 unsigned int start; 157 157 158 158 do { 159 - start = u64_stats_fetch_begin_bh(&stats->syncp); 159 + start = u64_stats_fetch_begin_irq(&stats->syncp); 160 160 packets = stats->packets; 161 161 bytes = stats->bytes; 162 - } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); 162 + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 163 163 result->packets += packets; 164 164 result->bytes += bytes; 165 165 }
+4 -4
drivers/net/virtio_net.c
··· 1000 1000 u64 tpackets, tbytes, rpackets, rbytes; 1001 1001 1002 1002 do { 1003 - start = u64_stats_fetch_begin_bh(&stats->tx_syncp); 1003 + start = u64_stats_fetch_begin_irq(&stats->tx_syncp); 1004 1004 tpackets = stats->tx_packets; 1005 1005 tbytes = stats->tx_bytes; 1006 - } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start)); 1006 + } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); 1007 1007 1008 1008 do { 1009 - start = u64_stats_fetch_begin_bh(&stats->rx_syncp); 1009 + start = u64_stats_fetch_begin_irq(&stats->rx_syncp); 1010 1010 rpackets = stats->rx_packets; 1011 1011 rbytes = stats->rx_bytes; 1012 - } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start)); 1012 + } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); 1013 1013 1014 1014 tot->rx_packets += rpackets; 1015 1015 tot->tx_packets += tpackets;
+2 -2
drivers/net/xen-netfront.c
··· 1060 1060 unsigned int start; 1061 1061 1062 1062 do { 1063 - start = u64_stats_fetch_begin_bh(&stats->syncp); 1063 + start = u64_stats_fetch_begin_irq(&stats->syncp); 1064 1064 1065 1065 rx_packets = stats->rx_packets; 1066 1066 tx_packets = stats->tx_packets; 1067 1067 rx_bytes = stats->rx_bytes; 1068 1068 tx_bytes = stats->tx_bytes; 1069 - } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); 1069 + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 1070 1070 1071 1071 tot->rx_packets += rx_packets; 1072 1072 tot->tx_packets += tx_packets;
+8 -8
include/linux/u64_stats_sync.h
··· 27 27 * (On UP, there is no seqcount_t protection, a reader allowing interrupts could 28 28 * read partial values) 29 29 * 30 - * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and 31 - * u64_stats_fetch_retry_bh() helpers 30 + * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and 31 + * u64_stats_fetch_retry_irq() helpers 32 32 * 33 33 * Usage : 34 34 * ··· 114 114 } 115 115 116 116 /* 117 - * In case softirq handlers can update u64 counters, readers can use following helpers 117 + * In case irq handlers can update u64 counters, readers can use following helpers 118 118 * - SMP 32bit arches use seqcount protection, irq safe. 119 - * - UP 32bit must disable BH. 119 + * - UP 32bit must disable irqs. 120 120 * - 64bit have no problem atomically reading u64 values, irq safe. 121 121 */ 122 - static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp) 122 + static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) 123 123 { 124 124 #if BITS_PER_LONG==32 && defined(CONFIG_SMP) 125 125 return read_seqcount_begin(&syncp->seq); 126 126 #else 127 127 #if BITS_PER_LONG==32 128 - local_bh_disable(); 128 + local_irq_disable(); 129 129 #endif 130 130 return 0; 131 131 #endif 132 132 } 133 133 134 - static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp, 134 + static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, 135 135 unsigned int start) 136 136 { 137 137 #if BITS_PER_LONG==32 && defined(CONFIG_SMP) 138 138 return read_seqcount_retry(&syncp->seq, start); 139 139 #else 140 140 #if BITS_PER_LONG==32 141 - local_bh_enable(); 141 + local_irq_enable(); 142 142 #endif 143 143 return false; 144 144 #endif
+2 -2
net/8021q/vlan_dev.c
··· 678 678 679 679 p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); 680 680 do { 681 - start = u64_stats_fetch_begin_bh(&p->syncp); 681 + start = u64_stats_fetch_begin_irq(&p->syncp); 682 682 rxpackets = p->rx_packets; 683 683 rxbytes = p->rx_bytes; 684 684 rxmulticast = p->rx_multicast; 685 685 txpackets = p->tx_packets; 686 686 txbytes = p->tx_bytes; 687 - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 687 + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); 688 688 689 689 stats->rx_packets += rxpackets; 690 690 stats->rx_bytes += rxbytes;
+2 -2
net/bridge/br_device.c
··· 136 136 const struct pcpu_sw_netstats *bstats 137 137 = per_cpu_ptr(br->stats, cpu); 138 138 do { 139 - start = u64_stats_fetch_begin_bh(&bstats->syncp); 139 + start = u64_stats_fetch_begin_irq(&bstats->syncp); 140 140 memcpy(&tmp, bstats, sizeof(tmp)); 141 - } while (u64_stats_fetch_retry_bh(&bstats->syncp, start)); 141 + } while (u64_stats_fetch_retry_irq(&bstats->syncp, start)); 142 142 sum.tx_bytes += tmp.tx_bytes; 143 143 sum.tx_packets += tmp.tx_packets; 144 144 sum.rx_bytes += tmp.rx_bytes;
+2 -2
net/ipv4/af_inet.c
··· 1505 1505 bhptr = per_cpu_ptr(mib[0], cpu); 1506 1506 syncp = (struct u64_stats_sync *)(bhptr + syncp_offset); 1507 1507 do { 1508 - start = u64_stats_fetch_begin_bh(syncp); 1508 + start = u64_stats_fetch_begin_irq(syncp); 1509 1509 v = *(((u64 *) bhptr) + offt); 1510 - } while (u64_stats_fetch_retry_bh(syncp, start)); 1510 + } while (u64_stats_fetch_retry_irq(syncp, start)); 1511 1511 1512 1512 res += v; 1513 1513 }
+2 -2
net/ipv4/ip_tunnel_core.c
··· 161 161 unsigned int start; 162 162 163 163 do { 164 - start = u64_stats_fetch_begin_bh(&tstats->syncp); 164 + start = u64_stats_fetch_begin_irq(&tstats->syncp); 165 165 rx_packets = tstats->rx_packets; 166 166 tx_packets = tstats->tx_packets; 167 167 rx_bytes = tstats->rx_bytes; 168 168 tx_bytes = tstats->tx_bytes; 169 - } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); 169 + } while (u64_stats_fetch_retry_irq(&tstats->syncp, start)); 170 170 171 171 tot->rx_packets += rx_packets; 172 172 tot->tx_packets += tx_packets;
+2 -2
net/ipv6/ip6_tunnel.c
··· 108 108 per_cpu_ptr(dev->tstats, i); 109 109 110 110 do { 111 - start = u64_stats_fetch_begin_bh(&tstats->syncp); 111 + start = u64_stats_fetch_begin_irq(&tstats->syncp); 112 112 tmp.rx_packets = tstats->rx_packets; 113 113 tmp.rx_bytes = tstats->rx_bytes; 114 114 tmp.tx_packets = tstats->tx_packets; 115 115 tmp.tx_bytes = tstats->tx_bytes; 116 - } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); 116 + } while (u64_stats_fetch_retry_irq(&tstats->syncp, start)); 117 117 118 118 sum.rx_packets += tmp.rx_packets; 119 119 sum.rx_bytes += tmp.rx_bytes;
+2 -2
net/netfilter/ipvs/ip_vs_ctl.c
··· 2177 2177 __u64 inbytes, outbytes; 2178 2178 2179 2179 do { 2180 - start = u64_stats_fetch_begin_bh(&u->syncp); 2180 + start = u64_stats_fetch_begin_irq(&u->syncp); 2181 2181 inbytes = u->ustats.inbytes; 2182 2182 outbytes = u->ustats.outbytes; 2183 - } while (u64_stats_fetch_retry_bh(&u->syncp, start)); 2183 + } while (u64_stats_fetch_retry_irq(&u->syncp, start)); 2184 2184 2185 2185 seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n", 2186 2186 i, u->ustats.conns, u->ustats.inpkts,
+2 -2
net/openvswitch/datapath.c
··· 606 606 percpu_stats = per_cpu_ptr(dp->stats_percpu, i); 607 607 608 608 do { 609 - start = u64_stats_fetch_begin_bh(&percpu_stats->syncp); 609 + start = u64_stats_fetch_begin_irq(&percpu_stats->syncp); 610 610 local_stats = *percpu_stats; 611 - } while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start)); 611 + } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start)); 612 612 613 613 stats->n_hit += local_stats.n_hit; 614 614 stats->n_missed += local_stats.n_missed;
+2 -2
net/openvswitch/vport.c
··· 277 277 percpu_stats = per_cpu_ptr(vport->percpu_stats, i); 278 278 279 279 do { 280 - start = u64_stats_fetch_begin_bh(&percpu_stats->syncp); 280 + start = u64_stats_fetch_begin_irq(&percpu_stats->syncp); 281 281 local_stats = *percpu_stats; 282 - } while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start)); 282 + } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start)); 283 283 284 284 stats->rx_bytes += local_stats.rx_bytes; 285 285 stats->rx_packets += local_stats.rx_packets;