Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

openvswitch: Remove vport stats.

Since all vport types are now backed by a netdev, we can directly
use netdev stats. The following patch removes the redundant stats
from vport.

Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Pravin B Shelar and committed by
David S. Miller
8c876639 3eedb41f

+56 -170
+25 -32
net/openvswitch/vport-internal_dev.c
··· 43 43 return netdev_priv(netdev); 44 44 } 45 45 46 - /* This function is only called by the kernel network layer.*/ 47 - static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev, 48 - struct rtnl_link_stats64 *stats) 49 - { 50 - struct vport *vport = ovs_internal_dev_get_vport(netdev); 51 - struct ovs_vport_stats vport_stats; 52 - 53 - ovs_vport_get_stats(vport, &vport_stats); 54 - 55 - /* The tx and rx stats need to be swapped because the 56 - * switch and host OS have opposite perspectives. */ 57 - stats->rx_packets = vport_stats.tx_packets; 58 - stats->tx_packets = vport_stats.rx_packets; 59 - stats->rx_bytes = vport_stats.tx_bytes; 60 - stats->tx_bytes = vport_stats.rx_bytes; 61 - stats->rx_errors = vport_stats.tx_errors; 62 - stats->tx_errors = vport_stats.rx_errors; 63 - stats->rx_dropped = vport_stats.tx_dropped; 64 - stats->tx_dropped = vport_stats.rx_dropped; 65 - 66 - return stats; 67 - } 68 - 69 46 /* Called with rcu_read_lock_bh. */ 70 47 static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) 71 48 { 49 + int len, err; 50 + 51 + len = skb->len; 72 52 rcu_read_lock(); 73 - ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL); 53 + err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL); 74 54 rcu_read_unlock(); 55 + 56 + if (likely(!err)) { 57 + struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats); 58 + 59 + u64_stats_update_begin(&tstats->syncp); 60 + tstats->tx_bytes += len; 61 + tstats->tx_packets++; 62 + u64_stats_update_end(&tstats->syncp); 63 + } else { 64 + netdev->stats.tx_errors++; 65 + } 75 66 return 0; 76 67 } 77 68 ··· 112 121 .ndo_start_xmit = internal_dev_xmit, 113 122 .ndo_set_mac_address = eth_mac_addr, 114 123 .ndo_change_mtu = internal_dev_change_mtu, 115 - .ndo_get_stats64 = internal_dev_get_stats, 116 124 }; 117 125 118 126 static struct rtnl_link_ops internal_dev_link_ops __read_mostly = { ··· 202 212 rtnl_unlock(); 203 213 } 204 214 205 - static int 
internal_dev_recv(struct vport *vport, struct sk_buff *skb) 215 + static void internal_dev_recv(struct vport *vport, struct sk_buff *skb) 206 216 { 207 217 struct net_device *netdev = vport->dev; 208 - int len; 218 + struct pcpu_sw_netstats *stats; 209 219 210 220 if (unlikely(!(netdev->flags & IFF_UP))) { 211 221 kfree_skb(skb); 212 - return 0; 222 + netdev->stats.rx_dropped++; 223 + return; 213 224 } 214 - 215 - len = skb->len; 216 225 217 226 skb_dst_drop(skb); 218 227 nf_reset(skb); ··· 222 233 skb->protocol = eth_type_trans(skb, netdev); 223 234 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 224 235 225 - netif_rx(skb); 236 + stats = this_cpu_ptr(netdev->tstats); 237 + u64_stats_update_begin(&stats->syncp); 238 + stats->rx_packets++; 239 + stats->rx_bytes += skb->len; 240 + u64_stats_update_end(&stats->syncp); 226 241 227 - return len; 242 + netif_rx(skb); 228 243 } 229 244 230 245 static struct vport_ops ovs_internal_vport_ops = {
+8 -14
net/openvswitch/vport-netdev.c
··· 39 39 static struct vport_ops ovs_netdev_vport_ops; 40 40 41 41 /* Must be called with rcu_read_lock. */ 42 - static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) 42 + static void netdev_port_receive(struct sk_buff *skb) 43 43 { 44 + struct vport *vport; 45 + 46 + vport = ovs_netdev_get_vport(skb->dev); 44 47 if (unlikely(!vport)) 45 48 goto error; 46 49 ··· 59 56 60 57 skb_push(skb, ETH_HLEN); 61 58 ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN); 62 - 63 59 ovs_vport_receive(vport, skb, skb_tunnel_info(skb)); 64 60 return; 65 - 66 61 error: 67 62 kfree_skb(skb); 68 63 } ··· 69 68 static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb) 70 69 { 71 70 struct sk_buff *skb = *pskb; 72 - struct vport *vport; 73 71 74 72 if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) 75 73 return RX_HANDLER_PASS; 76 74 77 - vport = ovs_netdev_get_vport(skb->dev); 78 - 79 - netdev_port_receive(vport, skb); 80 - 75 + netdev_port_receive(skb); 81 76 return RX_HANDLER_CONSUMED; 82 77 } 83 78 ··· 200 203 return length; 201 204 } 202 205 203 - int ovs_netdev_send(struct vport *vport, struct sk_buff *skb) 206 + void ovs_netdev_send(struct vport *vport, struct sk_buff *skb) 204 207 { 205 208 int mtu = vport->dev->mtu; 206 - int len; 207 209 208 210 if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { 209 211 net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n", 210 212 vport->dev->name, 211 213 packet_length(skb), mtu); 214 + vport->dev->stats.tx_errors++; 212 215 goto drop; 213 216 } 214 217 215 218 skb->dev = vport->dev; 216 - len = skb->len; 217 219 dev_queue_xmit(skb); 218 - 219 - return len; 220 + return; 220 221 221 222 drop: 222 223 kfree_skb(skb); 223 - return 0; 224 224 } 225 225 EXPORT_SYMBOL_GPL(ovs_netdev_send); 226 226
+1 -1
net/openvswitch/vport-netdev.h
··· 27 27 struct vport *ovs_netdev_get_vport(struct net_device *dev); 28 28 29 29 struct vport *ovs_netdev_link(struct vport *vport, const char *name); 30 - int ovs_netdev_send(struct vport *vport, struct sk_buff *skb); 30 + void ovs_netdev_send(struct vport *vport, struct sk_buff *skb); 31 31 void ovs_netdev_detach_dev(struct vport *); 32 32 33 33 int __init ovs_netdev_init(void);
+12 -97
net/openvswitch/vport.c
··· 34 34 #include "vport.h" 35 35 #include "vport-internal_dev.h" 36 36 37 - static void ovs_vport_record_error(struct vport *, 38 - enum vport_err_type err_type); 39 - 40 37 static LIST_HEAD(vport_ops_list); 41 38 42 39 /* Protected by RCU read lock for reading, ovs_mutex for writing. */ ··· 154 157 return ERR_PTR(-EINVAL); 155 158 } 156 159 157 - vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 158 - if (!vport->percpu_stats) { 159 - kfree(vport); 160 - return ERR_PTR(-ENOMEM); 161 - } 162 - 163 160 return vport; 164 161 } 165 162 EXPORT_SYMBOL_GPL(ovs_vport_alloc); ··· 174 183 * it is safe to use raw dereference. 175 184 */ 176 185 kfree(rcu_dereference_raw(vport->upcall_portids)); 177 - free_percpu(vport->percpu_stats); 178 186 kfree(vport); 179 187 } 180 188 EXPORT_SYMBOL_GPL(ovs_vport_free); ··· 280 290 */ 281 291 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) 282 292 { 293 + struct net_device *dev = vport->dev; 283 294 int i; 284 295 285 296 memset(stats, 0, sizeof(*stats)); 297 + stats->rx_errors = dev->stats.rx_errors; 298 + stats->tx_errors = dev->stats.tx_errors; 299 + stats->tx_dropped = dev->stats.tx_dropped; 300 + stats->rx_dropped = dev->stats.rx_dropped; 286 301 287 - /* We potentially have 2 sources of stats that need to be combined: 288 - * those we have collected (split into err_stats and percpu_stats) from 289 - * set_stats() and device error stats from netdev->get_stats() (for 290 - * errors that happen downstream and therefore aren't reported through 291 - * our vport_record_error() function). 292 - * Stats from first source are reported by ovs (OVS_VPORT_ATTR_STATS). 293 - * netdev-stats can be directly read over netlink-ioctl. 
294 - */ 295 - 296 - stats->rx_errors = atomic_long_read(&vport->err_stats.rx_errors); 297 - stats->tx_errors = atomic_long_read(&vport->err_stats.tx_errors); 298 - stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped); 299 - stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped); 302 + stats->rx_dropped += atomic_long_read(&dev->rx_dropped); 303 + stats->tx_dropped += atomic_long_read(&dev->tx_dropped); 300 304 301 305 for_each_possible_cpu(i) { 302 306 const struct pcpu_sw_netstats *percpu_stats; 303 307 struct pcpu_sw_netstats local_stats; 304 308 unsigned int start; 305 309 306 - percpu_stats = per_cpu_ptr(vport->percpu_stats, i); 310 + percpu_stats = per_cpu_ptr(dev->tstats, i); 307 311 308 312 do { 309 313 start = u64_stats_fetch_begin_irq(&percpu_stats->syncp); ··· 452 468 * Must be called with rcu_read_lock. The packet cannot be shared and 453 469 * skb->data should point to the Ethernet header. 454 470 */ 455 - void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, 456 - const struct ip_tunnel_info *tun_info) 471 + int ovs_vport_receive(struct vport *vport, struct sk_buff *skb, 472 + const struct ip_tunnel_info *tun_info) 457 473 { 458 - struct pcpu_sw_netstats *stats; 459 474 struct sw_flow_key key; 460 475 int error; 461 - 462 - stats = this_cpu_ptr(vport->percpu_stats); 463 - u64_stats_update_begin(&stats->syncp); 464 - stats->rx_packets++; 465 - stats->rx_bytes += skb->len + 466 - (skb_vlan_tag_present(skb) ? 
VLAN_HLEN : 0); 467 - u64_stats_update_end(&stats->syncp); 468 476 469 477 OVS_CB(skb)->input_vport = vport; 470 478 OVS_CB(skb)->mru = 0; ··· 464 488 error = ovs_flow_key_extract(tun_info, skb, &key); 465 489 if (unlikely(error)) { 466 490 kfree_skb(skb); 467 - return; 491 + return error; 468 492 } 469 493 ovs_dp_process_packet(skb, &key); 494 + return 0; 470 495 } 471 496 EXPORT_SYMBOL_GPL(ovs_vport_receive); 472 - 473 - /** 474 - * ovs_vport_send - send a packet on a device 475 - * 476 - * @vport: vport on which to send the packet 477 - * @skb: skb to send 478 - * 479 - * Sends the given packet and returns the length of data sent. Either ovs 480 - * lock or rcu_read_lock must be held. 481 - */ 482 - int ovs_vport_send(struct vport *vport, struct sk_buff *skb) 483 - { 484 - int sent = vport->ops->send(vport, skb); 485 - 486 - if (likely(sent > 0)) { 487 - struct pcpu_sw_netstats *stats; 488 - 489 - stats = this_cpu_ptr(vport->percpu_stats); 490 - 491 - u64_stats_update_begin(&stats->syncp); 492 - stats->tx_packets++; 493 - stats->tx_bytes += sent; 494 - u64_stats_update_end(&stats->syncp); 495 - } else if (sent < 0) { 496 - ovs_vport_record_error(vport, VPORT_E_TX_ERROR); 497 - } else { 498 - ovs_vport_record_error(vport, VPORT_E_TX_DROPPED); 499 - } 500 - return sent; 501 - } 502 - 503 - /** 504 - * ovs_vport_record_error - indicate device error to generic stats layer 505 - * 506 - * @vport: vport that encountered the error 507 - * @err_type: one of enum vport_err_type types to indicate the error type 508 - * 509 - * If using the vport generic stats layer indicate that an error of the given 510 - * type has occurred. 
511 - */ 512 - static void ovs_vport_record_error(struct vport *vport, 513 - enum vport_err_type err_type) 514 - { 515 - switch (err_type) { 516 - case VPORT_E_RX_DROPPED: 517 - atomic_long_inc(&vport->err_stats.rx_dropped); 518 - break; 519 - 520 - case VPORT_E_RX_ERROR: 521 - atomic_long_inc(&vport->err_stats.rx_errors); 522 - break; 523 - 524 - case VPORT_E_TX_DROPPED: 525 - atomic_long_inc(&vport->err_stats.tx_dropped); 526 - break; 527 - 528 - case VPORT_E_TX_ERROR: 529 - atomic_long_inc(&vport->err_stats.tx_errors); 530 - break; 531 - } 532 - 533 - } 534 497 535 498 static void free_vport_rcu(struct rcu_head *rcu) 536 499 {
+10 -26
net/openvswitch/vport.h
··· 57 57 int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *); 58 58 u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *); 59 59 60 - int ovs_vport_send(struct vport *, struct sk_buff *); 61 - 62 60 int ovs_tunnel_get_egress_info(struct ip_tunnel_info *egress_tun_info, 63 61 struct net *net, 64 62 struct sk_buff *, ··· 66 68 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, 67 69 struct ip_tunnel_info *info); 68 70 69 - /* The following definitions are for implementers of vport devices: */ 70 - 71 - struct vport_err_stats { 72 - atomic_long_t rx_dropped; 73 - atomic_long_t rx_errors; 74 - atomic_long_t tx_dropped; 75 - atomic_long_t tx_errors; 76 - }; 77 71 /** 78 72 * struct vport_portids - array of netlink portids of a vport. 79 73 * must be protected by rcu. ··· 91 101 * @hash_node: Element in @dev_table hash table in vport.c. 92 102 * @dp_hash_node: Element in @datapath->ports hash table in datapath.c. 93 103 * @ops: Class structure. 94 - * @percpu_stats: Points to per-CPU statistics used and maintained by vport 95 - * @err_stats: Points to error statistics used and maintained by vport 96 104 * @detach_list: list used for detaching vport in net-exit call. 97 105 */ 98 106 struct vport { ··· 103 115 struct hlist_node dp_hash_node; 104 116 const struct vport_ops *ops; 105 117 106 - struct pcpu_sw_netstats __percpu *percpu_stats; 107 - 108 - struct vport_err_stats err_stats; 109 118 struct list_head detach_list; 110 119 struct rcu_head rcu; 111 120 }; ··· 141 156 * @get_options: Appends vport-specific attributes for the configuration of an 142 157 * existing vport to a &struct sk_buff. May be %NULL for a vport that does not 143 158 * have any configuration. 144 - * @send: Send a packet on the device. Returns the length of the packet sent, 159 + * @send: Send a packet on the device. 145 160 * zero for dropped packets or negative for error. 
146 161 * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for 147 162 * a packet. ··· 156 171 int (*set_options)(struct vport *, struct nlattr *); 157 172 int (*get_options)(const struct vport *, struct sk_buff *); 158 173 159 - int (*send)(struct vport *, struct sk_buff *); 174 + void (*send)(struct vport *, struct sk_buff *); 160 175 int (*get_egress_tun_info)(struct vport *, struct sk_buff *, 161 176 struct ip_tunnel_info *); 162 177 163 178 struct module *owner; 164 179 struct list_head list; 165 - }; 166 - 167 - enum vport_err_type { 168 - VPORT_E_RX_DROPPED, 169 - VPORT_E_RX_ERROR, 170 - VPORT_E_TX_DROPPED, 171 - VPORT_E_TX_ERROR, 172 180 }; 173 181 174 182 struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *, ··· 200 222 return (struct vport *)((u8 *)priv - ALIGN(sizeof(struct vport), VPORT_ALIGN)); 201 223 } 202 224 203 - void ovs_vport_receive(struct vport *, struct sk_buff *, 204 - const struct ip_tunnel_info *); 225 + int ovs_vport_receive(struct vport *, struct sk_buff *, 226 + const struct ip_tunnel_info *); 205 227 206 228 static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, 207 229 const void *start, unsigned int len) ··· 236 258 rt = ip_route_output_key(net, fl); 237 259 return rt; 238 260 } 261 + 262 + static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb) 263 + { 264 + vport->ops->send(vport, skb); 265 + } 266 + 239 267 #endif /* vport.h */