Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: openvswitch: add masks cache hit counter

Add a counter that counts the number of masks cache hits, and
export it through the megaflow netlink statistics.

Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eelco Chaudron and committed by David S. Miller
9d2f627b d6526926

+24 -8
+1 -1
include/uapi/linux/openvswitch.h
··· 102 102 __u64 n_mask_hit; /* Number of masks used for flow lookups. */ 103 103 __u32 n_masks; /* Number of masks for the datapath. */ 104 104 __u32 pad0; /* Pad for future expension. */ 105 + __u64 n_cache_hit; /* Number of cache matches for flow lookups. */ 105 106 __u64 pad1; /* Pad for future expension. */ 106 - __u64 pad2; /* Pad for future expension. */ 107 107 }; 108 108 109 109 struct ovs_vport_stats {
+4 -1
net/openvswitch/datapath.c
··· 225 225 struct dp_stats_percpu *stats; 226 226 u64 *stats_counter; 227 227 u32 n_mask_hit; 228 + u32 n_cache_hit; 228 229 int error; 229 230 230 231 stats = this_cpu_ptr(dp->stats_percpu); 231 232 232 233 /* Look up flow. */ 233 234 flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb), 234 - &n_mask_hit); 235 + &n_mask_hit, &n_cache_hit); 235 236 if (unlikely(!flow)) { 236 237 struct dp_upcall_info upcall; 237 238 ··· 263 262 u64_stats_update_begin(&stats->syncp); 264 263 (*stats_counter)++; 265 264 stats->n_mask_hit += n_mask_hit; 265 + stats->n_cache_hit += n_cache_hit; 266 266 u64_stats_update_end(&stats->syncp); 267 267 } 268 268 ··· 701 699 stats->n_missed += local_stats.n_missed; 702 700 stats->n_lost += local_stats.n_lost; 703 701 mega_stats->n_mask_hit += local_stats.n_mask_hit; 702 + mega_stats->n_cache_hit += local_stats.n_cache_hit; 704 703 } 705 704 } 706 705
+3
net/openvswitch/datapath.h
··· 38 38 * @n_mask_hit: Number of masks looked up for flow match. 39 39 * @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked 40 40 * up per packet. 41 + * @n_cache_hit: The number of received packets that had their mask found using 42 + * the mask cache. 41 43 */ 42 44 struct dp_stats_percpu { 43 45 u64 n_hit; 44 46 u64 n_missed; 45 47 u64 n_lost; 46 48 u64 n_mask_hit; 49 + u64 n_cache_hit; 47 50 struct u64_stats_sync syncp; 48 51 }; 49 52
+14 -5
net/openvswitch/flow_table.c
··· 667 667 struct mask_array *ma, 668 668 const struct sw_flow_key *key, 669 669 u32 *n_mask_hit, 670 + u32 *n_cache_hit, 670 671 u32 *index) 671 672 { 672 673 u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr); ··· 683 682 u64_stats_update_begin(&ma->syncp); 684 683 usage_counters[*index]++; 685 684 u64_stats_update_end(&ma->syncp); 685 + (*n_cache_hit)++; 686 686 return flow; 687 687 } 688 688 } ··· 721 719 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, 722 720 const struct sw_flow_key *key, 723 721 u32 skb_hash, 724 - u32 *n_mask_hit) 722 + u32 *n_mask_hit, 723 + u32 *n_cache_hit) 725 724 { 726 725 struct mask_array *ma = rcu_dereference(tbl->mask_array); 727 726 struct table_instance *ti = rcu_dereference(tbl->ti); ··· 732 729 int seg; 733 730 734 731 *n_mask_hit = 0; 732 + *n_cache_hit = 0; 735 733 if (unlikely(!skb_hash)) { 736 734 u32 mask_index = 0; 735 + u32 cache = 0; 737 736 738 - return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index); 737 + return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache, 738 + &mask_index); 739 739 } 740 740 741 741 /* Pre and post recirulation flows usually have the same skb_hash ··· 759 753 e = &entries[index]; 760 754 if (e->skb_hash == skb_hash) { 761 755 flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, 762 - &e->mask_index); 756 + n_cache_hit, &e->mask_index); 763 757 if (!flow) 764 758 e->skb_hash = 0; 765 759 return flow; ··· 772 766 } 773 767 774 768 /* Cache miss, do full lookup. 
*/ 775 - flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index); 769 + flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit, 770 + &ce->mask_index); 776 771 if (flow) 777 772 ce->skb_hash = skb_hash; 778 773 774 + *n_cache_hit = 0; 779 775 return flow; 780 776 } 781 777 ··· 787 779 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); 788 780 struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array); 789 781 u32 __always_unused n_mask_hit; 782 + u32 __always_unused n_cache_hit; 790 783 u32 index = 0; 791 784 792 - return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index); 785 + return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index); 793 786 } 794 787 795 788 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+2 -1
net/openvswitch/flow_table.h
··· 82 82 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *, 83 83 const struct sw_flow_key *, 84 84 u32 skb_hash, 85 - u32 *n_mask_hit); 85 + u32 *n_mask_hit, 86 + u32 *n_cache_hit); 86 87 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, 87 88 const struct sw_flow_key *); 88 89 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,