Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-mvpp2-cls-Add-classification'

Maxime Chevallier says:

====================
net: mvpp2: cls: Add classification

This series is a rework of the previously standalone patch adding
classification support for mvpp2 :

https://lore.kernel.org/netdev/20190423075031.26074-1-maxime.chevallier@bootlin.com/

This patch has been reworked according to Saeed's review, to make sure
that the location of the rule is always respected and serves as a way to
prioritize rules between each other. This is the 3rd iteration of this
submission, but since it's now a series, I reset the revision numbering.

This series implements that in a limited configuration for now, since we
limit the total number of rules per port to 4.

The main factors for this limitation are that :
- We share the classification tables between all ports (4 max, although
one is only used for internal loopback), hence we have to perform a
logical separation between rules, which is done today by dedicated
ranges for each port in each table

- The "Flow table", which dictates which lookups operations are
performed for an ingress packet, is subdivided into 22 "sub flows",
each corresponding to a traffic type based on the L3 proto, L4
proto, the presence or not of a VLAN tag and the L3 fragmentation.

This makes it so that when adding a rule, it has to be added into each
of these subflows, introducing duplications of entries and limiting
our max number of entries.

These limitations can be overcome in several ways, but for readability's
sake, I'd rather submit basic classification offload support for now,
and improve it gradually.

This series also adds a small cosmetic cleanup patch (1), and also adds
support for the "Drop" action compared to the first submission of this
feature. It is simple enough to be added with this basic support.

Compared to the first submissions, the NETIF_F_NTUPLE flag was also
removed, following Saeed's comment.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+545 -84
+42
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
··· 14 14 #include <linux/netdevice.h> 15 15 #include <linux/phy.h> 16 16 #include <linux/phylink.h> 17 + #include <net/flow_offload.h> 17 18 18 19 /* Fifo Registers */ 19 20 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) ··· 127 126 #define MVPP22_CLS_C2_TCAM_DATA4 0x1b20 128 127 #define MVPP22_CLS_C2_LU_TYPE(lu) ((lu) & 0x3f) 129 128 #define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8) 129 + #define MVPP22_CLS_C2_PORT_MASK (0xff << 8) 130 130 #define MVPP22_CLS_C2_TCAM_INV 0x1b24 131 131 #define MVPP22_CLS_C2_TCAM_INV_BIT BIT(31) 132 132 #define MVPP22_CLS_C2_HIT_CTR 0x1b50 ··· 136 134 #define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13) 137 135 #define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11) 138 136 #define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9) 137 + #define MVPP22_CLS_C2_ACT_COLOR(act) ((act) & 0x7) 139 138 #define MVPP22_CLS_C2_ATTR0 0x1b64 140 139 #define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24) 141 140 #define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f ··· 618 615 #define MVPP2_BIT_IN_WORD(bit) ((bit) % 32) 619 616 620 617 #define MVPP2_N_PRS_FLOWS 52 618 + #define MVPP2_N_RFS_ENTRIES_PER_FLOW 4 619 + 620 + /* There are 7 supported high-level flows */ 621 + #define MVPP2_N_RFS_RULES (MVPP2_N_RFS_ENTRIES_PER_FLOW * 7) 621 622 622 623 /* RSS constants */ 623 624 #define MVPP22_RSS_TABLE_ENTRIES 32 ··· 819 812 struct cpumask *mask; 820 813 }; 821 814 815 + /* Internal represention of a Flow Steering rule */ 816 + struct mvpp2_rfs_rule { 817 + /* Rule location inside the flow*/ 818 + int loc; 819 + 820 + /* Flow type, such as TCP_V4_FLOW, IP6_FLOW, etc. */ 821 + int flow_type; 822 + 823 + /* Index of the C2 TCAM entry handling this rule */ 824 + int c2_index; 825 + 826 + /* Header fields that needs to be extracted to match this flow */ 827 + u16 hek_fields; 828 + 829 + /* CLS engine : only c2 is supported for now. */ 830 + u8 engine; 831 + 832 + /* TCAM key and mask for C2-based steering. 
These fields should be 833 + * encapsulated in a union should we add more engines. 834 + */ 835 + u64 c2_tcam; 836 + u64 c2_tcam_mask; 837 + 838 + struct flow_rule *flow; 839 + }; 840 + 841 + struct mvpp2_ethtool_fs { 842 + struct mvpp2_rfs_rule rule; 843 + struct ethtool_rxnfc rxnfc; 844 + }; 845 + 822 846 struct mvpp2_port { 823 847 u8 id; 824 848 ··· 921 883 922 884 /* RSS indirection table */ 923 885 u32 indir[MVPP22_RSS_TABLE_ENTRIES]; 886 + 887 + /* List of steering rules active on that port */ 888 + struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_RULES]; 889 + int n_rfs_rules; 924 890 }; 925 891 926 892 /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
+425 -72
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
··· 24 24 25 25 static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = { 26 26 /* TCP over IPv4 flows, Not fragmented, no vlan tag */ 27 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, 27 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, 28 28 MVPP22_CLS_HEK_IP4_5T, 29 29 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | 30 30 MVPP2_PRS_RI_L4_TCP, 31 31 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 32 32 33 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, 33 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, 34 34 MVPP22_CLS_HEK_IP4_5T, 35 35 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | 36 36 MVPP2_PRS_RI_L4_TCP, 37 37 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 38 38 39 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, 39 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, 40 40 MVPP22_CLS_HEK_IP4_5T, 41 41 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | 42 42 MVPP2_PRS_RI_L4_TCP, 43 43 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 44 44 45 45 /* TCP over IPv4 flows, Not fragmented, with vlan tag */ 46 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, 46 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG, 47 47 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, 48 48 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, 49 49 MVPP2_PRS_IP_MASK), 50 50 51 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, 51 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG, 52 52 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, 53 53 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, 54 54 MVPP2_PRS_IP_MASK), 55 55 56 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, 56 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG, 57 57 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, 58 58 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, 59 59 MVPP2_PRS_IP_MASK), 60 60 61 61 /* TCP over IPv4 flows, fragmented, no vlan tag */ 62 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, 62 + 
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, 63 63 MVPP22_CLS_HEK_IP4_2T, 64 64 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | 65 65 MVPP2_PRS_RI_L4_TCP, 66 66 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 67 67 68 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, 68 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, 69 69 MVPP22_CLS_HEK_IP4_2T, 70 70 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | 71 71 MVPP2_PRS_RI_L4_TCP, 72 72 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 73 73 74 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, 74 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, 75 75 MVPP22_CLS_HEK_IP4_2T, 76 76 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | 77 77 MVPP2_PRS_RI_L4_TCP, 78 78 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 79 79 80 80 /* TCP over IPv4 flows, fragmented, with vlan tag */ 81 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, 81 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, 82 82 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, 83 83 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, 84 84 MVPP2_PRS_IP_MASK), 85 85 86 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, 86 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, 87 87 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, 88 88 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, 89 89 MVPP2_PRS_IP_MASK), 90 90 91 - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, 91 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, 92 92 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, 93 93 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, 94 94 MVPP2_PRS_IP_MASK), 95 95 96 96 /* UDP over IPv4 flows, Not fragmented, no vlan tag */ 97 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, 97 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, 98 98 MVPP22_CLS_HEK_IP4_5T, 99 99 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | 100 100 MVPP2_PRS_RI_L4_UDP, 101 101 
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 102 102 103 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, 103 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, 104 104 MVPP22_CLS_HEK_IP4_5T, 105 105 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | 106 106 MVPP2_PRS_RI_L4_UDP, 107 107 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 108 108 109 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, 109 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, 110 110 MVPP22_CLS_HEK_IP4_5T, 111 111 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | 112 112 MVPP2_PRS_RI_L4_UDP, 113 113 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 114 114 115 115 /* UDP over IPv4 flows, Not fragmented, with vlan tag */ 116 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, 116 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, 117 117 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, 118 118 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, 119 119 MVPP2_PRS_IP_MASK), 120 120 121 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, 121 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, 122 122 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, 123 123 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, 124 124 MVPP2_PRS_IP_MASK), 125 125 126 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, 126 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, 127 127 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, 128 128 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, 129 129 MVPP2_PRS_IP_MASK), 130 130 131 131 /* UDP over IPv4 flows, fragmented, no vlan tag */ 132 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, 132 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, 133 133 MVPP22_CLS_HEK_IP4_2T, 134 134 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | 135 135 MVPP2_PRS_RI_L4_UDP, 136 136 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 137 137 138 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, 138 + 
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, 139 139 MVPP22_CLS_HEK_IP4_2T, 140 140 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | 141 141 MVPP2_PRS_RI_L4_UDP, 142 142 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 143 143 144 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, 144 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, 145 145 MVPP22_CLS_HEK_IP4_2T, 146 146 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | 147 147 MVPP2_PRS_RI_L4_UDP, 148 148 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 149 149 150 150 /* UDP over IPv4 flows, fragmented, with vlan tag */ 151 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, 151 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, 152 152 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, 153 153 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, 154 154 MVPP2_PRS_IP_MASK), 155 155 156 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, 156 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, 157 157 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, 158 158 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, 159 159 MVPP2_PRS_IP_MASK), 160 160 161 - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, 161 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, 162 162 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, 163 163 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, 164 164 MVPP2_PRS_IP_MASK), 165 165 166 166 /* TCP over IPv6 flows, not fragmented, no vlan tag */ 167 - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG, 167 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG, 168 168 MVPP22_CLS_HEK_IP6_5T, 169 169 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | 170 170 MVPP2_PRS_RI_L4_TCP, 171 171 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 172 172 173 - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG, 173 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG, 174 174 MVPP22_CLS_HEK_IP6_5T, 175 175 MVPP2_PRS_RI_VLAN_NONE | 
MVPP2_PRS_RI_L3_IP6_EXT | 176 176 MVPP2_PRS_RI_L4_TCP, 177 177 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 178 178 179 179 /* TCP over IPv6 flows, not fragmented, with vlan tag */ 180 - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG, 180 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG, 181 181 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, 182 182 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP, 183 183 MVPP2_PRS_IP_MASK), 184 184 185 - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG, 185 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG, 186 186 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, 187 187 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP, 188 188 MVPP2_PRS_IP_MASK), 189 189 190 190 /* TCP over IPv6 flows, fragmented, no vlan tag */ 191 - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG, 191 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG, 192 192 MVPP22_CLS_HEK_IP6_2T, 193 193 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | 194 194 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, 195 195 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 196 196 197 - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG, 197 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG, 198 198 MVPP22_CLS_HEK_IP6_2T, 199 199 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | 200 200 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, 201 201 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 202 202 203 203 /* TCP over IPv6 flows, fragmented, with vlan tag */ 204 - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG, 204 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG, 205 205 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, 206 206 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | 207 207 MVPP2_PRS_RI_L4_TCP, 208 208 MVPP2_PRS_IP_MASK), 209 209 210 - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG, 210 + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG, 211 211 MVPP22_CLS_HEK_IP6_2T | 
MVPP22_CLS_HEK_OPT_VLAN, 212 212 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | 213 213 MVPP2_PRS_RI_L4_TCP, 214 214 MVPP2_PRS_IP_MASK), 215 215 216 216 /* UDP over IPv6 flows, not fragmented, no vlan tag */ 217 - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG, 217 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG, 218 218 MVPP22_CLS_HEK_IP6_5T, 219 219 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | 220 220 MVPP2_PRS_RI_L4_UDP, 221 221 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 222 222 223 - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG, 223 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG, 224 224 MVPP22_CLS_HEK_IP6_5T, 225 225 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | 226 226 MVPP2_PRS_RI_L4_UDP, 227 227 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 228 228 229 229 /* UDP over IPv6 flows, not fragmented, with vlan tag */ 230 - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG, 230 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG, 231 231 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, 232 232 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP, 233 233 MVPP2_PRS_IP_MASK), 234 234 235 - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG, 235 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG, 236 236 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, 237 237 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP, 238 238 MVPP2_PRS_IP_MASK), 239 239 240 240 /* UDP over IPv6 flows, fragmented, no vlan tag */ 241 - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG, 241 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG, 242 242 MVPP22_CLS_HEK_IP6_2T, 243 243 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | 244 244 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, 245 245 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 246 246 247 - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG, 247 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG, 248 248 MVPP22_CLS_HEK_IP6_2T, 249 249 
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | 250 250 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, 251 251 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), 252 252 253 253 /* UDP over IPv6 flows, fragmented, with vlan tag */ 254 - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG, 254 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG, 255 255 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, 256 256 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | 257 257 MVPP2_PRS_RI_L4_UDP, 258 258 MVPP2_PRS_IP_MASK), 259 259 260 - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG, 260 + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG, 261 261 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, 262 262 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | 263 263 MVPP2_PRS_RI_L4_UDP, 264 264 MVPP2_PRS_IP_MASK), 265 265 266 266 /* IPv4 flows, no vlan tag */ 267 - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, 267 + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, 268 268 MVPP22_CLS_HEK_IP4_2T, 269 269 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4, 270 270 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), 271 - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, 271 + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, 272 272 MVPP22_CLS_HEK_IP4_2T, 273 273 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT, 274 274 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), 275 - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, 275 + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, 276 276 MVPP22_CLS_HEK_IP4_2T, 277 277 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER, 278 278 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), 279 279 280 280 /* IPv4 flows, with vlan tag */ 281 - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, 281 + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, 282 282 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, 283 283 MVPP2_PRS_RI_L3_IP4, 284 284 MVPP2_PRS_RI_L3_PROTO_MASK), 285 - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, 285 + 
MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, 286 286 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, 287 287 MVPP2_PRS_RI_L3_IP4_OPT, 288 288 MVPP2_PRS_RI_L3_PROTO_MASK), 289 - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, 289 + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, 290 290 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, 291 291 MVPP2_PRS_RI_L3_IP4_OTHER, 292 292 MVPP2_PRS_RI_L3_PROTO_MASK), 293 293 294 294 /* IPv6 flows, no vlan tag */ 295 - MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG, 295 + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG, 296 296 MVPP22_CLS_HEK_IP6_2T, 297 297 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, 298 298 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), 299 - MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG, 299 + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG, 300 300 MVPP22_CLS_HEK_IP6_2T, 301 301 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, 302 302 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), 303 303 304 304 /* IPv6 flows, with vlan tag */ 305 - MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG, 305 + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG, 306 306 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, 307 307 MVPP2_PRS_RI_L3_IP6, 308 308 MVPP2_PRS_RI_L3_PROTO_MASK), 309 - MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG, 309 + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG, 310 310 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, 311 311 MVPP2_PRS_RI_L3_IP6, 312 312 MVPP2_PRS_RI_L3_PROTO_MASK), 313 313 314 314 /* Non IP flow, no vlan tag */ 315 - MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG, 315 + MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG, 316 316 0, 317 317 MVPP2_PRS_RI_VLAN_NONE, 318 318 MVPP2_PRS_RI_VLAN_MASK), 319 319 /* Non IP flow, with vlan tag */ 320 - MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG, 320 + MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG, 321 321 MVPP22_CLS_HEK_OPT_VLAN, 322 322 0, 0), 323 323 }; ··· 344 344 struct mvpp2_cls_flow_entry *fe) 345 345 { 346 
346 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); 347 - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); 348 - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); 349 - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); 347 + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); 348 + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); 349 + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); 350 350 } 351 351 352 352 u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index) ··· 448 448 fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port); 449 449 } 450 450 451 + static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe, 452 + u32 port) 453 + { 454 + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port); 455 + } 456 + 451 457 static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe, 452 458 u8 lu_type) 453 459 { ··· 545 539 c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT); 546 540 } 547 541 542 + static int mvpp2_cls_ethtool_flow_to_type(int flow_type) 543 + { 544 + switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { 545 + case TCP_V4_FLOW: 546 + return MVPP22_FLOW_TCP4; 547 + case TCP_V6_FLOW: 548 + return MVPP22_FLOW_TCP6; 549 + case UDP_V4_FLOW: 550 + return MVPP22_FLOW_UDP4; 551 + case UDP_V6_FLOW: 552 + return MVPP22_FLOW_UDP6; 553 + case IPV4_FLOW: 554 + return MVPP22_FLOW_IP4; 555 + case IPV6_FLOW: 556 + return MVPP22_FLOW_IP6; 557 + default: 558 + return -EOPNOTSUPP; 559 + } 560 + } 561 + 562 + static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc) 563 + { 564 + return MVPP22_CLS_C2_RFS_LOC(port->id, loc); 565 + } 566 + 548 567 /* Initialize the flow table entries for the given flow */ 549 568 static void mvpp2_cls_flow_init(struct mvpp2 *priv, 550 569 const struct mvpp2_cls_flow *flow) ··· 596 565 597 566 mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2); 598 567 mvpp2_cls_flow_port_id_sel(&fe, true); 599 - mvpp2_cls_flow_lu_type_set(&fe, MVPP2_CLS_LU_ALL); 568 + 
mvpp2_cls_flow_lu_type_set(&fe, MVPP22_FLOW_ETHERNET); 600 569 601 570 /* Add all ports */ 602 571 for (i = 0; i < MVPP2_MAX_PORTS; i++) ··· 681 650 } 682 651 683 652 return 0; 653 + } 654 + 655 + /* Returns the size, in bits, of the corresponding HEK field */ 656 + static int mvpp2_cls_hek_field_size(u32 field) 657 + { 658 + switch (field) { 659 + case MVPP22_CLS_HEK_OPT_MAC_DA: 660 + return 48; 661 + case MVPP22_CLS_HEK_OPT_IP4SA: 662 + case MVPP22_CLS_HEK_OPT_IP4DA: 663 + return 32; 664 + case MVPP22_CLS_HEK_OPT_IP6SA: 665 + case MVPP22_CLS_HEK_OPT_IP6DA: 666 + return 128; 667 + case MVPP22_CLS_HEK_OPT_L4SIP: 668 + case MVPP22_CLS_HEK_OPT_L4DIP: 669 + return 16; 670 + default: 671 + return -1; 672 + } 684 673 } 685 674 686 675 const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) ··· 861 810 862 811 /* Match on Lookup Type */ 863 812 c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK)); 864 - c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_ALL); 813 + c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_FLOW_ETHERNET); 865 814 866 815 /* Update RSS status after matching this entry */ 867 816 c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK); ··· 995 944 mvpp2_rss_port_c2_disable(port); 996 945 } 997 946 947 + static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry) 948 + { 949 + struct mvpp2_cls_c2_entry c2; 950 + 951 + mvpp2_cls_c2_read(port->priv, entry, &c2); 952 + 953 + /* Clear the port map so that the entry doesn't match anymore */ 954 + c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id))); 955 + 956 + mvpp2_cls_c2_write(port->priv, &c2); 957 + } 958 + 998 959 /* Set CPU queue number for oversize packets */ 999 960 void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) 1000 961 { ··· 1021 958 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); 1022 959 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); 1023 960 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); 961 + } 962 + 963 + static int 
mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port, 964 + struct mvpp2_rfs_rule *rule) 965 + { 966 + struct flow_action_entry *act; 967 + struct mvpp2_cls_c2_entry c2; 968 + u8 qh, ql, pmap; 969 + 970 + memset(&c2, 0, sizeof(c2)); 971 + 972 + c2.index = mvpp2_cls_c2_port_flow_index(port, rule->loc); 973 + if (c2.index < 0) 974 + return -EINVAL; 975 + 976 + act = &rule->flow->action.entries[0]; 977 + 978 + rule->c2_index = c2.index; 979 + 980 + c2.tcam[0] = (rule->c2_tcam & 0xffff) | 981 + ((rule->c2_tcam_mask & 0xffff) << 16); 982 + c2.tcam[1] = ((rule->c2_tcam >> 16) & 0xffff) | 983 + (((rule->c2_tcam_mask >> 16) & 0xffff) << 16); 984 + c2.tcam[2] = ((rule->c2_tcam >> 32) & 0xffff) | 985 + (((rule->c2_tcam_mask >> 32) & 0xffff) << 16); 986 + c2.tcam[3] = ((rule->c2_tcam >> 48) & 0xffff) | 987 + (((rule->c2_tcam_mask >> 48) & 0xffff) << 16); 988 + 989 + pmap = BIT(port->id); 990 + c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap); 991 + c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap)); 992 + 993 + /* Match on Lookup Type */ 994 + c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK)); 995 + c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc); 996 + 997 + if (act->id == FLOW_ACTION_DROP) { 998 + c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK); 999 + } else { 1000 + /* We want to keep the default color derived from the Header 1001 + * Parser drop entries, for VLAN and MAC filtering. This will 1002 + * assign a default color of Green or Red, and we want matches 1003 + * with a non-drop action to keep that color. 
1004 + */ 1005 + c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK); 1006 + 1007 + /* Mark packet as "forwarded to software", needed for RSS */ 1008 + c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK); 1009 + 1010 + c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) | 1011 + MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK); 1012 + 1013 + qh = ((act->queue.index + port->first_rxq) >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; 1014 + ql = (act->queue.index + port->first_rxq) & MVPP22_CLS_C2_ATTR0_QLOW_MASK; 1015 + 1016 + c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | 1017 + MVPP22_CLS_C2_ATTR0_QLOW(ql); 1018 + } 1019 + 1020 + c2.valid = true; 1021 + 1022 + mvpp2_cls_c2_write(port->priv, &c2); 1023 + 1024 + return 0; 1025 + } 1026 + 1027 + static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port, 1028 + struct mvpp2_rfs_rule *rule) 1029 + { 1030 + return mvpp2_port_c2_tcam_rule_add(port, rule); 1031 + } 1032 + 1033 + static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port, 1034 + struct mvpp2_rfs_rule *rule) 1035 + { 1036 + const struct mvpp2_cls_flow *flow; 1037 + struct mvpp2_cls_flow_entry fe; 1038 + int index, i; 1039 + 1040 + for_each_cls_flow_id_containing_type(i, rule->flow_type) { 1041 + flow = mvpp2_cls_flow_get(i); 1042 + if (!flow) 1043 + return 0; 1044 + 1045 + index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc); 1046 + 1047 + mvpp2_cls_flow_read(port->priv, index, &fe); 1048 + mvpp2_cls_flow_port_remove(&fe, BIT(port->id)); 1049 + mvpp2_cls_flow_write(port->priv, &fe); 1050 + } 1051 + 1052 + if (rule->c2_index >= 0) 1053 + mvpp22_port_c2_lookup_disable(port, rule->c2_index); 1054 + 1055 + return 0; 1056 + } 1057 + 1058 + static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port, 1059 + struct mvpp2_rfs_rule *rule) 1060 + { 1061 + const struct mvpp2_cls_flow *flow; 1062 + struct mvpp2 *priv = port->priv; 1063 + struct mvpp2_cls_flow_entry fe; 1064 + int index, ret, i; 1065 + 1066 + if (rule->engine != 
MVPP22_CLS_ENGINE_C2) 1067 + return -EOPNOTSUPP; 1068 + 1069 + ret = mvpp2_port_c2_rfs_rule_insert(port, rule); 1070 + if (ret) 1071 + return ret; 1072 + 1073 + for_each_cls_flow_id_containing_type(i, rule->flow_type) { 1074 + flow = mvpp2_cls_flow_get(i); 1075 + if (!flow) 1076 + return 0; 1077 + 1078 + index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc); 1079 + 1080 + mvpp2_cls_flow_read(priv, index, &fe); 1081 + mvpp2_cls_flow_eng_set(&fe, rule->engine); 1082 + mvpp2_cls_flow_port_id_sel(&fe, true); 1083 + mvpp2_flow_set_hek_fields(&fe, rule->hek_fields); 1084 + mvpp2_cls_flow_lu_type_set(&fe, rule->loc); 1085 + mvpp2_cls_flow_port_add(&fe, 0xf); 1086 + 1087 + mvpp2_cls_flow_write(priv, &fe); 1088 + } 1089 + 1090 + return 0; 1091 + } 1092 + 1093 + static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule) 1094 + { 1095 + struct flow_rule *flow = rule->flow; 1096 + struct flow_action_entry *act; 1097 + int offs = 64; 1098 + 1099 + act = &flow->action.entries[0]; 1100 + 1101 + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { 1102 + struct flow_match_ports match; 1103 + 1104 + flow_rule_match_ports(flow, &match); 1105 + if (match.mask->src) { 1106 + rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP; 1107 + offs -= mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP); 1108 + 1109 + rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs; 1110 + rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs; 1111 + } 1112 + 1113 + if (match.mask->dst) { 1114 + rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP; 1115 + offs -= mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP); 1116 + 1117 + rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs; 1118 + rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs; 1119 + } 1120 + } 1121 + 1122 + if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS) 1123 + return -EOPNOTSUPP; 1124 + 1125 + return 0; 1126 + } 1127 + 1128 + static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule) 1129 + { 1130 + struct 
flow_rule *flow = rule->flow; 1131 + struct flow_action_entry *act; 1132 + 1133 + act = &flow->action.entries[0]; 1134 + if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP) 1135 + return -EOPNOTSUPP; 1136 + 1137 + /* For now, only use the C2 engine which has a HEK size limited to 64 1138 + * bits for TCAM matching. 1139 + */ 1140 + rule->engine = MVPP22_CLS_ENGINE_C2; 1141 + 1142 + if (mvpp2_cls_c2_build_match(rule)) 1143 + return -EINVAL; 1144 + 1145 + return 0; 1146 + } 1147 + 1148 + int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port, 1149 + struct ethtool_rxnfc *rxnfc) 1150 + { 1151 + struct mvpp2_ethtool_fs *efs; 1152 + 1153 + if (rxnfc->fs.location >= MVPP2_N_RFS_RULES) 1154 + return -EINVAL; 1155 + 1156 + efs = port->rfs_rules[rxnfc->fs.location]; 1157 + if (!efs) 1158 + return -ENOENT; 1159 + 1160 + memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc)); 1161 + 1162 + return 0; 1163 + } 1164 + 1165 + int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port, 1166 + struct ethtool_rxnfc *info) 1167 + { 1168 + struct ethtool_rx_flow_spec_input input = {}; 1169 + struct ethtool_rx_flow_rule *ethtool_rule; 1170 + struct mvpp2_ethtool_fs *efs, *old_efs; 1171 + int ret = 0; 1172 + 1173 + if (info->fs.location >= 4 || 1174 + info->fs.location < 0) 1175 + return -EINVAL; 1176 + 1177 + efs = kzalloc(sizeof(*efs), GFP_KERNEL); 1178 + if (!efs) 1179 + return -ENOMEM; 1180 + 1181 + input.fs = &info->fs; 1182 + 1183 + ethtool_rule = ethtool_rx_flow_rule_create(&input); 1184 + if (IS_ERR(ethtool_rule)) { 1185 + ret = PTR_ERR(ethtool_rule); 1186 + goto clean_rule; 1187 + } 1188 + 1189 + efs->rule.flow = ethtool_rule->rule; 1190 + efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type); 1191 + 1192 + ret = mvpp2_cls_rfs_parse_rule(&efs->rule); 1193 + if (ret) 1194 + goto clean_eth_rule; 1195 + 1196 + efs->rule.loc = info->fs.location; 1197 + 1198 + /* Replace an already existing rule */ 1199 + if (port->rfs_rules[efs->rule.loc]) { 1200 + old_efs = 
port->rfs_rules[efs->rule.loc]; 1201 + ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule); 1202 + if (ret) 1203 + goto clean_eth_rule; 1204 + kfree(old_efs); 1205 + port->n_rfs_rules--; 1206 + } 1207 + 1208 + ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule); 1209 + if (ret) 1210 + goto clean_eth_rule; 1211 + 1212 + memcpy(&efs->rxnfc, info, sizeof(*info)); 1213 + port->rfs_rules[efs->rule.loc] = efs; 1214 + port->n_rfs_rules++; 1215 + 1216 + return ret; 1217 + 1218 + clean_eth_rule: 1219 + ethtool_rx_flow_rule_destroy(ethtool_rule); 1220 + clean_rule: 1221 + kfree(efs); 1222 + return ret; 1223 + } 1224 + 1225 + int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, 1226 + struct ethtool_rxnfc *info) 1227 + { 1228 + struct mvpp2_ethtool_fs *efs; 1229 + int ret; 1230 + 1231 + efs = port->rfs_rules[info->fs.location]; 1232 + if (!efs) 1233 + return -EINVAL; 1234 + 1235 + /* Remove the rule from the engines. */ 1236 + ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule); 1237 + if (ret) 1238 + return ret; 1239 + 1240 + port->n_rfs_rules--; 1241 + port->rfs_rules[info->fs.location] = NULL; 1242 + kfree(efs); 1243 + 1244 + return 0; 1024 1245 } 1025 1246 1026 1247 static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq) ··· 1344 997 int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) 1345 998 { 1346 999 u16 hash_opts = 0; 1000 + u32 flow_type; 1347 1001 1348 - switch (info->flow_type) { 1349 - case TCP_V4_FLOW: 1350 - case UDP_V4_FLOW: 1351 - case TCP_V6_FLOW: 1352 - case UDP_V6_FLOW: 1002 + flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type); 1003 + 1004 + switch (flow_type) { 1005 + case MVPP22_FLOW_TCP4: 1006 + case MVPP22_FLOW_UDP4: 1007 + case MVPP22_FLOW_TCP6: 1008 + case MVPP22_FLOW_UDP6: 1353 1009 if (info->data & RXH_L4_B_0_1) 1354 1010 hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; 1355 1011 if (info->data & RXH_L4_B_2_3) 1356 1012 hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; 1357 1013 /* Fallthrough */ 1358 - 
case IPV4_FLOW: 1359 - case IPV6_FLOW: 1014 + case MVPP22_FLOW_IP4: 1015 + case MVPP22_FLOW_IP6: 1360 1016 if (info->data & RXH_L2DA) 1361 1017 hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; 1362 1018 if (info->data & RXH_VLAN) ··· 1376 1026 default: return -EOPNOTSUPP; 1377 1027 } 1378 1028 1379 - return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts); 1029 + return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts); 1380 1030 } 1381 1031 1382 1032 int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) 1383 1033 { 1384 1034 unsigned long hash_opts; 1035 + u32 flow_type; 1385 1036 int i; 1386 1037 1387 - hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type); 1038 + flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type); 1039 + 1040 + hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type); 1388 1041 info->data = 0; 1389 1042 1390 1043 for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { ··· 1450 1097 mvpp22_rss_fill_table(port, port->id); 1451 1098 1452 1099 /* Configure default flows */ 1453 - mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T); 1454 - mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T); 1455 - mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T); 1456 - mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T); 1457 - mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T); 1458 - mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T); 1100 + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T); 1101 + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T); 1102 + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T); 1103 + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T); 1104 + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T); 1105 + mvpp2_port_rss_hash_opts_set(port, 
MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T); 1459 1106 }
+59 -11
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
··· 92 92 MVPP22_C2_FWD_HW_LOW_LAT_LOCK, 93 93 }; 94 94 95 + enum mvpp22_cls_c2_color_action { 96 + MVPP22_C2_COL_NO_UPD = 0, 97 + MVPP22_C2_COL_NO_UPD_LOCK, 98 + MVPP22_C2_COL_GREEN, 99 + MVPP22_C2_COL_GREEN_LOCK, 100 + MVPP22_C2_COL_YELLOW, 101 + MVPP22_C2_COL_YELLOW_LOCK, 102 + MVPP22_C2_COL_RED, /* Drop */ 103 + MVPP22_C2_COL_RED_LOCK, /* Drop */ 104 + }; 105 + 95 106 #define MVPP2_CLS_C2_TCAM_WORDS 5 96 107 #define MVPP2_CLS_C2_ATTR_WORDS 5 97 108 ··· 118 107 u8 valid; 119 108 }; 120 109 110 + #define MVPP22_FLOW_ETHER_BIT BIT(0) 111 + #define MVPP22_FLOW_IP4_BIT BIT(1) 112 + #define MVPP22_FLOW_IP6_BIT BIT(2) 113 + #define MVPP22_FLOW_TCP_BIT BIT(3) 114 + #define MVPP22_FLOW_UDP_BIT BIT(4) 115 + 116 + #define MVPP22_FLOW_TCP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_TCP_BIT) 117 + #define MVPP22_FLOW_TCP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_TCP_BIT) 118 + #define MVPP22_FLOW_UDP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_UDP_BIT) 119 + #define MVPP22_FLOW_UDP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_UDP_BIT) 120 + #define MVPP22_FLOW_IP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT) 121 + #define MVPP22_FLOW_IP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT) 122 + #define MVPP22_FLOW_ETHERNET (MVPP22_FLOW_ETHER_BIT) 123 + 121 124 /* Classifier C2 engine entries */ 122 125 #define MVPP22_CLS_C2_N_ENTRIES 256 123 126 124 127 /* Number of per-port dedicated entries in the C2 TCAM */ 125 - #define MVPP22_CLS_C2_PORT_RANGE 8 128 + #define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW 126 129 127 - #define MVPP22_CLS_C2_PORT_FIRST(p) (MVPP22_CLS_C2_N_ENTRIES - \ 128 - ((p) * MVPP22_CLS_C2_PORT_RANGE)) 129 - #define MVPP22_CLS_C2_RSS_ENTRY(p) (MVPP22_CLS_C2_PORT_FIRST(p) - 1) 130 + /* Each port has one range per flow type + one entry controlling the global RSS 131 + * setting and the default rx queue 132 + */ 133 + #define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1) 
134 + #define MVPP22_CLS_C2_PORT_FIRST(p) ((p) * MVPP22_CLS_C2_PORT_RANGE) 135 + #define MVPP22_CLS_C2_RSS_ENTRY(p) (MVPP22_CLS_C2_PORT_FIRST((p) + 1) - 1) 136 + 137 + #define MVPP22_CLS_C2_PORT_FLOW_FIRST(p) (MVPP22_CLS_C2_PORT_FIRST(p)) 138 + 139 + #define MVPP22_CLS_C2_RFS_LOC(p, loc) (MVPP22_CLS_C2_PORT_FLOW_FIRST(p) + (loc)) 130 140 131 141 /* Packet flow ID */ 132 142 enum mvpp2_prs_flow { ··· 177 145 MVPP2_FL_LAST, 178 146 }; 179 147 180 - enum mvpp2_cls_lu_type { 181 - MVPP2_CLS_LU_ALL = 0, 182 - }; 183 - 184 148 /* LU Type defined for all engines, and specified in the flow table */ 185 149 #define MVPP2_CLS_LU_TYPE_MASK 0x3f 186 150 ··· 196 168 struct mvpp2_prs_result_info prs_ri; 197 169 }; 198 170 199 - #define MVPP2_CLS_FLT_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1) 171 + #define MVPP2_CLS_FLT_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1 + 16) 200 172 #define MVPP2_CLS_FLT_FIRST(id) (((id) - MVPP2_FL_START) * \ 201 173 MVPP2_CLS_FLT_ENTRIES_PER_FLOW) 202 - #define MVPP2_CLS_FLT_C2_RSS_ENTRY(id) (MVPP2_CLS_FLT_FIRST(id)) 203 - #define MVPP2_CLS_FLT_HASH_ENTRY(port, id) (MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + (port) + 1) 174 + 175 + #define MVPP2_CLS_FLT_C2_RFS(port, id, rfs_n) (MVPP2_CLS_FLT_FIRST(id) + \ 176 + ((port) * MVPP2_MAX_PORTS) + \ 177 + (rfs_n)) 178 + 179 + #define MVPP2_CLS_FLT_C2_RSS_ENTRY(id) (MVPP2_CLS_FLT_C2_RFS(MVPP2_MAX_PORTS, id, 0)) 180 + #define MVPP2_CLS_FLT_HASH_ENTRY(port, id) (MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + 1 + (port)) 204 181 #define MVPP2_CLS_FLT_LAST(id) (MVPP2_CLS_FLT_FIRST(id) + \ 205 182 MVPP2_CLS_FLT_ENTRIES_PER_FLOW - 1) 206 183 ··· 229 196 #define for_each_cls_flow_id_with_type(i, type) \ 230 197 for_each_cls_flow_id((i)) \ 231 198 if (cls_flows[(i)].flow_type != (type)) \ 199 + continue; \ 200 + else 201 + 202 + #define for_each_cls_flow_id_containing_type(i, type) \ 203 + for_each_cls_flow_id((i)) \ 204 + if ((cls_flows[(i)].flow_type & (type)) != (type)) \ 232 205 continue; \ 233 206 else 234 207 ··· 284 245 285 246 void 
mvpp2_cls_c2_read(struct mvpp2 *priv, int index, 286 247 struct mvpp2_cls_c2_entry *c2); 248 + 249 + int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port, 250 + struct ethtool_rxnfc *rxnfc); 251 + 252 + int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port, 253 + struct ethtool_rxnfc *info); 254 + 255 + int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, 256 + struct ethtool_rxnfc *info); 287 257 288 258 #endif
+19 -1
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 3937 3937 struct ethtool_rxnfc *info, u32 *rules) 3938 3938 { 3939 3939 struct mvpp2_port *port = netdev_priv(dev); 3940 - int ret = 0; 3940 + int ret = 0, i, loc = 0; 3941 3941 3942 3942 if (!mvpp22_rss_is_supported()) 3943 3943 return -EOPNOTSUPP; ··· 3948 3948 break; 3949 3949 case ETHTOOL_GRXRINGS: 3950 3950 info->data = port->nrxqs; 3951 + break; 3952 + case ETHTOOL_GRXCLSRLCNT: 3953 + info->rule_cnt = port->n_rfs_rules; 3954 + break; 3955 + case ETHTOOL_GRXCLSRULE: 3956 + ret = mvpp2_ethtool_cls_rule_get(port, info); 3957 + break; 3958 + case ETHTOOL_GRXCLSRLALL: 3959 + for (i = 0; i < MVPP2_N_RFS_RULES; i++) { 3960 + if (port->rfs_rules[i]) 3961 + rules[loc++] = i; 3962 + } 3951 3963 break; 3952 3964 default: 3953 3965 return -ENOTSUPP; ··· 3980 3968 switch (info->cmd) { 3981 3969 case ETHTOOL_SRXFH: 3982 3970 ret = mvpp2_ethtool_rxfh_set(port, info); 3971 + break; 3972 + case ETHTOOL_SRXCLSRLINS: 3973 + ret = mvpp2_ethtool_cls_rule_ins(port, info); 3974 + break; 3975 + case ETHTOOL_SRXCLSRLDEL: 3976 + ret = mvpp2_ethtool_cls_rule_del(port, info); 3983 3977 break; 3984 3978 default: 3985 3979 return -EOPNOTSUPP;