Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'Support-tunnels-over-VLAN-in-NFP'

John Hurley says:

====================
Support tunnels over VLAN in NFP

This patchset deals with tunnel encap and decap when the end-point IP
address is on an internal port (for example an OvS VLAN port). Tunnel
encap without VLAN is already supported in the NFP driver. This patchset
extends that to include a push VLAN along with tunnel header push.

Patches 1-4 extend the flow_offload IR API to include actions that use
skbedit to set the ptype of an SKB and that send a packet to port ingress
from the act_mirred module. Such actions are used in flower rules that
forward tunnel packets to internal ports where they can be decapsulated.
OvS and its TC API is an example of a user-space app that produces such
rules.

Patch 5 modifies the encap offload code to allow the pushing of a VLAN
header after a tunnel header push.

Patches 6-10 deal with tunnel decap when the end-point is on an internal
port. They detect 'pre-tunnel rules' which do not deal with tunnels
themselves but, rather, forward packets to internal ports where they
can be decapped if required. Such rules are offloaded to a table in HW
along with an indication of whether packets need to be passed to this
table or not (based on their destination MAC address). Matching against
this table prior to decapsulation in HW allows the correct parsing and
handling of outer VLANs on tunnelled packets and the correct updating of
stats for said 'pre-tunnel' rules.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+476 -32
+33 -7
drivers/net/ethernet/netronome/nfp/flower/action.c
··· 173 173 struct nfp_fl_payload *nfp_flow, 174 174 bool last, struct net_device *in_dev, 175 175 enum nfp_flower_tun_type tun_type, int *tun_out_cnt, 176 - struct netlink_ext_ack *extack) 176 + bool pkt_host, struct netlink_ext_ack *extack) 177 177 { 178 178 size_t act_size = sizeof(struct nfp_fl_output); 179 179 struct nfp_flower_priv *priv = app->priv; ··· 218 218 return gid; 219 219 } 220 220 output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid); 221 + } else if (nfp_flower_internal_port_can_offload(app, out_dev)) { 222 + if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) { 223 + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware"); 224 + return -EOPNOTSUPP; 225 + } 226 + 227 + if (nfp_flow->pre_tun_rule.dev || !pkt_host) { 228 + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action"); 229 + return -EOPNOTSUPP; 230 + } 231 + 232 + nfp_flow->pre_tun_rule.dev = out_dev; 233 + 234 + return 0; 221 235 } else { 222 236 /* Set action output parameters. 
*/ 223 237 output->flags = cpu_to_be16(tmp_flags); ··· 899 885 struct nfp_fl_payload *nfp_fl, int *a_len, 900 886 struct net_device *netdev, bool last, 901 887 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, 902 - int *out_cnt, u32 *csum_updated, 888 + int *out_cnt, u32 *csum_updated, bool pkt_host, 903 889 struct netlink_ext_ack *extack) 904 890 { 905 891 struct nfp_flower_priv *priv = app->priv; ··· 921 907 922 908 output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; 923 909 err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type, 924 - tun_out_cnt, extack); 910 + tun_out_cnt, pkt_host, extack); 925 911 if (err) 926 912 return err; 927 913 ··· 953 939 struct net_device *netdev, 954 940 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, 955 941 int *out_cnt, u32 *csum_updated, 956 - struct nfp_flower_pedit_acts *set_act, 942 + struct nfp_flower_pedit_acts *set_act, bool *pkt_host, 957 943 struct netlink_ext_ack *extack, int act_idx) 958 944 { 959 945 struct nfp_fl_set_ipv4_tun *set_tun; ··· 969 955 case FLOW_ACTION_DROP: 970 956 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP); 971 957 break; 958 + case FLOW_ACTION_REDIRECT_INGRESS: 972 959 case FLOW_ACTION_REDIRECT: 973 960 err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, 974 961 true, tun_type, tun_out_cnt, 975 - out_cnt, csum_updated, extack); 962 + out_cnt, csum_updated, *pkt_host, 963 + extack); 976 964 if (err) 977 965 return err; 978 966 break; 967 + case FLOW_ACTION_MIRRED_INGRESS: 979 968 case FLOW_ACTION_MIRRED: 980 969 err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, 981 970 false, tun_type, tun_out_cnt, 982 - out_cnt, csum_updated, extack); 971 + out_cnt, csum_updated, *pkt_host, 972 + extack); 983 973 if (err) 984 974 return err; 985 975 break; ··· 1113 1095 nfp_fl_set_mpls(set_m, act); 1114 1096 *a_len += sizeof(struct nfp_fl_set_mpls); 1115 1097 break; 1098 + case FLOW_ACTION_PTYPE: 1099 + /* TC ptype skbedit sets PACKET_HOST for 
ingress redirect. */ 1100 + if (act->ptype != PACKET_HOST) 1101 + return -EOPNOTSUPP; 1102 + 1103 + *pkt_host = true; 1104 + break; 1116 1105 default: 1117 1106 /* Currently we do not handle any other actions. */ 1118 1107 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list"); ··· 1175 1150 struct nfp_flower_pedit_acts set_act; 1176 1151 enum nfp_flower_tun_type tun_type; 1177 1152 struct flow_action_entry *act; 1153 + bool pkt_host = false; 1178 1154 u32 csum_updated = 0; 1179 1155 1180 1156 memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); ··· 1192 1166 err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len, 1193 1167 netdev, &tun_type, &tun_out_cnt, 1194 1168 &out_cnt, &csum_updated, 1195 - &set_act, extack, i); 1169 + &set_act, &pkt_host, extack, i); 1196 1170 if (err) 1197 1171 return err; 1198 1172 act_cnt++;
+3 -1
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
··· 220 220 __be16 tun_flags; 221 221 u8 ttl; 222 222 u8 tos; 223 - __be32 extra; 223 + __be16 outer_vlan_tpid; 224 + __be16 outer_vlan_tci; 224 225 u8 tun_len; 225 226 u8 res2; 226 227 __be16 tun_proto; ··· 484 483 NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18, 485 484 NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19, 486 485 NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20, 486 + NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21, 487 487 NFP_FLOWER_CMSG_TYPE_MAX = 32, 488 488 }; 489 489
+1
drivers/net/ethernet/netronome/nfp/flower/main.c
··· 781 781 782 782 INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); 783 783 INIT_LIST_HEAD(&app_priv->non_repr_priv); 784 + app_priv->pre_tun_rule_cnt = 0; 784 785 785 786 return 0; 786 787
+19
drivers/net/ethernet/netronome/nfp/flower/main.h
··· 42 42 #define NFP_FL_FEATS_VLAN_PCP BIT(3) 43 43 #define NFP_FL_FEATS_VF_RLIM BIT(4) 44 44 #define NFP_FL_FEATS_FLOW_MOD BIT(5) 45 + #define NFP_FL_FEATS_PRE_TUN_RULES BIT(6) 45 46 #define NFP_FL_FEATS_FLOW_MERGE BIT(30) 46 47 #define NFP_FL_FEATS_LAG BIT(31) 47 48 ··· 163 162 * @qos_stats_work: Workqueue for qos stats processing 164 163 * @qos_rate_limiters: Current active qos rate limiters 165 164 * @qos_stats_lock: Lock on qos stats updates 165 + * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded 166 166 */ 167 167 struct nfp_flower_priv { 168 168 struct nfp_app *app; ··· 195 193 struct delayed_work qos_stats_work; 196 194 unsigned int qos_rate_limiters; 197 195 spinlock_t qos_stats_lock; /* Protect the qos stats */ 196 + int pre_tun_rule_cnt; 198 197 }; 199 198 200 199 /** ··· 221 218 * @block_shared: Flag indicating if offload applies to shared blocks 222 219 * @mac_list: List entry of reprs that share the same offloaded MAC 223 220 * @qos_table: Stored info on filters implementing qos 221 + * @on_bridge: Indicates if the repr is attached to a bridge 224 222 */ 225 223 struct nfp_flower_repr_priv { 226 224 struct nfp_repr *nfp_repr; ··· 231 227 bool block_shared; 232 228 struct list_head mac_list; 233 229 struct nfp_fl_qos qos_table; 230 + bool on_bridge; 234 231 }; 235 232 236 233 /** ··· 285 280 char *action_data; 286 281 struct list_head linked_flows; 287 282 bool in_hw; 283 + struct { 284 + struct net_device *dev; 285 + __be16 vlan_tci; 286 + __be16 port_idx; 287 + } pre_tun_rule; 288 288 }; 289 289 290 290 struct nfp_fl_payload_link { ··· 341 331 static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay) 342 332 { 343 333 return flow_pay->tc_flower_cookie == (unsigned long)flow_pay; 334 + } 335 + 336 + static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev) 337 + { 338 + return netif_is_ovs_master(netdev); 344 339 } 345 340 346 341 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, ··· 
430 415 nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev); 431 416 u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, 432 417 struct net_device *netdev); 418 + int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, 419 + struct nfp_fl_payload *flow); 420 + int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, 421 + struct nfp_fl_payload *flow); 433 422 #endif
+178 -8
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 61 61 NFP_FLOWER_LAYER_IPV4 | \ 62 62 NFP_FLOWER_LAYER_IPV6) 63 63 64 + #define NFP_FLOWER_PRE_TUN_RULE_FIELDS \ 65 + (NFP_FLOWER_LAYER_PORT | \ 66 + NFP_FLOWER_LAYER_MAC | \ 67 + NFP_FLOWER_LAYER_IPV4) 68 + 64 69 struct nfp_flower_merge_check { 65 70 union { 66 71 struct { ··· 494 489 flow_pay->meta.flags = 0; 495 490 INIT_LIST_HEAD(&flow_pay->linked_flows); 496 491 flow_pay->in_hw = false; 492 + flow_pay->pre_tun_rule.dev = NULL; 497 493 498 494 return flow_pay; 499 495 ··· 738 732 return act_off; 739 733 } 740 734 741 - static int nfp_fl_verify_post_tun_acts(char *acts, int len) 735 + static int 736 + nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan) 742 737 { 743 738 struct nfp_fl_act_head *a; 744 739 unsigned int act_off = 0; 745 740 746 741 while (act_off < len) { 747 742 a = (struct nfp_fl_act_head *)&acts[act_off]; 748 - if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) 743 + 744 + if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off) 745 + *vlan = (struct nfp_fl_push_vlan *)a; 746 + else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) 749 747 return -EOPNOTSUPP; 750 748 751 749 act_off += a->len_lw << NFP_FL_LW_SIZ; 752 750 } 753 751 752 + /* Ensure any VLAN push also has an egress action. 
*/ 753 + if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan)) 754 + return -EOPNOTSUPP; 755 + 754 756 return 0; 757 + } 758 + 759 + static int 760 + nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan) 761 + { 762 + struct nfp_fl_set_ipv4_tun *tun; 763 + struct nfp_fl_act_head *a; 764 + unsigned int act_off = 0; 765 + 766 + while (act_off < len) { 767 + a = (struct nfp_fl_act_head *)&acts[act_off]; 768 + 769 + if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) { 770 + tun = (struct nfp_fl_set_ipv4_tun *)a; 771 + tun->outer_vlan_tpid = vlan->vlan_tpid; 772 + tun->outer_vlan_tci = vlan->vlan_tci; 773 + 774 + return 0; 775 + } 776 + 777 + act_off += a->len_lw << NFP_FL_LW_SIZ; 778 + } 779 + 780 + /* Return error if no tunnel action is found. */ 781 + return -EOPNOTSUPP; 755 782 } 756 783 757 784 static int ··· 793 754 struct nfp_fl_payload *merge_flow) 794 755 { 795 756 unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2; 757 + struct nfp_fl_push_vlan *post_tun_push_vlan = NULL; 796 758 bool tunnel_act = false; 797 759 char *merge_act; 798 760 int err; ··· 830 790 sub2_act_len -= pre_off2; 831 791 832 792 /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes 833 - * a tunnel, sub_flow 2 can only have output actions for a valid merge. 793 + * a tunnel, there are restrictions on what sub_flow 2 actions lead to a 794 + * valid merge. 834 795 */ 835 796 if (tunnel_act) { 836 797 char *post_tun_acts = &sub_flow2->action_data[pre_off2]; 837 798 838 - err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len); 799 + err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len, 800 + &post_tun_push_vlan); 839 801 if (err) 840 802 return err; 803 + 804 + if (post_tun_push_vlan) { 805 + pre_off2 += sizeof(*post_tun_push_vlan); 806 + sub2_act_len -= sizeof(*post_tun_push_vlan); 807 + } 841 808 } 842 809 843 810 /* Copy remaining actions from sub_flows 1 and 2. 
*/ 844 811 memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len); 812 + 813 + if (post_tun_push_vlan) { 814 + /* Update tunnel action in merge to include VLAN push. */ 815 + err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len, 816 + post_tun_push_vlan); 817 + if (err) 818 + return err; 819 + 820 + merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan); 821 + } 822 + 845 823 merge_act += sub1_act_len; 846 824 memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len); 847 825 ··· 1003 945 } 1004 946 1005 947 /** 948 + * nfp_flower_validate_pre_tun_rule() 949 + * @app: Pointer to the APP handle 950 + * @flow: Pointer to NFP flow representation of rule 951 + * @extack: Netlink extended ACK report 952 + * 953 + * Verifies the flow as a pre-tunnel rule. 954 + * 955 + * Return: negative value on error, 0 if verified. 956 + */ 957 + static int 958 + nfp_flower_validate_pre_tun_rule(struct nfp_app *app, 959 + struct nfp_fl_payload *flow, 960 + struct netlink_ext_ack *extack) 961 + { 962 + struct nfp_flower_meta_tci *meta_tci; 963 + struct nfp_flower_mac_mpls *mac; 964 + struct nfp_fl_act_head *act; 965 + u8 *mask = flow->mask_data; 966 + bool vlan = false; 967 + int act_offset; 968 + u8 key_layer; 969 + 970 + meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data; 971 + if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { 972 + u16 vlan_tci = be16_to_cpu(meta_tci->tci); 973 + 974 + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; 975 + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); 976 + vlan = true; 977 + } else { 978 + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); 979 + } 980 + 981 + key_layer = meta_tci->nfp_flow_key_layer; 982 + if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) { 983 + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); 984 + return -EOPNOTSUPP; 985 + } 986 + 987 + if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { 988 + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC 
fields match required"); 989 + return -EOPNOTSUPP; 990 + } 991 + 992 + /* Skip fields known to exist. */ 993 + mask += sizeof(struct nfp_flower_meta_tci); 994 + mask += sizeof(struct nfp_flower_in_port); 995 + 996 + /* Ensure destination MAC address is fully matched. */ 997 + mac = (struct nfp_flower_mac_mpls *)mask; 998 + if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { 999 + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked"); 1000 + return -EOPNOTSUPP; 1001 + } 1002 + 1003 + if (key_layer & NFP_FLOWER_LAYER_IPV4) { 1004 + int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags); 1005 + int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto); 1006 + int i; 1007 + 1008 + mask += sizeof(struct nfp_flower_mac_mpls); 1009 + 1010 + /* Ensure proto and flags are the only IP layer fields. */ 1011 + for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++) 1012 + if (mask[i] && i != ip_flags && i != ip_proto) { 1013 + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header"); 1014 + return -EOPNOTSUPP; 1015 + } 1016 + } 1017 + 1018 + /* Action must be a single egress or pop_vlan and egress. 
*/ 1019 + act_offset = 0; 1020 + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; 1021 + if (vlan) { 1022 + if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) { 1023 + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action"); 1024 + return -EOPNOTSUPP; 1025 + } 1026 + 1027 + act_offset += act->len_lw << NFP_FL_LW_SIZ; 1028 + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; 1029 + } 1030 + 1031 + if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) { 1032 + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected"); 1033 + return -EOPNOTSUPP; 1034 + } 1035 + 1036 + act_offset += act->len_lw << NFP_FL_LW_SIZ; 1037 + 1038 + /* Ensure there are no more actions after egress. */ 1039 + if (act_offset != flow->meta.act_len) { 1040 + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action"); 1041 + return -EOPNOTSUPP; 1042 + } 1043 + 1044 + return 0; 1045 + } 1046 + 1047 + /** 1006 1048 * nfp_flower_add_offload() - Adds a new flow to hardware. 1007 1049 * @app: Pointer to the APP handle 1008 1050 * @netdev: netdev structure. 
··· 1152 994 if (err) 1153 995 goto err_destroy_flow; 1154 996 997 + if (flow_pay->pre_tun_rule.dev) { 998 + err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack); 999 + if (err) 1000 + goto err_destroy_flow; 1001 + } 1002 + 1155 1003 err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack); 1156 1004 if (err) 1157 1005 goto err_destroy_flow; ··· 1170 1006 goto err_release_metadata; 1171 1007 } 1172 1008 1173 - err = nfp_flower_xmit_flow(app, flow_pay, 1174 - NFP_FLOWER_CMSG_TYPE_FLOW_ADD); 1009 + if (flow_pay->pre_tun_rule.dev) 1010 + err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); 1011 + else 1012 + err = nfp_flower_xmit_flow(app, flow_pay, 1013 + NFP_FLOWER_CMSG_TYPE_FLOW_ADD); 1175 1014 if (err) 1176 1015 goto err_remove_rhash; 1177 1016 ··· 1316 1149 goto err_free_merge_flow; 1317 1150 } 1318 1151 1319 - err = nfp_flower_xmit_flow(app, nfp_flow, 1320 - NFP_FLOWER_CMSG_TYPE_FLOW_DEL); 1152 + if (nfp_flow->pre_tun_rule.dev) 1153 + err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow); 1154 + else 1155 + err = nfp_flower_xmit_flow(app, nfp_flow, 1156 + NFP_FLOWER_CMSG_TYPE_FLOW_DEL); 1321 1157 /* Fall through on error. */ 1322 1158 1323 1159 err_free_merge_flow:
+184 -16
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
··· 15 15 16 16 #define NFP_FL_MAX_ROUTES 32 17 17 18 + #define NFP_TUN_PRE_TUN_RULE_LIMIT 32 19 + #define NFP_TUN_PRE_TUN_RULE_DEL 0x1 20 + #define NFP_TUN_PRE_TUN_IDX_BIT 0x8 21 + 22 + /** 23 + * struct nfp_tun_pre_run_rule - rule matched before decap 24 + * @flags: options for the rule offset 25 + * @port_idx: index of destination MAC address for the rule 26 + * @vlan_tci: VLAN info associated with MAC 27 + * @host_ctx_id: stats context of rule to update 28 + */ 29 + struct nfp_tun_pre_tun_rule { 30 + __be32 flags; 31 + __be16 port_idx; 32 + __be16 vlan_tci; 33 + __be32 host_ctx_id; 34 + }; 35 + 18 36 /** 19 37 * struct nfp_tun_active_tuns - periodic message of active tunnels 20 38 * @seq: sequence number of the message ··· 142 124 143 125 /** 144 126 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC 145 - * @ht_node: Hashtable entry 146 - * @addr: Offloaded MAC address 147 - * @index: Offloaded index for given MAC address 148 - * @ref_count: Number of devs using this MAC address 149 - * @repr_list: List of reprs sharing this MAC address 127 + * @ht_node: Hashtable entry 128 + * @addr: Offloaded MAC address 129 + * @index: Offloaded index for given MAC address 130 + * @ref_count: Number of devs using this MAC address 131 + * @repr_list: List of reprs sharing this MAC address 132 + * @bridge_count: Number of bridge/internal devs with MAC 150 133 */ 151 134 struct nfp_tun_offloaded_mac { 152 135 struct rhash_head ht_node; ··· 155 136 u16 index; 156 137 int ref_count; 157 138 struct list_head repr_list; 139 + int bridge_count; 158 140 }; 159 141 160 142 static const struct rhashtable_params offloaded_macs_params = { ··· 576 556 list_del(&repr_priv->mac_list); 577 557 578 558 list_add_tail(&repr_priv->mac_list, &entry->repr_list); 559 + } else if (nfp_flower_is_supported_bridge(netdev)) { 560 + entry->bridge_count++; 579 561 } 580 562 581 563 entry->ref_count++; ··· 594 572 595 573 entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); 
596 574 if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { 597 - nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod); 598 - return 0; 575 + if (entry->bridge_count || 576 + !nfp_flower_is_supported_bridge(netdev)) { 577 + nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, 578 + netdev, mod); 579 + return 0; 580 + } 581 + 582 + /* MAC is global but matches need to go to pre_tun table. */ 583 + nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT; 599 584 } 600 585 601 - /* Assign a global index if non-repr or MAC address is now shared. */ 602 - if (entry || !port) { 603 - ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, 604 - NFP_MAX_MAC_INDEX, GFP_KERNEL); 605 - if (ida_idx < 0) 606 - return ida_idx; 586 + if (!nfp_mac_idx) { 587 + /* Assign a global index if non-repr or MAC is now shared. */ 588 + if (entry || !port) { 589 + ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, 590 + NFP_MAX_MAC_INDEX, GFP_KERNEL); 591 + if (ida_idx < 0) 592 + return ida_idx; 607 593 608 - nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); 609 - } else { 610 - nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port); 594 + nfp_mac_idx = 595 + nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); 596 + 597 + if (nfp_flower_is_supported_bridge(netdev)) 598 + nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT; 599 + 600 + } else { 601 + nfp_mac_idx = 602 + nfp_tunnel_get_mac_idx_from_phy_port_id(port); 603 + } 611 604 } 612 605 613 606 if (!entry) { ··· 691 654 list_del(&repr_priv->mac_list); 692 655 } 693 656 657 + if (nfp_flower_is_supported_bridge(netdev)) { 658 + entry->bridge_count--; 659 + 660 + if (!entry->bridge_count && entry->ref_count) { 661 + u16 nfp_mac_idx; 662 + 663 + nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT; 664 + if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, 665 + false)) { 666 + nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n", 667 + netdev_name(netdev)); 668 + return 0; 669 + } 670 + 671 + entry->index = 
nfp_mac_idx; 672 + return 0; 673 + } 674 + } 675 + 694 676 /* If MAC is now used by 1 repr set the offloaded MAC index to port. */ 695 677 if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) { 696 678 u16 nfp_mac_idx; ··· 769 713 return 0; 770 714 771 715 repr_priv = repr->app_priv; 716 + if (repr_priv->on_bridge) 717 + return 0; 718 + 772 719 mac_offloaded = &repr_priv->mac_offloaded; 773 720 off_mac = &repr_priv->offloaded_mac_addr[0]; 774 721 port = nfp_repr_get_port_id(netdev); ··· 887 828 if (err) 888 829 nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n", 889 830 netdev_name(netdev)); 831 + } else if (event == NETDEV_CHANGEUPPER) { 832 + /* If a repr is attached to a bridge then tunnel packets 833 + * entering the physical port are directed through the bridge 834 + * datapath and cannot be directly detunneled. Therefore, 835 + * associated offloaded MACs and indexes should not be used 836 + * by fw for detunneling. 837 + */ 838 + struct netdev_notifier_changeupper_info *info = ptr; 839 + struct net_device *upper = info->upper_dev; 840 + struct nfp_flower_repr_priv *repr_priv; 841 + struct nfp_repr *repr; 842 + 843 + if (!nfp_netdev_is_nfp_repr(netdev) || 844 + !nfp_flower_is_supported_bridge(upper)) 845 + return NOTIFY_OK; 846 + 847 + repr = netdev_priv(netdev); 848 + if (repr->app != app) 849 + return NOTIFY_OK; 850 + 851 + repr_priv = repr->app_priv; 852 + 853 + if (info->linking) { 854 + if (nfp_tunnel_offload_mac(app, netdev, 855 + NFP_TUNNEL_MAC_OFFLOAD_DEL)) 856 + nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n", 857 + netdev_name(netdev)); 858 + repr_priv->on_bridge = true; 859 + } else { 860 + repr_priv->on_bridge = false; 861 + 862 + if (!(netdev->flags & IFF_UP)) 863 + return NOTIFY_OK; 864 + 865 + if (nfp_tunnel_offload_mac(app, netdev, 866 + NFP_TUNNEL_MAC_OFFLOAD_ADD)) 867 + nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n", 868 + netdev_name(netdev)); 869 + } 890 870 } 891 871 return 
NOTIFY_OK; 872 + } 873 + 874 + int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, 875 + struct nfp_fl_payload *flow) 876 + { 877 + struct nfp_flower_priv *app_priv = app->priv; 878 + struct nfp_tun_offloaded_mac *mac_entry; 879 + struct nfp_tun_pre_tun_rule payload; 880 + struct net_device *internal_dev; 881 + int err; 882 + 883 + if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT) 884 + return -ENOSPC; 885 + 886 + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); 887 + 888 + internal_dev = flow->pre_tun_rule.dev; 889 + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; 890 + payload.host_ctx_id = flow->meta.host_ctx_id; 891 + 892 + /* Lookup MAC index for the pre-tunnel rule egress device. 893 + * Note that because the device is always an internal port, it will 894 + * have a constant global index so does not need to be tracked. 895 + */ 896 + mac_entry = nfp_tunnel_lookup_offloaded_macs(app, 897 + internal_dev->dev_addr); 898 + if (!mac_entry) 899 + return -ENOENT; 900 + 901 + payload.port_idx = cpu_to_be16(mac_entry->index); 902 + 903 + /* Copy mac id and vlan to flow - dev may not exist at delete time. 
*/ 904 + flow->pre_tun_rule.vlan_tci = payload.vlan_tci; 905 + flow->pre_tun_rule.port_idx = payload.port_idx; 906 + 907 + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, 908 + sizeof(struct nfp_tun_pre_tun_rule), 909 + (unsigned char *)&payload, GFP_KERNEL); 910 + if (err) 911 + return err; 912 + 913 + app_priv->pre_tun_rule_cnt++; 914 + 915 + return 0; 916 + } 917 + 918 + int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, 919 + struct nfp_fl_payload *flow) 920 + { 921 + struct nfp_flower_priv *app_priv = app->priv; 922 + struct nfp_tun_pre_tun_rule payload; 923 + u32 tmp_flags = 0; 924 + int err; 925 + 926 + memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule)); 927 + 928 + tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL; 929 + payload.flags = cpu_to_be32(tmp_flags); 930 + payload.vlan_tci = flow->pre_tun_rule.vlan_tci; 931 + payload.port_idx = flow->pre_tun_rule.port_idx; 932 + 933 + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, 934 + sizeof(struct nfp_tun_pre_tun_rule), 935 + (unsigned char *)&payload, GFP_KERNEL); 936 + if (err) 937 + return err; 938 + 939 + app_priv->pre_tun_rule_cnt--; 940 + 941 + return 0; 892 942 } 893 943 894 944 int nfp_tunnel_config_start(struct nfp_app *app)
+4
include/net/flow_offload.h
··· 117 117 FLOW_ACTION_GOTO, 118 118 FLOW_ACTION_REDIRECT, 119 119 FLOW_ACTION_MIRRED, 120 + FLOW_ACTION_REDIRECT_INGRESS, 121 + FLOW_ACTION_MIRRED_INGRESS, 120 122 FLOW_ACTION_VLAN_PUSH, 121 123 FLOW_ACTION_VLAN_POP, 122 124 FLOW_ACTION_VLAN_MANGLE, ··· 128 126 FLOW_ACTION_ADD, 129 127 FLOW_ACTION_CSUM, 130 128 FLOW_ACTION_MARK, 129 + FLOW_ACTION_PTYPE, 131 130 FLOW_ACTION_WAKE, 132 131 FLOW_ACTION_QUEUE, 133 132 FLOW_ACTION_SAMPLE, ··· 171 168 const struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */ 172 169 u32 csum_flags; /* FLOW_ACTION_CSUM */ 173 170 u32 mark; /* FLOW_ACTION_MARK */ 171 + u16 ptype; /* FLOW_ACTION_PTYPE */ 174 172 struct { /* FLOW_ACTION_QUEUE */ 175 173 u32 ctx; 176 174 u32 index;
+18
include/net/tc_act/tc_mirred.h
··· 32 32 return false; 33 33 } 34 34 35 + static inline bool is_tcf_mirred_ingress_redirect(const struct tc_action *a) 36 + { 37 + #ifdef CONFIG_NET_CLS_ACT 38 + if (a->ops && a->ops->id == TCA_ID_MIRRED) 39 + return to_mirred(a)->tcfm_eaction == TCA_INGRESS_REDIR; 40 + #endif 41 + return false; 42 + } 43 + 44 + static inline bool is_tcf_mirred_ingress_mirror(const struct tc_action *a) 45 + { 46 + #ifdef CONFIG_NET_CLS_ACT 47 + if (a->ops && a->ops->id == TCA_ID_MIRRED) 48 + return to_mirred(a)->tcfm_eaction == TCA_INGRESS_MIRROR; 49 + #endif 50 + return false; 51 + } 52 + 35 53 static inline struct net_device *tcf_mirred_dev(const struct tc_action *a) 36 54 { 37 55 return rtnl_dereference(to_mirred(a)->tcfm_dev);
+27
include/net/tc_act/tc_skbedit.h
··· 54 54 return mark; 55 55 } 56 56 57 + /* Return true iff action is ptype */ 58 + static inline bool is_tcf_skbedit_ptype(const struct tc_action *a) 59 + { 60 + #ifdef CONFIG_NET_CLS_ACT 61 + u32 flags; 62 + 63 + if (a->ops && a->ops->id == TCA_ID_SKBEDIT) { 64 + rcu_read_lock(); 65 + flags = rcu_dereference(to_skbedit(a)->params)->flags; 66 + rcu_read_unlock(); 67 + return flags == SKBEDIT_F_PTYPE; 68 + } 69 + #endif 70 + return false; 71 + } 72 + 73 + static inline u32 tcf_skbedit_ptype(const struct tc_action *a) 74 + { 75 + u16 ptype; 76 + 77 + rcu_read_lock(); 78 + ptype = rcu_dereference(to_skbedit(a)->params)->ptype; 79 + rcu_read_unlock(); 80 + 81 + return ptype; 82 + } 83 + 57 84 #endif /* __NET_TC_SKBEDIT_H */
+9
net/sched/cls_api.c
··· 3205 3205 } else if (is_tcf_mirred_egress_mirror(act)) { 3206 3206 entry->id = FLOW_ACTION_MIRRED; 3207 3207 entry->dev = tcf_mirred_dev(act); 3208 + } else if (is_tcf_mirred_ingress_redirect(act)) { 3209 + entry->id = FLOW_ACTION_REDIRECT_INGRESS; 3210 + entry->dev = tcf_mirred_dev(act); 3211 + } else if (is_tcf_mirred_ingress_mirror(act)) { 3212 + entry->id = FLOW_ACTION_MIRRED_INGRESS; 3213 + entry->dev = tcf_mirred_dev(act); 3208 3214 } else if (is_tcf_vlan(act)) { 3209 3215 switch (tcf_vlan_action(act)) { 3210 3216 case TCA_VLAN_ACT_PUSH: ··· 3300 3294 default: 3301 3295 goto err_out; 3302 3296 } 3297 + } else if (is_tcf_skbedit_ptype(act)) { 3298 + entry->id = FLOW_ACTION_PTYPE; 3299 + entry->ptype = tcf_skbedit_ptype(act); 3303 3300 } else { 3304 3301 goto err_out; 3305 3302 }