Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'flow-mpls'

Guillaume Nault says:

====================
flow_dissector, cls_flower: Add support for multiple MPLS Label Stack Entries

Currently, the flow dissector and the Flower classifier can only handle
the first entry of an MPLS label stack. This patch series generalises
the code to allow parsing and matching the Label Stack Entries that
follow.

Patch 1 extends the flow dissector to parse MPLS LSEs until the Bottom
Of Stack bit is reached. The number of parsed LSEs is capped at
FLOW_DIS_MPLS_MAX (arbitrarily set to 7). Flower and the NFP driver
are updated to take into account the new layout of struct
flow_dissector_key_mpls.

Patch 2 extends Flower. It defines new netlink attributes, which are
independent from the previous MPLS ones. Mixing the old and the new
attributes in the same filter is not allowed. For backward compatibility,
the old attributes are used when dumping filters that don't require the
new ones.

Changes since v2:
* Fix compilation with the new MLX5 bareudp tunnel code.

Changes since v1:
* Fix compilation of NFP driver (kbuild test robot).
* Fix sparse warning with entropy label (kbuild test robot).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+399 -55
+21 -10
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
··· 101 101 102 102 flow_rule_match_mpls(rule, &match); 103 103 104 - MLX5_SET(fte_match_set_misc2, misc2_c, 105 - outer_first_mpls_over_udp.mpls_label, match.mask->mpls_label); 106 - MLX5_SET(fte_match_set_misc2, misc2_v, 107 - outer_first_mpls_over_udp.mpls_label, match.key->mpls_label); 104 + /* Only support matching the first LSE */ 105 + if (match.mask->used_lses != 1) 106 + return -EOPNOTSUPP; 108 107 109 108 MLX5_SET(fte_match_set_misc2, misc2_c, 110 - outer_first_mpls_over_udp.mpls_exp, match.mask->mpls_tc); 109 + outer_first_mpls_over_udp.mpls_label, 110 + match.mask->ls[0].mpls_label); 111 111 MLX5_SET(fte_match_set_misc2, misc2_v, 112 - outer_first_mpls_over_udp.mpls_exp, match.key->mpls_tc); 112 + outer_first_mpls_over_udp.mpls_label, 113 + match.key->ls[0].mpls_label); 113 114 114 115 MLX5_SET(fte_match_set_misc2, misc2_c, 115 - outer_first_mpls_over_udp.mpls_s_bos, match.mask->mpls_bos); 116 + outer_first_mpls_over_udp.mpls_exp, 117 + match.mask->ls[0].mpls_tc); 116 118 MLX5_SET(fte_match_set_misc2, misc2_v, 117 - outer_first_mpls_over_udp.mpls_s_bos, match.key->mpls_bos); 119 + outer_first_mpls_over_udp.mpls_exp, match.key->ls[0].mpls_tc); 118 120 119 121 MLX5_SET(fte_match_set_misc2, misc2_c, 120 - outer_first_mpls_over_udp.mpls_ttl, match.mask->mpls_ttl); 122 + outer_first_mpls_over_udp.mpls_s_bos, 123 + match.mask->ls[0].mpls_bos); 121 124 MLX5_SET(fte_match_set_misc2, misc2_v, 122 - outer_first_mpls_over_udp.mpls_ttl, match.key->mpls_ttl); 125 + outer_first_mpls_over_udp.mpls_s_bos, 126 + match.key->ls[0].mpls_bos); 127 + 128 + MLX5_SET(fte_match_set_misc2, misc2_c, 129 + outer_first_mpls_over_udp.mpls_ttl, 130 + match.mask->ls[0].mpls_ttl); 131 + MLX5_SET(fte_match_set_misc2, misc2_v, 132 + outer_first_mpls_over_udp.mpls_ttl, 133 + match.key->ls[0].mpls_ttl); 123 134 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; 124 135 125 136 return 0;
+31 -11
drivers/net/ethernet/netronome/nfp/flower/match.c
··· 74 74 return 0; 75 75 } 76 76 77 - static void 77 + static int 78 78 nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, 79 - struct nfp_flower_mac_mpls *msk, struct flow_rule *rule) 79 + struct nfp_flower_mac_mpls *msk, struct flow_rule *rule, 80 + struct netlink_ext_ack *extack) 80 81 { 81 82 memset(ext, 0, sizeof(struct nfp_flower_mac_mpls)); 82 83 memset(msk, 0, sizeof(struct nfp_flower_mac_mpls)); ··· 98 97 u32 t_mpls; 99 98 100 99 flow_rule_match_mpls(rule, &match); 101 - t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) | 102 - FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) | 103 - FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) | 100 + 101 + /* Only support matching the first LSE */ 102 + if (match.mask->used_lses != 1) { 103 + NL_SET_ERR_MSG_MOD(extack, 104 + "unsupported offload: invalid LSE depth for MPLS match offload"); 105 + return -EOPNOTSUPP; 106 + } 107 + 108 + t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, 109 + match.key->ls[0].mpls_label) | 110 + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, 111 + match.key->ls[0].mpls_tc) | 112 + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, 113 + match.key->ls[0].mpls_bos) | 104 114 NFP_FLOWER_MASK_MPLS_Q; 105 115 ext->mpls_lse = cpu_to_be32(t_mpls); 106 - t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) | 107 - FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) | 108 - FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) | 116 + t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, 117 + match.mask->ls[0].mpls_label) | 118 + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, 119 + match.mask->ls[0].mpls_tc) | 120 + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, 121 + match.mask->ls[0].mpls_bos) | 109 122 NFP_FLOWER_MASK_MPLS_Q; 110 123 msk->mpls_lse = cpu_to_be32(t_mpls); 111 124 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { ··· 136 121 msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); 137 122 } 138 123 } 124 + 125 + return 0; 139 126 } 140 127 141 128 static void 
··· 478 461 msk += sizeof(struct nfp_flower_in_port); 479 462 480 463 if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) { 481 - nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, 482 - (struct nfp_flower_mac_mpls *)msk, 483 - rule); 464 + err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, 465 + (struct nfp_flower_mac_mpls *)msk, 466 + rule, extack); 467 + if (err) 468 + return err; 469 + 484 470 ext += sizeof(struct nfp_flower_mac_mpls); 485 471 msk += sizeof(struct nfp_flower_mac_mpls); 486 472 }
+13 -1
include/net/flow_dissector.h
··· 59 59 __be16 vlan_tpid; 60 60 }; 61 61 62 - struct flow_dissector_key_mpls { 62 + struct flow_dissector_mpls_lse { 63 63 u32 mpls_ttl:8, 64 64 mpls_bos:1, 65 65 mpls_tc:3, 66 66 mpls_label:20; 67 67 }; 68 + 69 + #define FLOW_DIS_MPLS_MAX 7 70 + struct flow_dissector_key_mpls { 71 + struct flow_dissector_mpls_lse ls[FLOW_DIS_MPLS_MAX]; /* Label Stack */ 72 + u8 used_lses; /* One bit set for each Label Stack Entry in use */ 73 + }; 74 + 75 + static inline void dissector_set_mpls_lse(struct flow_dissector_key_mpls *mpls, 76 + int lse_index) 77 + { 78 + mpls->used_lses |= 1 << lse_index; 79 + } 68 80 69 81 #define FLOW_DIS_TUN_OPTS_MAX 255 70 82 /**
+23
include/uapi/linux/pkt_cls.h
··· 576 576 TCA_FLOWER_KEY_CT_LABELS, /* u128 */ 577 577 TCA_FLOWER_KEY_CT_LABELS_MASK, /* u128 */ 578 578 579 + TCA_FLOWER_KEY_MPLS_OPTS, 580 + 579 581 __TCA_FLOWER_MAX, 580 582 }; 581 583 ··· 641 639 642 640 #define TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX \ 643 641 (__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1) 642 + 643 + enum { 644 + TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC, 645 + TCA_FLOWER_KEY_MPLS_OPTS_LSE, 646 + __TCA_FLOWER_KEY_MPLS_OPTS_MAX, 647 + }; 648 + 649 + #define TCA_FLOWER_KEY_MPLS_OPTS_MAX (__TCA_FLOWER_KEY_MPLS_OPTS_MAX - 1) 650 + 651 + enum { 652 + TCA_FLOWER_KEY_MPLS_OPT_LSE_UNSPEC, 653 + TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, 654 + TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, 655 + TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, 656 + TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, 657 + TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, 658 + __TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, 659 + }; 660 + 661 + #define TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX \ 662 + (__TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX - 1) 644 663 645 664 enum { 646 665 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
+33 -16
net/core/flow_dissector.c
··· 480 480 static enum flow_dissect_ret 481 481 __skb_flow_dissect_mpls(const struct sk_buff *skb, 482 482 struct flow_dissector *flow_dissector, 483 - void *target_container, void *data, int nhoff, int hlen) 483 + void *target_container, void *data, int nhoff, int hlen, 484 + int lse_index, bool *entropy_label) 484 485 { 485 - struct flow_dissector_key_keyid *key_keyid; 486 - struct mpls_label *hdr, _hdr[2]; 487 - u32 entry, label; 486 + struct mpls_label *hdr, _hdr; 487 + u32 entry, label, bos; 488 488 489 489 if (!dissector_uses_key(flow_dissector, 490 490 FLOW_DISSECTOR_KEY_MPLS_ENTROPY) && 491 491 !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) 492 + return FLOW_DISSECT_RET_OUT_GOOD; 493 + 494 + if (lse_index >= FLOW_DIS_MPLS_MAX) 492 495 return FLOW_DISSECT_RET_OUT_GOOD; 493 496 494 497 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, ··· 499 496 if (!hdr) 500 497 return FLOW_DISSECT_RET_OUT_BAD; 501 498 502 - entry = ntohl(hdr[0].entry); 499 + entry = ntohl(hdr->entry); 503 500 label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT; 501 + bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT; 504 502 505 503 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) { 506 504 struct flow_dissector_key_mpls *key_mpls; 505 + struct flow_dissector_mpls_lse *lse; 507 506 508 507 key_mpls = skb_flow_dissector_target(flow_dissector, 509 508 FLOW_DISSECTOR_KEY_MPLS, 510 509 target_container); 511 - key_mpls->mpls_label = label; 512 - key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK) 513 - >> MPLS_LS_TTL_SHIFT; 514 - key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK) 515 - >> MPLS_LS_TC_SHIFT; 516 - key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK) 517 - >> MPLS_LS_S_SHIFT; 510 + lse = &key_mpls->ls[lse_index]; 511 + 512 + lse->mpls_ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 513 + lse->mpls_bos = bos; 514 + lse->mpls_tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT; 515 + lse->mpls_label = label; 516 + 
dissector_set_mpls_lse(key_mpls, lse_index); 518 517 } 519 518 520 - if (label == MPLS_LABEL_ENTROPY) { 519 + if (*entropy_label && 520 + dissector_uses_key(flow_dissector, 521 + FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) { 522 + struct flow_dissector_key_keyid *key_keyid; 523 + 521 524 key_keyid = skb_flow_dissector_target(flow_dissector, 522 525 FLOW_DISSECTOR_KEY_MPLS_ENTROPY, 523 526 target_container); 524 - key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK); 527 + key_keyid->keyid = cpu_to_be32(label); 525 528 } 526 - return FLOW_DISSECT_RET_OUT_GOOD; 529 + 530 + *entropy_label = label == MPLS_LABEL_ENTROPY; 531 + 532 + return bos ? FLOW_DISSECT_RET_OUT_GOOD : FLOW_DISSECT_RET_PROTO_AGAIN; 527 533 } 528 534 529 535 static enum flow_dissect_ret ··· 991 979 struct bpf_prog *attached = NULL; 992 980 enum flow_dissect_ret fdret; 993 981 enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX; 982 + bool mpls_el = false; 983 + int mpls_lse = 0; 994 984 int num_hdrs = 0; 995 985 u8 ip_proto = 0; 996 986 bool ret; ··· 1292 1278 case htons(ETH_P_MPLS_MC): 1293 1279 fdret = __skb_flow_dissect_mpls(skb, flow_dissector, 1294 1280 target_container, data, 1295 - nhoff, hlen); 1281 + nhoff, hlen, mpls_lse, 1282 + &mpls_el); 1283 + nhoff += sizeof(struct mpls_label); 1284 + mpls_lse++; 1296 1285 break; 1297 1286 case htons(ETH_P_FCOE): 1298 1287 if ((hlen - nhoff) < FCOE_HEADER_LEN) {
+278 -17
net/sched/cls_flower.c
··· 668 668 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 }, 669 669 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 }, 670 670 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 }, 671 + [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED }, 671 672 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 }, 672 673 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 }, 673 674 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 }, ··· 727 726 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, 728 727 }; 729 728 729 + static const struct nla_policy 730 + mpls_opts_policy[TCA_FLOWER_KEY_MPLS_OPTS_MAX + 1] = { 731 + [TCA_FLOWER_KEY_MPLS_OPTS_LSE] = { .type = NLA_NESTED }, 732 + }; 733 + 734 + static const struct nla_policy 735 + mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { 736 + [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 }, 737 + [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 }, 738 + [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 }, 739 + [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 }, 740 + [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 }, 741 + }; 742 + 730 743 static void fl_set_key_val(struct nlattr **tb, 731 744 void *val, int val_type, 732 745 void *mask, int mask_type, int len) ··· 791 776 return 0; 792 777 } 793 778 779 + static int fl_set_key_mpls_lse(const struct nlattr *nla_lse, 780 + struct flow_dissector_key_mpls *key_val, 781 + struct flow_dissector_key_mpls *key_mask, 782 + struct netlink_ext_ack *extack) 783 + { 784 + struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1]; 785 + struct flow_dissector_mpls_lse *lse_mask; 786 + struct flow_dissector_mpls_lse *lse_val; 787 + u8 lse_index; 788 + u8 depth; 789 + int err; 790 + 791 + err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse, 792 + mpls_stack_entry_policy, extack); 793 + if (err < 0) 794 + return err; 795 + 796 + if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) { 797 + NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\""); 798 + return -EINVAL; 
799 + } 800 + 801 + depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]); 802 + 803 + /* LSE depth starts at 1, for consistency with terminology used by 804 + * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets. 805 + */ 806 + if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) { 807 + NL_SET_ERR_MSG_ATTR(extack, 808 + tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH], 809 + "Invalid MPLS depth"); 810 + return -EINVAL; 811 + } 812 + lse_index = depth - 1; 813 + 814 + dissector_set_mpls_lse(key_val, lse_index); 815 + dissector_set_mpls_lse(key_mask, lse_index); 816 + 817 + lse_val = &key_val->ls[lse_index]; 818 + lse_mask = &key_mask->ls[lse_index]; 819 + 820 + if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) { 821 + lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]); 822 + lse_mask->mpls_ttl = MPLS_TTL_MASK; 823 + } 824 + if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) { 825 + u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]); 826 + 827 + if (bos & ~MPLS_BOS_MASK) { 828 + NL_SET_ERR_MSG_ATTR(extack, 829 + tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS], 830 + "Bottom Of Stack (BOS) must be 0 or 1"); 831 + return -EINVAL; 832 + } 833 + lse_val->mpls_bos = bos; 834 + lse_mask->mpls_bos = MPLS_BOS_MASK; 835 + } 836 + if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) { 837 + u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]); 838 + 839 + if (tc & ~MPLS_TC_MASK) { 840 + NL_SET_ERR_MSG_ATTR(extack, 841 + tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC], 842 + "Traffic Class (TC) must be between 0 and 7"); 843 + return -EINVAL; 844 + } 845 + lse_val->mpls_tc = tc; 846 + lse_mask->mpls_tc = MPLS_TC_MASK; 847 + } 848 + if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) { 849 + u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]); 850 + 851 + if (label & ~MPLS_LABEL_MASK) { 852 + NL_SET_ERR_MSG_ATTR(extack, 853 + tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL], 854 + "Label must be between 0 and 1048575"); 855 + return -EINVAL; 856 + } 857 + lse_val->mpls_label = label; 858 + lse_mask->mpls_label = 
MPLS_LABEL_MASK; 859 + } 860 + 861 + return 0; 862 + } 863 + 864 + static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts, 865 + struct flow_dissector_key_mpls *key_val, 866 + struct flow_dissector_key_mpls *key_mask, 867 + struct netlink_ext_ack *extack) 868 + { 869 + struct nlattr *nla_lse; 870 + int rem; 871 + int err; 872 + 873 + if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) { 874 + NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts, 875 + "NLA_F_NESTED is missing"); 876 + return -EINVAL; 877 + } 878 + 879 + nla_for_each_nested(nla_lse, nla_mpls_opts, rem) { 880 + if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) { 881 + NL_SET_ERR_MSG_ATTR(extack, nla_lse, 882 + "Invalid MPLS option type"); 883 + return -EINVAL; 884 + } 885 + 886 + err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack); 887 + if (err < 0) 888 + return err; 889 + } 890 + if (rem) { 891 + NL_SET_ERR_MSG(extack, 892 + "Bytes leftover after parsing MPLS options"); 893 + return -EINVAL; 894 + } 895 + 896 + return 0; 897 + } 898 + 794 899 static int fl_set_key_mpls(struct nlattr **tb, 795 900 struct flow_dissector_key_mpls *key_val, 796 901 struct flow_dissector_key_mpls *key_mask, 797 902 struct netlink_ext_ack *extack) 798 903 { 904 + struct flow_dissector_mpls_lse *lse_mask; 905 + struct flow_dissector_mpls_lse *lse_val; 906 + 907 + if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) { 908 + if (tb[TCA_FLOWER_KEY_MPLS_TTL] || 909 + tb[TCA_FLOWER_KEY_MPLS_BOS] || 910 + tb[TCA_FLOWER_KEY_MPLS_TC] || 911 + tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 912 + NL_SET_ERR_MSG_ATTR(extack, 913 + tb[TCA_FLOWER_KEY_MPLS_OPTS], 914 + "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute"); 915 + return -EBADMSG; 916 + } 917 + 918 + return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS], 919 + key_val, key_mask, extack); 920 + } 921 + 922 + lse_val = &key_val->ls[0]; 923 + lse_mask = &key_mask->ls[0]; 924 + 799 925 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) { 800 - 
key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]); 801 - key_mask->mpls_ttl = MPLS_TTL_MASK; 926 + lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]); 927 + lse_mask->mpls_ttl = MPLS_TTL_MASK; 928 + dissector_set_mpls_lse(key_val, 0); 929 + dissector_set_mpls_lse(key_mask, 0); 802 930 } 803 931 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) { 804 932 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]); ··· 952 794 "Bottom Of Stack (BOS) must be 0 or 1"); 953 795 return -EINVAL; 954 796 } 955 - key_val->mpls_bos = bos; 956 - key_mask->mpls_bos = MPLS_BOS_MASK; 797 + lse_val->mpls_bos = bos; 798 + lse_mask->mpls_bos = MPLS_BOS_MASK; 799 + dissector_set_mpls_lse(key_val, 0); 800 + dissector_set_mpls_lse(key_mask, 0); 957 801 } 958 802 if (tb[TCA_FLOWER_KEY_MPLS_TC]) { 959 803 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]); ··· 966 806 "Traffic Class (TC) must be between 0 and 7"); 967 807 return -EINVAL; 968 808 } 969 - key_val->mpls_tc = tc; 970 - key_mask->mpls_tc = MPLS_TC_MASK; 809 + lse_val->mpls_tc = tc; 810 + lse_mask->mpls_tc = MPLS_TC_MASK; 811 + dissector_set_mpls_lse(key_val, 0); 812 + dissector_set_mpls_lse(key_mask, 0); 971 813 } 972 814 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 973 815 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]); ··· 980 818 "Label must be between 0 and 1048575"); 981 819 return -EINVAL; 982 820 } 983 - key_val->mpls_label = label; 984 - key_mask->mpls_label = MPLS_LABEL_MASK; 821 + lse_val->mpls_label = label; 822 + lse_mask->mpls_label = MPLS_LABEL_MASK; 823 + dissector_set_mpls_lse(key_val, 0); 824 + dissector_set_mpls_lse(key_mask, 0); 985 825 } 986 826 return 0; 987 827 } ··· 2382 2218 return 0; 2383 2219 } 2384 2220 2221 + static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb, 2222 + struct flow_dissector_key_mpls *mpls_key, 2223 + struct flow_dissector_key_mpls *mpls_mask, 2224 + u8 lse_index) 2225 + { 2226 + struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index]; 2227 + struct flow_dissector_mpls_lse 
*lse_key = &mpls_key->ls[lse_index]; 2228 + int err; 2229 + 2230 + err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, 2231 + lse_index + 1); 2232 + if (err) 2233 + return err; 2234 + 2235 + if (lse_mask->mpls_ttl) { 2236 + err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, 2237 + lse_key->mpls_ttl); 2238 + if (err) 2239 + return err; 2240 + } 2241 + if (lse_mask->mpls_bos) { 2242 + err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, 2243 + lse_key->mpls_bos); 2244 + if (err) 2245 + return err; 2246 + } 2247 + if (lse_mask->mpls_tc) { 2248 + err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, 2249 + lse_key->mpls_tc); 2250 + if (err) 2251 + return err; 2252 + } 2253 + if (lse_mask->mpls_label) { 2254 + err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, 2255 + lse_key->mpls_label); 2256 + if (err) 2257 + return err; 2258 + } 2259 + 2260 + return 0; 2261 + } 2262 + 2263 + static int fl_dump_key_mpls_opts(struct sk_buff *skb, 2264 + struct flow_dissector_key_mpls *mpls_key, 2265 + struct flow_dissector_key_mpls *mpls_mask) 2266 + { 2267 + struct nlattr *opts; 2268 + struct nlattr *lse; 2269 + u8 lse_index; 2270 + int err; 2271 + 2272 + opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS); 2273 + if (!opts) 2274 + return -EMSGSIZE; 2275 + 2276 + for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) { 2277 + if (!(mpls_mask->used_lses & 1 << lse_index)) 2278 + continue; 2279 + 2280 + lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE); 2281 + if (!lse) { 2282 + err = -EMSGSIZE; 2283 + goto err_opts; 2284 + } 2285 + 2286 + err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask, 2287 + lse_index); 2288 + if (err) 2289 + goto err_opts_lse; 2290 + nla_nest_end(skb, lse); 2291 + } 2292 + nla_nest_end(skb, opts); 2293 + 2294 + return 0; 2295 + 2296 + err_opts_lse: 2297 + nla_nest_cancel(skb, lse); 2298 + err_opts: 2299 + nla_nest_cancel(skb, opts); 2300 + 2301 + return err; 2302 + } 2303 + 2385 2304 static int fl_dump_key_mpls(struct sk_buff 
*skb, 2386 2305 struct flow_dissector_key_mpls *mpls_key, 2387 2306 struct flow_dissector_key_mpls *mpls_mask) 2388 2307 { 2308 + struct flow_dissector_mpls_lse *lse_mask; 2309 + struct flow_dissector_mpls_lse *lse_key; 2389 2310 int err; 2390 2311 2391 - if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask))) 2312 + if (!mpls_mask->used_lses) 2392 2313 return 0; 2393 - if (mpls_mask->mpls_ttl) { 2314 + 2315 + lse_mask = &mpls_mask->ls[0]; 2316 + lse_key = &mpls_key->ls[0]; 2317 + 2318 + /* For backward compatibility, don't use the MPLS nested attributes if 2319 + * the rule can be expressed using the old attributes. 2320 + */ 2321 + if (mpls_mask->used_lses & ~1 || 2322 + (!lse_mask->mpls_ttl && !lse_mask->mpls_bos && 2323 + !lse_mask->mpls_tc && !lse_mask->mpls_label)) 2324 + return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask); 2325 + 2326 + if (lse_mask->mpls_ttl) { 2394 2327 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL, 2395 - mpls_key->mpls_ttl); 2328 + lse_key->mpls_ttl); 2396 2329 if (err) 2397 2330 return err; 2398 2331 } 2399 - if (mpls_mask->mpls_tc) { 2332 + if (lse_mask->mpls_tc) { 2400 2333 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC, 2401 - mpls_key->mpls_tc); 2334 + lse_key->mpls_tc); 2402 2335 if (err) 2403 2336 return err; 2404 2337 } 2405 - if (mpls_mask->mpls_label) { 2338 + if (lse_mask->mpls_label) { 2406 2339 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL, 2407 - mpls_key->mpls_label); 2340 + lse_key->mpls_label); 2408 2341 if (err) 2409 2342 return err; 2410 2343 } 2411 - if (mpls_mask->mpls_bos) { 2344 + if (lse_mask->mpls_bos) { 2412 2345 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS, 2413 - mpls_key->mpls_bos); 2346 + lse_key->mpls_bos); 2414 2347 if (err) 2415 2348 return err; 2416 2349 }