Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'extend-action-skbedit-to-rx-queue-mapping'

Amritha Nambiar says:

====================
Extend action skbedit to RX queue mapping

Based on the discussion on
https://lore.kernel.org/netdev/166260012413.81018.8010396115034847972.stgit@anambiarhost.jf.intel.com/ ,
the following series extends skbedit tc action to RX queue mapping.
Currently, skbedit action in tc allows overriding of transmit queue.
Extending this ability of skbedit action supports the selection of
receive queue for incoming packets. On the receive side, this action
is supported only in hardware, so the skip_sw flag is enforced.

Enabled ice driver to offload this type of filter into the hardware
for accepting packets to the device's receive queue.
====================

Link: https://lore.kernel.org/r/166633888716.52141.3425659377117969638.stgit@anambiarhost.jf.intel.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+389 -112
+1
Documentation/networking/index.rst
··· 104 104 switchdev 105 105 sysfs-tagging 106 106 tc-actions-env-rules 107 + tc-queue-filters 107 108 tcp-thin 108 109 team 109 110 timestamping
+37
Documentation/networking/tc-queue-filters.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 2 + 3 + ========================= 4 + TC queue based filtering 5 + ========================= 6 + 7 + TC can be used for directing traffic to either a set of queues or 8 + to a single queue on both the transmit and receive side. 9 + 10 + On the transmit side: 11 + 12 + 1) TC filter directing traffic to a set of queues is achieved 13 + using the action skbedit priority for Tx priority selection, 14 + the priority maps to a traffic class (set of queues) when 15 + the queue-sets are configured using mqprio. 16 + 17 + 2) TC filter directs traffic to a transmit queue with the action 18 + skbedit queue_mapping $tx_qid. The action skbedit queue_mapping 19 + for transmit queue is executed in software only and cannot be 20 + offloaded. 21 + 22 + Likewise, on the receive side, the two filters for selecting set of 23 + queues and/or a single queue are supported as below: 24 + 25 + 1) TC flower filter directs incoming traffic to a set of queues using 26 + the 'hw_tc' option. 27 + hw_tc $TCID - Specify a hardware traffic class to pass matching 28 + packets on to. TCID is in the range 0 through 15. 29 + 30 + 2) TC filter with action skbedit queue_mapping $rx_qid selects a 31 + receive queue. The action skbedit queue_mapping for receive queue 32 + is supported only in hardware. Multiple filters may compete in 33 + the hardware for queue selection. In such case, the hardware 34 + pipeline resolves conflicts based on priority. On Intel E810 35 + devices, TC filter directing traffic to a queue have higher 36 + priority over flow director filter assigning a queue. The hash 37 + filter has lowest priority.
+15
drivers/net/ethernet/intel/ice/ice.h
··· 137 137 */ 138 138 #define ICE_BW_KBPS_DIVISOR 125 139 139 140 + /* Default recipes have priority 4 and below, hence priority values between 5..7 141 + * can be used as filter priority for advanced switch filter (advanced switch 142 + * filters need new recipe to be created for specified extraction sequence 143 + * because default recipe extraction sequence does not represent custom 144 + * extraction) 145 + */ 146 + #define ICE_SWITCH_FLTR_PRIO_QUEUE 7 147 + /* prio 6 is reserved for future use (e.g. switch filter with L3 fields + 148 + * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as 149 + * SYN/FIN/RST)) 150 + */ 151 + #define ICE_SWITCH_FLTR_PRIO_RSVD 6 152 + #define ICE_SWITCH_FLTR_PRIO_VSI 5 153 + #define ICE_SWITCH_FLTR_PRIO_QGRP ICE_SWITCH_FLTR_PRIO_VSI 154 + 140 155 /* Macro for each VSI in a PF */ 141 156 #define ice_for_each_vsi(pf, i) \ 142 157 for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
+1 -1
drivers/net/ethernet/intel/ice/ice_main.c
··· 8283 8283 8284 8284 rule.rid = fltr->rid; 8285 8285 rule.rule_id = fltr->rule_id; 8286 - rule.vsi_handle = fltr->dest_id; 8286 + rule.vsi_handle = fltr->dest_vsi_handle; 8287 8287 status = ice_rem_adv_rule_by_id(&pf->hw, &rule); 8288 8288 if (status) { 8289 8289 if (status == -ENOENT)
+252 -101
drivers/net/ethernet/intel/ice/ice_tc_lib.c
··· 724 724 */ 725 725 fltr->rid = rule_added.rid; 726 726 fltr->rule_id = rule_added.rule_id; 727 - fltr->dest_id = rule_added.vsi_handle; 727 + fltr->dest_vsi_handle = rule_added.vsi_handle; 728 728 729 729 exit: 730 730 kfree(list); 731 731 return ret; 732 + } 733 + 734 + /** 735 + * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action) 736 + * @vsi: Pointer to VSI 737 + * @tc_fltr: Pointer to tc_flower_filter 738 + * 739 + * Locate the VSI using specified queue. When ADQ is not enabled, always 740 + * return input VSI, otherwise locate corresponding VSI based on per channel 741 + * offset and qcount 742 + */ 743 + static struct ice_vsi * 744 + ice_locate_vsi_using_queue(struct ice_vsi *vsi, 745 + struct ice_tc_flower_fltr *tc_fltr) 746 + { 747 + int num_tc, tc, queue; 748 + 749 + /* if ADQ is not active, passed VSI is the candidate VSI */ 750 + if (!ice_is_adq_active(vsi->back)) 751 + return vsi; 752 + 753 + /* Locate the VSI (it could still be main PF VSI or CHNL_VSI depending 754 + * upon queue number) 755 + */ 756 + num_tc = vsi->mqprio_qopt.qopt.num_tc; 757 + queue = tc_fltr->action.fwd.q.queue; 758 + 759 + for (tc = 0; tc < num_tc; tc++) { 760 + int qcount = vsi->mqprio_qopt.qopt.count[tc]; 761 + int offset = vsi->mqprio_qopt.qopt.offset[tc]; 762 + 763 + if (queue >= offset && queue < offset + qcount) { 764 + /* for non-ADQ TCs, passed VSI is the candidate VSI */ 765 + if (tc < ICE_CHNL_START_TC) 766 + return vsi; 767 + else 768 + return vsi->tc_map_vsi[tc]; 769 + } 770 + } 771 + return NULL; 772 + } 773 + 774 + static struct ice_rx_ring * 775 + ice_locate_rx_ring_using_queue(struct ice_vsi *vsi, 776 + struct ice_tc_flower_fltr *tc_fltr) 777 + { 778 + u16 queue = tc_fltr->action.fwd.q.queue; 779 + 780 + return queue < vsi->num_rxq ? 
vsi->rx_rings[queue] : NULL; 781 + } 782 + 783 + /** 784 + * ice_tc_forward_action - Determine destination VSI and queue for the action 785 + * @vsi: Pointer to VSI 786 + * @tc_fltr: Pointer to TC flower filter structure 787 + * 788 + * Validates the tc forward action and determines the destination VSI and queue 789 + * for the forward action. 790 + */ 791 + static struct ice_vsi * 792 + ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) 793 + { 794 + struct ice_rx_ring *ring = NULL; 795 + struct ice_vsi *ch_vsi = NULL; 796 + struct ice_pf *pf = vsi->back; 797 + struct device *dev; 798 + u32 tc_class; 799 + 800 + dev = ice_pf_to_dev(pf); 801 + 802 + /* Get the destination VSI and/or destination queue and validate them */ 803 + switch (tc_fltr->action.fltr_act) { 804 + case ICE_FWD_TO_VSI: 805 + tc_class = tc_fltr->action.fwd.tc.tc_class; 806 + /* Select the destination VSI */ 807 + if (tc_class < ICE_CHNL_START_TC) { 808 + NL_SET_ERR_MSG_MOD(tc_fltr->extack, 809 + "Unable to add filter because of unsupported destination"); 810 + return ERR_PTR(-EOPNOTSUPP); 811 + } 812 + /* Locate ADQ VSI depending on hw_tc number */ 813 + ch_vsi = vsi->tc_map_vsi[tc_class]; 814 + break; 815 + case ICE_FWD_TO_Q: 816 + /* Locate the Rx queue */ 817 + ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr); 818 + if (!ring) { 819 + dev_err(dev, 820 + "Unable to locate Rx queue for action fwd_to_queue: %u\n", 821 + tc_fltr->action.fwd.q.queue); 822 + return ERR_PTR(-EINVAL); 823 + } 824 + /* Determine destination VSI even though the action is 825 + * FWD_TO_QUEUE, because QUEUE is associated with VSI 826 + */ 827 + ch_vsi = tc_fltr->dest_vsi; 828 + break; 829 + default: 830 + dev_err(dev, 831 + "Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n", 832 + tc_fltr->action.fltr_act); 833 + return ERR_PTR(-EINVAL); 834 + } 835 + /* Must have valid ch_vsi (it could be main VSI or ADQ VSI) */ 836 + if (!ch_vsi) { 837 + 
dev_err(dev, 838 + "Unable to add filter because specified destination VSI doesn't exist\n"); 839 + return ERR_PTR(-EINVAL); 840 + } 841 + return ch_vsi; 732 842 } 733 843 734 844 /** ··· 882 772 return -EOPNOTSUPP; 883 773 } 884 774 885 - /* get the channel (aka ADQ VSI) */ 886 - if (tc_fltr->dest_vsi) 887 - ch_vsi = tc_fltr->dest_vsi; 888 - else 889 - ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class]; 775 + /* validate forwarding action VSI and queue */ 776 + ch_vsi = ice_tc_forward_action(vsi, tc_fltr); 777 + if (IS_ERR(ch_vsi)) 778 + return PTR_ERR(ch_vsi); 890 779 891 780 lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr); 892 781 list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); ··· 899 790 } 900 791 901 792 rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act; 902 - if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) { 903 - if (!ch_vsi) { 904 - NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist"); 905 - ret = -EINVAL; 906 - goto exit; 907 - } 793 + /* specify the cookie as filter_rule_id */ 794 + rule_info.fltr_rule_id = tc_fltr->cookie; 908 795 909 - rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; 796 + switch (tc_fltr->action.fltr_act) { 797 + case ICE_FWD_TO_VSI: 910 798 rule_info.sw_act.vsi_handle = ch_vsi->idx; 911 - rule_info.priority = 7; 799 + rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; 912 800 rule_info.sw_act.src = hw->pf_id; 913 801 rule_info.rx = true; 914 802 dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n", 915 - tc_fltr->action.tc_class, 803 + tc_fltr->action.fwd.tc.tc_class, 916 804 rule_info.sw_act.vsi_handle, lkups_cnt); 917 - } else { 805 + break; 806 + case ICE_FWD_TO_Q: 807 + /* HW queue number in global space */ 808 + rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue; 809 + rule_info.sw_act.vsi_handle = ch_vsi->idx; 810 + rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE; 811 + rule_info.sw_act.src = hw->pf_id; 812 + rule_info.rx = true; 813 + 
dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n", 814 + tc_fltr->action.fwd.q.queue, 815 + tc_fltr->action.fwd.q.hw_queue, lkups_cnt); 816 + break; 817 + default: 918 818 rule_info.sw_act.flag |= ICE_FLTR_TX; 819 + /* In case of Tx (LOOKUP_TX), src needs to be src VSI */ 919 820 rule_info.sw_act.src = vsi->idx; 821 + /* 'Rx' is false, direction of rule(LOOKUPTRX) */ 920 822 rule_info.rx = false; 823 + rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; 824 + break; 921 825 } 922 - 923 - /* specify the cookie as filter_rule_id */ 924 - rule_info.fltr_rule_id = tc_fltr->cookie; 925 826 926 827 ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); 927 828 if (ret == -EEXIST) { ··· 950 831 */ 951 832 tc_fltr->rid = rule_added.rid; 952 833 tc_fltr->rule_id = rule_added.rule_id; 953 - if (tc_fltr->action.tc_class > 0 && ch_vsi) { 954 - /* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and 955 - * for PF ADQ filter, it is not yet set in tc_fltr, 956 - * hence store the dest_vsi ptr in tc_fltr 957 - */ 958 - if (ch_vsi->type == ICE_VSI_CHNL) 959 - tc_fltr->dest_vsi = ch_vsi; 834 + tc_fltr->dest_vsi_handle = rule_added.vsi_handle; 835 + if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI || 836 + tc_fltr->action.fltr_act == ICE_FWD_TO_Q) { 837 + tc_fltr->dest_vsi = ch_vsi; 960 838 /* keep track of advanced switch filter for 961 - * destination VSI (channel VSI) 839 + * destination VSI 962 840 */ 963 841 ch_vsi->num_chnl_fltr++; 964 - /* in this case, dest_id is VSI handle (sw handle) */ 965 - tc_fltr->dest_id = rule_added.vsi_handle; 966 842 967 843 /* keeps track of channel filters for PF VSI */ 968 844 if (vsi->type == ICE_VSI_PF && ··· 965 851 ICE_TC_FLWR_FIELD_ENC_DST_MAC))) 966 852 pf->num_dmac_chnl_fltrs++; 967 853 } 968 - dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n", 969 - lkups_cnt, flags, 970 - tc_fltr->action.tc_class, rule_added.rid, 971 - rule_added.rule_id, 
rule_added.vsi_handle); 854 + switch (tc_fltr->action.fltr_act) { 855 + case ICE_FWD_TO_VSI: 856 + dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to TC %u, rid %u, rule_id %u, vsi_idx %u\n", 857 + lkups_cnt, flags, 858 + tc_fltr->action.fwd.tc.tc_class, rule_added.rid, 859 + rule_added.rule_id, rule_added.vsi_handle); 860 + break; 861 + case ICE_FWD_TO_Q: 862 + dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u) , rid %u, rule_id %u\n", 863 + lkups_cnt, flags, tc_fltr->action.fwd.q.queue, 864 + tc_fltr->action.fwd.q.hw_queue, rule_added.rid, 865 + rule_added.rule_id); 866 + break; 867 + default: 868 + break; 869 + } 972 870 exit: 973 871 kfree(list); 974 872 return ret; ··· 1581 1455 } 1582 1456 1583 1457 /** 1584 - * ice_handle_tclass_action - Support directing to a traffic class 1458 + * ice_prep_adq_filter - Prepare ADQ filter with the required additional headers 1585 1459 * @vsi: Pointer to VSI 1586 - * @cls_flower: Pointer to TC flower offload structure 1587 1460 * @fltr: Pointer to TC flower filter structure 1588 1461 * 1589 - * Support directing traffic to a traffic class 1462 + * Prepare ADQ filter with the required additional header fields 1590 1463 */ 1591 1464 static int 1592 - ice_handle_tclass_action(struct ice_vsi *vsi, 1593 - struct flow_cls_offload *cls_flower, 1594 - struct ice_tc_flower_fltr *fltr) 1465 + ice_prep_adq_filter(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) 1595 1466 { 1596 - int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); 1597 - struct ice_vsi *main_vsi; 1598 - 1599 - if (tc < 0) { 1600 - NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid"); 1601 - return -EINVAL; 1602 - } 1603 - if (!tc) { 1604 - NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination"); 1605 - return -EINVAL; 1606 - } 1607 - 1608 - if (!(vsi->all_enatc & BIT(tc))) { 1609 - 
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existence destination"); 1610 - return -EINVAL; 1611 - } 1612 - 1613 - /* Redirect to a TC class or Queue Group */ 1614 - main_vsi = ice_get_main_vsi(vsi->back); 1615 - if (!main_vsi || !main_vsi->netdev) { 1616 - NL_SET_ERR_MSG_MOD(fltr->extack, 1617 - "Unable to add filter because of invalid netdevice"); 1618 - return -EINVAL; 1619 - } 1620 - 1621 1467 if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) && 1622 1468 (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC | 1623 1469 ICE_TC_FLWR_FIELD_SRC_MAC))) { ··· 1601 1503 /* For ADQ, filter must include dest MAC address, otherwise unwanted 1602 1504 * packets with unrelated MAC address get delivered to ADQ VSIs as long 1603 1505 * as remaining filter criteria is satisfied such as dest IP address 1604 - * and dest/src L4 port. Following code is trying to handle: 1605 - * 1. For non-tunnel, if user specify MAC addresses, use them (means 1606 - * this code won't do anything 1506 + * and dest/src L4 port. Below code handles the following cases: 1507 + * 1. For non-tunnel, if user specify MAC addresses, use them. 1607 1508 * 2. For non-tunnel, if user didn't specify MAC address, add implicit 1608 1509 * dest MAC to be lower netdev's active unicast MAC address 1609 1510 * 3. For tunnel, as of now TC-filter through flower classifier doesn't ··· 1625 1528 eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac); 1626 1529 } 1627 1530 1628 - /* validate specified dest MAC address, make sure either it belongs to 1629 - * lower netdev or any of MACVLAN. MACVLANs MAC address are added as 1630 - * unicast MAC filter destined to main VSI. 
1631 - */ 1632 - if (!ice_mac_fltr_exist(&main_vsi->back->hw, 1633 - fltr->outer_headers.l2_key.dst_mac, 1634 - main_vsi->idx)) { 1635 - NL_SET_ERR_MSG_MOD(fltr->extack, 1636 - "Unable to add filter because legacy MAC filter for specified destination doesn't exist"); 1637 - return -EINVAL; 1638 - } 1639 - 1640 1531 /* Make sure VLAN is already added to main VSI, before allowing ADQ to 1641 1532 * add a VLAN based filter such as MAC + VLAN + L4 port. 1642 1533 */ 1643 1534 if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) { 1644 1535 u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id); 1645 1536 1646 - if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id, 1647 - main_vsi->idx)) { 1537 + if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) { 1648 1538 NL_SET_ERR_MSG_MOD(fltr->extack, 1649 1539 "Unable to add filter because legacy VLAN filter for specified destination doesn't exist"); 1650 1540 return -EINVAL; 1651 1541 } 1652 1542 } 1653 - fltr->action.fltr_act = ICE_FWD_TO_VSI; 1654 - fltr->action.tc_class = tc; 1655 - 1656 1543 return 0; 1544 + } 1545 + 1546 + /** 1547 + * ice_handle_tclass_action - Support directing to a traffic class 1548 + * @vsi: Pointer to VSI 1549 + * @cls_flower: Pointer to TC flower offload structure 1550 + * @fltr: Pointer to TC flower filter structure 1551 + * 1552 + * Support directing traffic to a traffic class/queue-set 1553 + */ 1554 + static int 1555 + ice_handle_tclass_action(struct ice_vsi *vsi, 1556 + struct flow_cls_offload *cls_flower, 1557 + struct ice_tc_flower_fltr *fltr) 1558 + { 1559 + int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); 1560 + 1561 + /* user specified hw_tc (must be non-zero for ADQ TC), action is forward 1562 + * to hw_tc (i.e. 
ADQ channel number) 1563 + */ 1564 + if (tc < ICE_CHNL_START_TC) { 1565 + NL_SET_ERR_MSG_MOD(fltr->extack, 1566 + "Unable to add filter because of unsupported destination"); 1567 + return -EOPNOTSUPP; 1568 + } 1569 + if (!(vsi->all_enatc & BIT(tc))) { 1570 + NL_SET_ERR_MSG_MOD(fltr->extack, 1571 + "Unable to add filter because of non-existence destination"); 1572 + return -EINVAL; 1573 + } 1574 + fltr->action.fltr_act = ICE_FWD_TO_VSI; 1575 + fltr->action.fwd.tc.tc_class = tc; 1576 + 1577 + return ice_prep_adq_filter(vsi, fltr); 1578 + } 1579 + 1580 + static int 1581 + ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, 1582 + struct flow_action_entry *act) 1583 + { 1584 + struct ice_vsi *ch_vsi = NULL; 1585 + u16 queue = act->rx_queue; 1586 + 1587 + if (queue > vsi->num_rxq) { 1588 + NL_SET_ERR_MSG_MOD(fltr->extack, 1589 + "Unable to add filter because specified queue is invalid"); 1590 + return -EINVAL; 1591 + } 1592 + fltr->action.fltr_act = ICE_FWD_TO_Q; 1593 + fltr->action.fwd.q.queue = queue; 1594 + /* determine corresponding HW queue */ 1595 + fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue]; 1596 + 1597 + /* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare 1598 + * ADQ switch filter 1599 + */ 1600 + ch_vsi = ice_locate_vsi_using_queue(vsi, fltr); 1601 + if (!ch_vsi) 1602 + return -EINVAL; 1603 + fltr->dest_vsi = ch_vsi; 1604 + if (!ice_is_chnl_fltr(fltr)) 1605 + return 0; 1606 + 1607 + return ice_prep_adq_filter(vsi, fltr); 1608 + } 1609 + 1610 + static int 1611 + ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, 1612 + struct flow_action_entry *act) 1613 + { 1614 + switch (act->id) { 1615 + case FLOW_ACTION_RX_QUEUE_MAPPING: 1616 + /* forward to queue */ 1617 + return ice_tc_forward_to_queue(vsi, fltr, act); 1618 + default: 1619 + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action"); 1620 + return -EOPNOTSUPP; 1621 + } 1657 1622 } 1658 1623 1659 1624 /** ··· 1734 1575 struct 
flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); 1735 1576 struct flow_action *flow_action = &rule->action; 1736 1577 struct flow_action_entry *act; 1737 - int i; 1578 + int i, err; 1738 1579 1739 1580 if (cls_flower->classid) 1740 1581 return ice_handle_tclass_action(vsi, cls_flower, fltr); ··· 1743 1584 return -EINVAL; 1744 1585 1745 1586 flow_action_for_each(i, act, flow_action) { 1746 - if (ice_is_eswitch_mode_switchdev(vsi->back)) { 1747 - int err = ice_eswitch_tc_parse_action(fltr, act); 1748 - 1749 - if (err) 1750 - return err; 1751 - continue; 1752 - } 1753 - /* Allow only one rule per filter */ 1754 - 1755 - /* Drop action */ 1756 - if (act->id == FLOW_ACTION_DROP) { 1757 - NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP"); 1758 - return -EINVAL; 1759 - } 1760 - fltr->action.fltr_act = ICE_FWD_TO_VSI; 1587 + if (ice_is_eswitch_mode_switchdev(vsi->back)) 1588 + err = ice_eswitch_tc_parse_action(fltr, act); 1589 + else 1590 + err = ice_tc_parse_action(vsi, fltr, act); 1591 + if (err) 1592 + return err; 1593 + continue; 1761 1594 } 1762 1595 return 0; 1763 1596 } ··· 1769 1618 1770 1619 rule_rem.rid = fltr->rid; 1771 1620 rule_rem.rule_id = fltr->rule_id; 1772 - rule_rem.vsi_handle = fltr->dest_id; 1621 + rule_rem.vsi_handle = fltr->dest_vsi_handle; 1773 1622 err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem); 1774 1623 if (err) { 1775 1624 if (err == -ENOENT) {
+32 -8
drivers/net/ethernet/intel/ice/ice_tc_lib.h
··· 45 45 }; 46 46 47 47 struct ice_tc_flower_action { 48 - u32 tc_class; 48 + /* forward action specific params */ 49 + union { 50 + struct { 51 + u32 tc_class; /* forward to hw_tc */ 52 + u32 rsvd; 53 + } tc; 54 + struct { 55 + u16 queue; /* forward to queue */ 56 + /* To add filter in HW, absolute queue number in global 57 + * space of queues (between 0...N) is needed 58 + */ 59 + u16 hw_queue; 60 + } q; 61 + } fwd; 49 62 enum ice_sw_fwd_act_type fltr_act; 50 63 }; 51 64 ··· 144 131 */ 145 132 u16 rid; 146 133 u16 rule_id; 147 - /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon 148 - * destination type 134 + /* VSI handle of the destination VSI (it could be main PF VSI, CHNL_VSI, 135 + * VF VSI) 149 136 */ 150 - u16 dest_id; 151 - /* if dest_id is vsi_idx, then need to store destination VSI ptr */ 137 + u16 dest_vsi_handle; 138 + /* ptr to destination VSI */ 152 139 struct ice_vsi *dest_vsi; 153 140 /* direction of fltr for eswitch use case */ 154 141 enum ice_eswitch_fltr_direction direction; ··· 175 162 * @f: Pointer to tc-flower filter 176 163 * 177 164 * Criteria to determine of given filter is valid channel filter 178 - * or not is based on its "destination". If destination is hw_tc (aka tc_class) 179 - * and it is non-zero, then it is valid channel (aka ADQ) filter 165 + * or not is based on its destination. 166 + * For forward to VSI action, if destination is valid hw_tc (aka tc_class) 167 + * and in supported range of TCs for ADQ, then return true. 168 + * For forward to queue, as long as dest_vsi is valid and it is of type 169 + * VSI_CHNL (PF ADQ VSI is of type VSI_CHNL), return true. 170 + * NOTE: For forward to queue, correct dest_vsi is still set in tc_fltr based 171 + * on destination queue specified. 
180 172 */ 181 173 static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f) 182 174 { 183 - return !!f->action.tc_class; 175 + if (f->action.fltr_act == ICE_FWD_TO_VSI) 176 + return f->action.fwd.tc.tc_class >= ICE_CHNL_START_TC && 177 + f->action.fwd.tc.tc_class < ICE_CHNL_MAX_TC; 178 + else if (f->action.fltr_act == ICE_FWD_TO_Q) 179 + return f->dest_vsi && f->dest_vsi->type == ICE_VSI_CHNL; 180 + 181 + return false; 184 182 } 185 183 186 184 /**
+1
include/net/act_api.h
··· 67 67 #define TCA_ACT_FLAGS_BIND (1U << (TCA_ACT_FLAGS_USER_BITS + 1)) 68 68 #define TCA_ACT_FLAGS_REPLACE (1U << (TCA_ACT_FLAGS_USER_BITS + 2)) 69 69 #define TCA_ACT_FLAGS_NO_RTNL (1U << (TCA_ACT_FLAGS_USER_BITS + 3)) 70 + #define TCA_ACT_FLAGS_AT_INGRESS (1U << (TCA_ACT_FLAGS_USER_BITS + 4)) 70 71 71 72 /* Update lastuse only if needed, to avoid dirtying a cache line. 72 73 * We use a temp variable to avoid fetching jiffies twice.
+2
include/net/flow_offload.h
··· 155 155 FLOW_ACTION_MARK, 156 156 FLOW_ACTION_PTYPE, 157 157 FLOW_ACTION_PRIORITY, 158 + FLOW_ACTION_RX_QUEUE_MAPPING, 158 159 FLOW_ACTION_WAKE, 159 160 FLOW_ACTION_QUEUE, 160 161 FLOW_ACTION_SAMPLE, ··· 248 247 u32 csum_flags; /* FLOW_ACTION_CSUM */ 249 248 u32 mark; /* FLOW_ACTION_MARK */ 250 249 u16 ptype; /* FLOW_ACTION_PTYPE */ 250 + u16 rx_queue; /* FLOW_ACTION_RX_QUEUE_MAPPING */ 251 251 u32 priority; /* FLOW_ACTION_PRIORITY */ 252 252 struct { /* FLOW_ACTION_QUEUE */ 253 253 u32 ctx;
+29
include/net/tc_act/tc_skbedit.h
··· 95 95 return priority; 96 96 } 97 97 98 + static inline u16 tcf_skbedit_rx_queue_mapping(const struct tc_action *a) 99 + { 100 + u16 rx_queue; 101 + 102 + rcu_read_lock(); 103 + rx_queue = rcu_dereference(to_skbedit(a)->params)->queue_mapping; 104 + rcu_read_unlock(); 105 + 106 + return rx_queue; 107 + } 108 + 98 109 /* Return true iff action is queue_mapping */ 99 110 static inline bool is_tcf_skbedit_queue_mapping(const struct tc_action *a) 100 111 { 101 112 return is_tcf_skbedit_with_flag(a, SKBEDIT_F_QUEUE_MAPPING); 113 + } 114 + 115 + /* Return true if action is on ingress traffic */ 116 + static inline bool is_tcf_skbedit_ingress(u32 flags) 117 + { 118 + return flags & TCA_ACT_FLAGS_AT_INGRESS; 119 + } 120 + 121 + static inline bool is_tcf_skbedit_tx_queue_mapping(const struct tc_action *a) 122 + { 123 + return is_tcf_skbedit_queue_mapping(a) && 124 + !is_tcf_skbedit_ingress(a->tcfa_flags); 125 + } 126 + 127 + static inline bool is_tcf_skbedit_rx_queue_mapping(const struct tc_action *a) 128 + { 129 + return is_tcf_skbedit_queue_mapping(a) && 130 + is_tcf_skbedit_ingress(a->tcfa_flags); 102 131 } 103 132 104 133 /* Return true iff action is inheritdsfield */
+12 -2
net/sched/act_skbedit.c
··· 148 148 } 149 149 150 150 if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) { 151 + if (is_tcf_skbedit_ingress(act_flags) && 152 + !(act_flags & TCA_ACT_FLAGS_SKIP_SW)) { 153 + NL_SET_ERR_MSG_MOD(extack, "\"queue_mapping\" option on receive side is hardware only, use skip_sw"); 154 + return -EOPNOTSUPP; 155 + } 151 156 flags |= SKBEDIT_F_QUEUE_MAPPING; 152 157 queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]); 153 158 } ··· 379 374 } else if (is_tcf_skbedit_priority(act)) { 380 375 entry->id = FLOW_ACTION_PRIORITY; 381 376 entry->priority = tcf_skbedit_priority(act); 382 - } else if (is_tcf_skbedit_queue_mapping(act)) { 383 - NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used"); 377 + } else if (is_tcf_skbedit_tx_queue_mapping(act)) { 378 + NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used on transmit side"); 384 379 return -EOPNOTSUPP; 380 + } else if (is_tcf_skbedit_rx_queue_mapping(act)) { 381 + entry->id = FLOW_ACTION_RX_QUEUE_MAPPING; 382 + entry->rx_queue = tcf_skbedit_rx_queue_mapping(act); 385 383 } else if (is_tcf_skbedit_inheritdsfield(act)) { 386 384 NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used"); 387 385 return -EOPNOTSUPP; ··· 402 394 fl_action->id = FLOW_ACTION_PTYPE; 403 395 else if (is_tcf_skbedit_priority(act)) 404 396 fl_action->id = FLOW_ACTION_PRIORITY; 397 + else if (is_tcf_skbedit_rx_queue_mapping(act)) 398 + fl_action->id = FLOW_ACTION_RX_QUEUE_MAPPING; 405 399 else 406 400 return -EOPNOTSUPP; 407 401 }
+7
net/sched/cls_api.c
··· 1953 1953 tp->ops->put(tp, fh); 1954 1954 } 1955 1955 1956 + static bool is_qdisc_ingress(__u32 classid) 1957 + { 1958 + return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS)); 1959 + } 1960 + 1956 1961 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 1957 1962 struct netlink_ext_ack *extack) 1958 1963 { ··· 2149 2144 flags |= TCA_ACT_FLAGS_REPLACE; 2150 2145 if (!rtnl_held) 2151 2146 flags |= TCA_ACT_FLAGS_NO_RTNL; 2147 + if (is_qdisc_ingress(parent)) 2148 + flags |= TCA_ACT_FLAGS_AT_INGRESS; 2152 2149 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, 2153 2150 flags, extack); 2154 2151 if (err == 0) {