Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2018-02-14

This patch series enables the new mqprio hardware offload mechanism
creating traffic classes on VFs for XL710 devices. The parameters
needed to configure these traffic classes/queue channels are provided
by the user via the tc tool. A maximum of four traffic classes can be
created on each VF. This patch series also enables application of cloud
filters to each of these traffic classes. The cloud filters are applied
using the tc-flower classifier.

Example:
1. tc qdisc add dev vf0 root mqprio num_tc 4 map 0 0 0 0 1 2 2 3\
queues 2@0 2@2 1@4 1@5 hw 1 mode channel
2. tc qdisc add dev vf0 ingress
3. ethtool -K vf0 hw-tc-offload on
4. ip link set eth0 vf 0 spoofchk off
5. tc filter add dev vf0 protocol ip parent ffff: prio 1 flower dst_ip\
192.168.3.5/32 ip_proto udp dst_port 25 skip_sw hw_tc 2
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+2237 -95
+6
drivers/net/ethernet/intel/i40e/i40e.h
··· 1109 1109 1110 1110 int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); 1111 1111 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); 1112 + int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, 1113 + struct i40e_cloud_filter *filter, 1114 + bool add); 1115 + int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, 1116 + struct i40e_cloud_filter *filter, 1117 + bool add); 1112 1118 #endif /* _I40E_H_ */
+5 -11
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 69 69 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired); 70 70 static void i40e_fdir_sb_setup(struct i40e_pf *pf); 71 71 static int i40e_veb_get_bw_info(struct i40e_veb *veb); 72 - static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, 73 - struct i40e_cloud_filter *filter, 74 - bool add); 75 - static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, 76 - struct i40e_cloud_filter *filter, 77 - bool add); 78 72 static int i40e_get_capabilities(struct i40e_pf *pf, 79 73 enum i40e_admin_queue_opc list_type); 80 74 ··· 6835 6841 * Add or delete a cloud filter for a specific flow spec. 6836 6842 * Returns 0 if the filter were successfully added. 6837 6843 **/ 6838 - static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, 6839 - struct i40e_cloud_filter *filter, bool add) 6844 + int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, 6845 + struct i40e_cloud_filter *filter, bool add) 6840 6846 { 6841 6847 struct i40e_aqc_cloud_filters_element_data cld_filter; 6842 6848 struct i40e_pf *pf = vsi->back; ··· 6902 6908 * Add or delete a cloud filter for a specific flow spec using big buffer. 6903 6909 * Returns 0 if the filter were successfully added. 6904 6910 **/ 6905 - static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, 6906 - struct i40e_cloud_filter *filter, 6907 - bool add) 6911 + int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, 6912 + struct i40e_cloud_filter *filter, 6913 + bool add) 6908 6914 { 6909 6915 struct i40e_aqc_cloud_filters_element_bb cld_filter; 6910 6916 struct i40e_pf *pf = vsi->back;
+1 -1
drivers/net/ethernet/intel/i40e/i40e_type.h
··· 39 39 #define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) 40 40 41 41 #define I40E_MAX_VSI_QP 16 42 - #define I40E_MAX_VF_VSI 3 42 + #define I40E_MAX_VF_VSI 4 43 43 #define I40E_MAX_CHAINED_RX_BUFFERS 5 44 44 #define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 45 45
+931 -69
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
··· 258 258 } 259 259 260 260 /** 261 + * i40e_get_real_pf_qid 262 + * @vf: pointer to the VF info 263 + * @vsi_id: vsi id 264 + * @queue_id: queue number 265 + * 266 + * wrapper function to get pf_queue_id handling ADq code as well 267 + **/ 268 + static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id) 269 + { 270 + int i; 271 + 272 + if (vf->adq_enabled) { 273 + /* Although VF considers all the queues(can be 1 to 16) as its 274 + * own but they may actually belong to different VSIs(up to 4). 275 + * We need to find which queues belongs to which VSI. 276 + */ 277 + for (i = 0; i < vf->num_tc; i++) { 278 + if (queue_id < vf->ch[i].num_qps) { 279 + vsi_id = vf->ch[i].vsi_id; 280 + break; 281 + } 282 + /* find right queue id which is relative to a 283 + * given VSI. 284 + */ 285 + queue_id -= vf->ch[i].num_qps; 286 + } 287 + } 288 + 289 + return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id); 290 + } 291 + 292 + /** 261 293 * i40e_config_irq_link_list 262 294 * @vf: pointer to the VF info 263 295 * @vsi_id: id of VSI as given by the FW ··· 342 310 343 311 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 344 312 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 345 - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); 313 + pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id); 346 314 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); 347 315 348 316 wr32(hw, reg_idx, reg); ··· 365 333 if (next_q < size) { 366 334 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 367 335 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 368 - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, 369 - vsi_queue_id); 336 + pf_queue_id = i40e_get_real_pf_qid(vf, 337 + vsi_id, 338 + vsi_queue_id); 370 339 } else { 371 340 pf_queue_id = I40E_QUEUE_END_OF_LIST; 372 341 qtype = 0; ··· 702 669 /** 703 670 * i40e_alloc_vsi_res 704 671 * @vf: pointer to the VF info 705 - * @type: type of VSI to allocate 672 + * @idx: VSI 
index, applies only for ADq mode, zero otherwise 706 673 * 707 674 * alloc VF vsi context & resources 708 675 **/ 709 - static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) 676 + static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) 710 677 { 711 678 struct i40e_mac_filter *f = NULL; 712 679 struct i40e_pf *pf = vf->pf; 713 680 struct i40e_vsi *vsi; 681 + u64 max_tx_rate = 0; 714 682 int ret = 0; 715 683 716 - vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id); 684 + vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid, 685 + vf->vf_id); 717 686 718 687 if (!vsi) { 719 688 dev_err(&pf->pdev->dev, ··· 724 689 ret = -ENOENT; 725 690 goto error_alloc_vsi_res; 726 691 } 727 - if (type == I40E_VSI_SRIOV) { 692 + 693 + if (!idx) { 728 694 u64 hena = i40e_pf_get_default_rss_hena(pf); 729 695 u8 broadcast[ETH_ALEN]; 730 696 ··· 757 721 spin_unlock_bh(&vsi->mac_filter_hash_lock); 758 722 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena); 759 723 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32)); 724 + /* program mac filter only for VF VSI */ 725 + ret = i40e_sync_vsi_filters(vsi); 726 + if (ret) 727 + dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 760 728 } 761 729 762 - /* program mac filter */ 763 - ret = i40e_sync_vsi_filters(vsi); 764 - if (ret) 765 - dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 730 + /* storing VSI index and id for ADq and don't apply the mac filter */ 731 + if (vf->adq_enabled) { 732 + vf->ch[idx].vsi_idx = vsi->idx; 733 + vf->ch[idx].vsi_id = vsi->id; 734 + } 766 735 767 736 /* Set VF bandwidth if specified */ 768 737 if (vf->tx_rate) { 738 + max_tx_rate = vf->tx_rate; 739 + } else if (vf->ch[idx].max_tx_rate) { 740 + max_tx_rate = vf->ch[idx].max_tx_rate; 741 + } 742 + 743 + if (max_tx_rate) { 744 + max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR); 769 745 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, 770 - vf->tx_rate / 
50, 0, NULL); 746 + max_tx_rate, 0, NULL); 771 747 if (ret) 772 748 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", 773 749 vf->vf_id, ret); ··· 787 739 788 740 error_alloc_vsi_res: 789 741 return ret; 742 + } 743 + 744 + /** 745 + * i40e_map_pf_queues_to_vsi 746 + * @vf: pointer to the VF info 747 + * 748 + * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This 749 + * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI. 750 + **/ 751 + static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf) 752 + { 753 + struct i40e_pf *pf = vf->pf; 754 + struct i40e_hw *hw = &pf->hw; 755 + u32 reg, num_tc = 1; /* VF has at least one traffic class */ 756 + u16 vsi_id, qps; 757 + int i, j; 758 + 759 + if (vf->adq_enabled) 760 + num_tc = vf->num_tc; 761 + 762 + for (i = 0; i < num_tc; i++) { 763 + if (vf->adq_enabled) { 764 + qps = vf->ch[i].num_qps; 765 + vsi_id = vf->ch[i].vsi_id; 766 + } else { 767 + qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 768 + vsi_id = vf->lan_vsi_id; 769 + } 770 + 771 + for (j = 0; j < 7; j++) { 772 + if (j * 2 >= qps) { 773 + /* end of list */ 774 + reg = 0x07FF07FF; 775 + } else { 776 + u16 qid = i40e_vc_get_pf_queue_id(vf, 777 + vsi_id, 778 + j * 2); 779 + reg = qid; 780 + qid = i40e_vc_get_pf_queue_id(vf, vsi_id, 781 + (j * 2) + 1); 782 + reg |= qid << 16; 783 + } 784 + i40e_write_rx_ctl(hw, 785 + I40E_VSILAN_QTABLE(j, vsi_id), 786 + reg); 787 + } 788 + } 789 + } 790 + 791 + /** 792 + * i40e_map_pf_to_vf_queues 793 + * @vf: pointer to the VF info 794 + * 795 + * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This 796 + * function takes care of the second part VPLAN_QTABLE & completes VF mappings. 
797 + **/ 798 + static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf) 799 + { 800 + struct i40e_pf *pf = vf->pf; 801 + struct i40e_hw *hw = &pf->hw; 802 + u32 reg, total_qps = 0; 803 + u32 qps, num_tc = 1; /* VF has at least one traffic class */ 804 + u16 vsi_id, qid; 805 + int i, j; 806 + 807 + if (vf->adq_enabled) 808 + num_tc = vf->num_tc; 809 + 810 + for (i = 0; i < num_tc; i++) { 811 + if (vf->adq_enabled) { 812 + qps = vf->ch[i].num_qps; 813 + vsi_id = vf->ch[i].vsi_id; 814 + } else { 815 + qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 816 + vsi_id = vf->lan_vsi_id; 817 + } 818 + 819 + for (j = 0; j < qps; j++) { 820 + qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j); 821 + 822 + reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 823 + wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id), 824 + reg); 825 + total_qps++; 826 + } 827 + } 790 828 } 791 829 792 830 /** ··· 885 751 { 886 752 struct i40e_pf *pf = vf->pf; 887 753 struct i40e_hw *hw = &pf->hw; 888 - u32 reg, total_queue_pairs = 0; 889 - int j; 754 + u32 reg; 890 755 891 756 /* Tell the hardware we're using noncontiguous mapping. 
HW requires 892 757 * that VF queues be mapped using this method, even when they are ··· 898 765 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; 899 766 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); 900 767 901 - /* map PF queues to VF queues */ 902 - for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) { 903 - u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j); 904 - 905 - reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 906 - wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); 907 - total_queue_pairs++; 908 - } 909 - 910 - /* map PF queues to VSI */ 911 - for (j = 0; j < 7; j++) { 912 - if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) { 913 - reg = 0x07FF07FF; /* unused */ 914 - } else { 915 - u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, 916 - j * 2); 917 - reg = qid; 918 - qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, 919 - (j * 2) + 1); 920 - reg |= qid << 16; 921 - } 922 - i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), 923 - reg); 924 - } 768 + i40e_map_pf_to_vf_queues(vf); 769 + i40e_map_pf_queues_to_vsi(vf); 925 770 926 771 i40e_flush(hw); 927 772 } ··· 935 824 struct i40e_pf *pf = vf->pf; 936 825 struct i40e_hw *hw = &pf->hw; 937 826 u32 reg_idx, reg; 938 - int i, msix_vf; 827 + int i, j, msix_vf; 939 828 940 829 /* Start by disabling VF's configuration API to prevent the OS from 941 830 * accessing the VF's VSI after it's freed / invalidated. 
··· 956 845 vf->lan_vsi_idx = 0; 957 846 vf->lan_vsi_id = 0; 958 847 vf->num_mac = 0; 848 + } 849 + 850 + /* do the accounting and remove additional ADq VSI's */ 851 + if (vf->adq_enabled && vf->ch[0].vsi_idx) { 852 + for (j = 0; j < vf->num_tc; j++) { 853 + /* At this point VSI0 is already released so don't 854 + * release it again and only clear their values in 855 + * structure variables 856 + */ 857 + if (j) 858 + i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]); 859 + vf->ch[j].vsi_idx = 0; 860 + vf->ch[j].vsi_id = 0; 861 + } 959 862 } 960 863 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 961 864 ··· 1016 891 { 1017 892 struct i40e_pf *pf = vf->pf; 1018 893 int total_queue_pairs = 0; 1019 - int ret; 894 + int ret, idx; 1020 895 1021 896 if (vf->num_req_queues && 1022 897 vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF) ··· 1025 900 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 1026 901 1027 902 /* allocate hw vsi context & associated resources */ 1028 - ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); 903 + ret = i40e_alloc_vsi_res(vf, 0); 1029 904 if (ret) 1030 905 goto error_alloc; 1031 906 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; 907 + 908 + /* allocate additional VSIs based on tc information for ADq */ 909 + if (vf->adq_enabled) { 910 + if (pf->queues_left >= 911 + (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) { 912 + /* TC 0 always belongs to VF VSI */ 913 + for (idx = 1; idx < vf->num_tc; idx++) { 914 + ret = i40e_alloc_vsi_res(vf, idx); 915 + if (ret) 916 + goto error_alloc; 917 + } 918 + /* send correct number of queues */ 919 + total_queue_pairs = I40E_MAX_VF_QUEUES; 920 + } else { 921 + dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n", 922 + vf->vf_id); 923 + vf->adq_enabled = false; 924 + } 925 + } 1032 926 1033 927 /* We account for each VF to get a default number of queue pairs. 
If 1034 928 * the VF has now requested more, we need to account for that to make ··· 1681 1537 } 1682 1538 1683 1539 /** 1540 + * i40e_del_qch - delete all the additional VSIs created as a part of ADq 1541 + * @vf: pointer to VF structure 1542 + **/ 1543 + static void i40e_del_qch(struct i40e_vf *vf) 1544 + { 1545 + struct i40e_pf *pf = vf->pf; 1546 + int i; 1547 + 1548 + /* first element in the array belongs to primary VF VSI and we shouldn't 1549 + * delete it. We should however delete the rest of the VSIs created 1550 + */ 1551 + for (i = 1; i < vf->num_tc; i++) { 1552 + if (vf->ch[i].vsi_idx) { 1553 + i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]); 1554 + vf->ch[i].vsi_idx = 0; 1555 + vf->ch[i].vsi_id = 0; 1556 + } 1557 + } 1558 + } 1559 + 1560 + /** 1684 1561 * i40e_vc_get_vf_resources_msg 1685 1562 * @vf: pointer to the VF info 1686 1563 * @msg: pointer to the msg buffer ··· 1795 1630 1796 1631 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) 1797 1632 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; 1633 + 1634 + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) 1635 + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; 1798 1636 1799 1637 vfres->num_vsis = num_vsis; 1800 1638 vfres->num_queue_pairs = vf->num_queue_pairs; ··· 2023 1855 (struct virtchnl_vsi_queue_config_info *)msg; 2024 1856 struct virtchnl_queue_pair_info *qpi; 2025 1857 struct i40e_pf *pf = vf->pf; 2026 - u16 vsi_id, vsi_queue_id; 1858 + u16 vsi_id, vsi_queue_id = 0; 2027 1859 i40e_status aq_ret = 0; 2028 - int i; 1860 + int i, j = 0, idx = 0; 1861 + 1862 + vsi_id = qci->vsi_id; 2029 1863 2030 1864 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2031 1865 aq_ret = I40E_ERR_PARAM; 2032 1866 goto error_param; 2033 1867 } 2034 1868 2035 - vsi_id = qci->vsi_id; 2036 1869 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { 2037 1870 aq_ret = I40E_ERR_PARAM; 2038 1871 goto error_param; 2039 1872 } 1873 + 2040 1874 for (i = 0; i < qci->num_queue_pairs; i++) { 2041 1875 qpi = &qci->qpair[i]; 2042 - 
vsi_queue_id = qpi->txq.queue_id; 2043 - if ((qpi->txq.vsi_id != vsi_id) || 2044 - (qpi->rxq.vsi_id != vsi_id) || 2045 - (qpi->rxq.queue_id != vsi_queue_id) || 2046 - !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { 1876 + 1877 + if (!vf->adq_enabled) { 1878 + vsi_queue_id = qpi->txq.queue_id; 1879 + 1880 + if (qpi->txq.vsi_id != qci->vsi_id || 1881 + qpi->rxq.vsi_id != qci->vsi_id || 1882 + qpi->rxq.queue_id != vsi_queue_id) { 1883 + aq_ret = I40E_ERR_PARAM; 1884 + goto error_param; 1885 + } 1886 + } 1887 + 1888 + if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { 2047 1889 aq_ret = I40E_ERR_PARAM; 2048 1890 goto error_param; 2049 1891 } ··· 2065 1887 aq_ret = I40E_ERR_PARAM; 2066 1888 goto error_param; 2067 1889 } 1890 + 1891 + /* For ADq there can be up to 4 VSIs with max 4 queues each. 1892 + * VF does not know about these additional VSIs and all 1893 + * it cares is about its own queues. PF configures these queues 1894 + * to its appropriate VSIs based on TC mapping 1895 + **/ 1896 + if (vf->adq_enabled) { 1897 + if (j == (vf->ch[idx].num_qps - 1)) { 1898 + idx++; 1899 + j = 0; /* resetting the queue count */ 1900 + vsi_queue_id = 0; 1901 + } else { 1902 + j++; 1903 + vsi_queue_id++; 1904 + } 1905 + vsi_id = vf->ch[idx].vsi_id; 1906 + } 2068 1907 } 2069 1908 /* set vsi num_queue_pairs in use to num configured by VF */ 2070 - pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; 1909 + if (!vf->adq_enabled) { 1910 + pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = 1911 + qci->num_queue_pairs; 1912 + } else { 1913 + for (i = 0; i < vf->num_tc; i++) 1914 + pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = 1915 + vf->ch[i].num_qps; 1916 + } 2071 1917 2072 1918 error_param: 2073 1919 /* send the response to the VF */ 2074 1920 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 2075 1921 aq_ret); 1922 + } 1923 + 1924 + /** 1925 + * i40e_validate_queue_map 1926 + * @vsi_id: vsi id 1927 + * @queuemap: Tx or Rx queue map 1928 + * 1929 + 
* check if Tx or Rx queue map is valid 1930 + **/ 1931 + static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, 1932 + unsigned long queuemap) 1933 + { 1934 + u16 vsi_queue_id, queue_id; 1935 + 1936 + for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { 1937 + if (vf->adq_enabled) { 1938 + vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; 1939 + queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); 1940 + } else { 1941 + queue_id = vsi_queue_id; 1942 + } 1943 + 1944 + if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) 1945 + return -EINVAL; 1946 + } 1947 + 1948 + return 0; 2076 1949 } 2077 1950 2078 1951 /** ··· 2140 1911 struct virtchnl_irq_map_info *irqmap_info = 2141 1912 (struct virtchnl_irq_map_info *)msg; 2142 1913 struct virtchnl_vector_map *map; 2143 - u16 vsi_id, vsi_queue_id, vector_id; 1914 + u16 vsi_id, vector_id; 2144 1915 i40e_status aq_ret = 0; 2145 - unsigned long tempmap; 2146 1916 int i; 2147 1917 2148 1918 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { ··· 2151 1923 2152 1924 for (i = 0; i < irqmap_info->num_vectors; i++) { 2153 1925 map = &irqmap_info->vecmap[i]; 2154 - 2155 1926 vector_id = map->vector_id; 2156 1927 vsi_id = map->vsi_id; 2157 1928 /* validate msg params */ ··· 2160 1933 goto error_param; 2161 1934 } 2162 1935 2163 - /* lookout for the invalid queue index */ 2164 - tempmap = map->rxq_map; 2165 - for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 2166 - if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 2167 - vsi_queue_id)) { 2168 - aq_ret = I40E_ERR_PARAM; 2169 - goto error_param; 2170 - } 1936 + if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { 1937 + aq_ret = I40E_ERR_PARAM; 1938 + goto error_param; 2171 1939 } 2172 1940 2173 - tempmap = map->txq_map; 2174 - for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { 2175 - if (!i40e_vc_isvalid_queue_id(vf, vsi_id, 2176 - vsi_queue_id)) { 2177 - aq_ret = I40E_ERR_PARAM; 2178 - goto error_param; 2179 - } 1941 + if 
(i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { 1942 + aq_ret = I40E_ERR_PARAM; 1943 + goto error_param; 2180 1944 } 2181 1945 2182 1946 i40e_config_irq_link_list(vf, vsi_id, map); ··· 2193 1975 struct i40e_pf *pf = vf->pf; 2194 1976 u16 vsi_id = vqs->vsi_id; 2195 1977 i40e_status aq_ret = 0; 1978 + int i; 2196 1979 2197 1980 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2198 1981 aq_ret = I40E_ERR_PARAM; ··· 2212 1993 2213 1994 if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx])) 2214 1995 aq_ret = I40E_ERR_TIMEOUT; 1996 + 1997 + /* need to start the rings for additional ADq VSI's as well */ 1998 + if (vf->adq_enabled) { 1999 + /* zero belongs to LAN VSI */ 2000 + for (i = 1; i < vf->num_tc; i++) { 2001 + if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) 2002 + aq_ret = I40E_ERR_TIMEOUT; 2003 + } 2004 + } 2005 + 2215 2006 error_param: 2216 2007 /* send the response to the VF */ 2217 2008 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, ··· 2917 2688 } 2918 2689 2919 2690 /** 2691 + * i40e_validate_cloud_filter 2692 + * @mask: mask for TC filter 2693 + * @data: data for TC filter 2694 + * 2695 + * This function validates cloud filter programmed as TC filter for ADq 2696 + **/ 2697 + static int i40e_validate_cloud_filter(struct i40e_vf *vf, 2698 + struct virtchnl_filter *tc_filter) 2699 + { 2700 + struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; 2701 + struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; 2702 + struct i40e_pf *pf = vf->pf; 2703 + struct i40e_vsi *vsi = NULL; 2704 + struct i40e_mac_filter *f; 2705 + struct hlist_node *h; 2706 + bool found = false; 2707 + int bkt; 2708 + 2709 + if (!tc_filter->action) { 2710 + dev_info(&pf->pdev->dev, 2711 + "VF %d: Currently ADq doesn't support Drop Action\n", 2712 + vf->vf_id); 2713 + goto err; 2714 + } 2715 + 2716 + /* action_meta is TC number here to which the filter is applied */ 2717 + if (!tc_filter->action_meta || 2718 + tc_filter->action_meta > I40E_MAX_VF_VSI) { 
2719 + dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", 2720 + vf->vf_id, tc_filter->action_meta); 2721 + goto err; 2722 + } 2723 + 2724 + /* Check filter if it's programmed for advanced mode or basic mode. 2725 + * There are two ADq modes (for VF only), 2726 + * 1. Basic mode: intended to allow as many filter options as possible 2727 + * to be added to a VF in Non-trusted mode. Main goal is 2728 + * to add filters to its own MAC and VLAN id. 2729 + * 2. Advanced mode: is for allowing filters to be applied other than 2730 + * its own MAC or VLAN. This mode requires the VF to be 2731 + * Trusted. 2732 + */ 2733 + if (mask.dst_mac[0] && !mask.dst_ip[0]) { 2734 + vsi = pf->vsi[vf->lan_vsi_idx]; 2735 + f = i40e_find_mac(vsi, data.dst_mac); 2736 + 2737 + if (!f) { 2738 + dev_info(&pf->pdev->dev, 2739 + "Destination MAC %pM doesn't belong to VF %d\n", 2740 + data.dst_mac, vf->vf_id); 2741 + goto err; 2742 + } 2743 + 2744 + if (mask.vlan_id) { 2745 + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, 2746 + hlist) { 2747 + if (f->vlan == ntohs(data.vlan_id)) { 2748 + found = true; 2749 + break; 2750 + } 2751 + } 2752 + if (!found) { 2753 + dev_info(&pf->pdev->dev, 2754 + "VF %d doesn't have any VLAN id %u\n", 2755 + vf->vf_id, ntohs(data.vlan_id)); 2756 + goto err; 2757 + } 2758 + } 2759 + } else { 2760 + /* Check if VF is trusted */ 2761 + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { 2762 + dev_err(&pf->pdev->dev, 2763 + "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", 2764 + vf->vf_id); 2765 + return I40E_ERR_CONFIG; 2766 + } 2767 + } 2768 + 2769 + if (mask.dst_mac[0] & data.dst_mac[0]) { 2770 + if (is_broadcast_ether_addr(data.dst_mac) || 2771 + is_zero_ether_addr(data.dst_mac)) { 2772 + dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n", 2773 + vf->vf_id, data.dst_mac); 2774 + goto err; 2775 + } 2776 + } 2777 + 2778 + if (mask.src_mac[0] & data.src_mac[0]) { 2779 + if 
(is_broadcast_ether_addr(data.src_mac) || 2780 + is_zero_ether_addr(data.src_mac)) { 2781 + dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n", 2782 + vf->vf_id, data.src_mac); 2783 + goto err; 2784 + } 2785 + } 2786 + 2787 + if (mask.dst_port & data.dst_port) { 2788 + if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) { 2789 + dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n", 2790 + vf->vf_id); 2791 + goto err; 2792 + } 2793 + } 2794 + 2795 + if (mask.src_port & data.src_port) { 2796 + if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) { 2797 + dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n", 2798 + vf->vf_id); 2799 + goto err; 2800 + } 2801 + } 2802 + 2803 + if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && 2804 + tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { 2805 + dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n", 2806 + vf->vf_id); 2807 + goto err; 2808 + } 2809 + 2810 + if (mask.vlan_id & data.vlan_id) { 2811 + if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { 2812 + dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n", 2813 + vf->vf_id); 2814 + goto err; 2815 + } 2816 + } 2817 + 2818 + return I40E_SUCCESS; 2819 + err: 2820 + return I40E_ERR_CONFIG; 2821 + } 2822 + 2823 + /** 2824 + * i40e_find_vsi_from_seid - searches for the vsi with the given seid 2825 + * @vf: pointer to the VF info 2826 + * @seid - seid of the vsi it is searching for 2827 + **/ 2828 + static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) 2829 + { 2830 + struct i40e_pf *pf = vf->pf; 2831 + struct i40e_vsi *vsi = NULL; 2832 + int i; 2833 + 2834 + for (i = 0; i < vf->num_tc ; i++) { 2835 + vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id); 2836 + if (vsi->seid == seid) 2837 + return vsi; 2838 + } 2839 + return NULL; 2840 + } 2841 + 2842 + /** 2843 + * i40e_del_all_cloud_filters 2844 + * @vf: pointer to the VF info 2845 + * 2846 + * This function deletes all cloud filters 2847 + **/ 2848 + static void 
i40e_del_all_cloud_filters(struct i40e_vf *vf) 2849 + { 2850 + struct i40e_cloud_filter *cfilter = NULL; 2851 + struct i40e_pf *pf = vf->pf; 2852 + struct i40e_vsi *vsi = NULL; 2853 + struct hlist_node *node; 2854 + int ret; 2855 + 2856 + hlist_for_each_entry_safe(cfilter, node, 2857 + &vf->cloud_filter_list, cloud_node) { 2858 + vsi = i40e_find_vsi_from_seid(vf, cfilter->seid); 2859 + 2860 + if (!vsi) { 2861 + dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n", 2862 + vf->vf_id, cfilter->seid); 2863 + continue; 2864 + } 2865 + 2866 + if (cfilter->dst_port) 2867 + ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, 2868 + false); 2869 + else 2870 + ret = i40e_add_del_cloud_filter(vsi, cfilter, false); 2871 + if (ret) 2872 + dev_err(&pf->pdev->dev, 2873 + "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 2874 + vf->vf_id, i40e_stat_str(&pf->hw, ret), 2875 + i40e_aq_str(&pf->hw, 2876 + pf->hw.aq.asq_last_status)); 2877 + 2878 + hlist_del(&cfilter->cloud_node); 2879 + kfree(cfilter); 2880 + vf->num_cloud_filters--; 2881 + } 2882 + } 2883 + 2884 + /** 2885 + * i40e_vc_del_cloud_filter 2886 + * @vf: pointer to the VF info 2887 + * @msg: pointer to the msg buffer 2888 + * 2889 + * This function deletes a cloud filter programmed as TC filter for ADq 2890 + **/ 2891 + static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) 2892 + { 2893 + struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; 2894 + struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 2895 + struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 2896 + struct i40e_cloud_filter cfilter, *cf = NULL; 2897 + struct i40e_pf *pf = vf->pf; 2898 + struct i40e_vsi *vsi = NULL; 2899 + struct hlist_node *node; 2900 + i40e_status aq_ret = 0; 2901 + int i, ret; 2902 + 2903 + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 2904 + aq_ret = I40E_ERR_PARAM; 2905 + goto err; 2906 + } 2907 + 2908 + if (!vf->adq_enabled) { 2909 + dev_info(&pf->pdev->dev, 
2910 + "VF %d: ADq not enabled, can't apply cloud filter\n", 2911 + vf->vf_id); 2912 + aq_ret = I40E_ERR_PARAM; 2913 + goto err; 2914 + } 2915 + 2916 + if (i40e_validate_cloud_filter(vf, vcf)) { 2917 + dev_info(&pf->pdev->dev, 2918 + "VF %d: Invalid input, can't apply cloud filter\n", 2919 + vf->vf_id); 2920 + aq_ret = I40E_ERR_PARAM; 2921 + goto err; 2922 + } 2923 + 2924 + memset(&cfilter, 0, sizeof(cfilter)); 2925 + /* parse destination mac address */ 2926 + for (i = 0; i < ETH_ALEN; i++) 2927 + cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 2928 + 2929 + /* parse source mac address */ 2930 + for (i = 0; i < ETH_ALEN; i++) 2931 + cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 2932 + 2933 + cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; 2934 + cfilter.dst_port = mask.dst_port & tcf.dst_port; 2935 + cfilter.src_port = mask.src_port & tcf.src_port; 2936 + 2937 + switch (vcf->flow_type) { 2938 + case VIRTCHNL_TCP_V4_FLOW: 2939 + cfilter.n_proto = ETH_P_IP; 2940 + if (mask.dst_ip[0] & tcf.dst_ip[0]) 2941 + memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 2942 + ARRAY_SIZE(tcf.dst_ip)); 2943 + else if (mask.src_ip[0] & tcf.dst_ip[0]) 2944 + memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 2945 + ARRAY_SIZE(tcf.dst_ip)); 2946 + break; 2947 + case VIRTCHNL_TCP_V6_FLOW: 2948 + cfilter.n_proto = ETH_P_IPV6; 2949 + if (mask.dst_ip[3] & tcf.dst_ip[3]) 2950 + memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip, 2951 + sizeof(cfilter.ip.v6.dst_ip6)); 2952 + if (mask.src_ip[3] & tcf.src_ip[3]) 2953 + memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip, 2954 + sizeof(cfilter.ip.v6.src_ip6)); 2955 + break; 2956 + default: 2957 + /* TC filter can be configured based on different combinations 2958 + * and in this case IP is not a part of filter config 2959 + */ 2960 + dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 2961 + vf->vf_id); 2962 + } 2963 + 2964 + /* get the vsi to which the tc belongs to */ 2965 + vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 2966 + cfilter.seid = 
vsi->seid; 2967 + cfilter.flags = vcf->field_flags; 2968 + 2969 + /* Deleting TC filter */ 2970 + if (tcf.dst_port) 2971 + ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false); 2972 + else 2973 + ret = i40e_add_del_cloud_filter(vsi, &cfilter, false); 2974 + if (ret) { 2975 + dev_err(&pf->pdev->dev, 2976 + "VF %d: Failed to delete cloud filter, err %s aq_err %s\n", 2977 + vf->vf_id, i40e_stat_str(&pf->hw, ret), 2978 + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 2979 + goto err; 2980 + } 2981 + 2982 + hlist_for_each_entry_safe(cf, node, 2983 + &vf->cloud_filter_list, cloud_node) { 2984 + if (cf->seid != cfilter.seid) 2985 + continue; 2986 + if (mask.dst_port) 2987 + if (cfilter.dst_port != cf->dst_port) 2988 + continue; 2989 + if (mask.dst_mac[0]) 2990 + if (!ether_addr_equal(cf->src_mac, cfilter.src_mac)) 2991 + continue; 2992 + /* for ipv4 data to be valid, only first byte of mask is set */ 2993 + if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0]) 2994 + if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip, 2995 + ARRAY_SIZE(tcf.dst_ip))) 2996 + continue; 2997 + /* for ipv6, mask is set for all sixteen bytes (4 words) */ 2998 + if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 2999 + if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3000 + sizeof(cfilter.ip.v6.src_ip6))) 3001 + continue; 3002 + if (mask.vlan_id) 3003 + if (cfilter.vlan_id != cf->vlan_id) 3004 + continue; 3005 + 3006 + hlist_del(&cf->cloud_node); 3007 + kfree(cf); 3008 + vf->num_cloud_filters--; 3009 + } 3010 + 3011 + err: 3012 + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER, 3013 + aq_ret); 3014 + } 3015 + 3016 + /** 3017 + * i40e_vc_add_cloud_filter 3018 + * @vf: pointer to the VF info 3019 + * @msg: pointer to the msg buffer 3020 + * 3021 + * This function adds a cloud filter programmed as TC filter for ADq 3022 + **/ 3023 + static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 3024 + { 3025 + struct virtchnl_filter *vcf = (struct virtchnl_filter 
*)msg; 3026 + struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; 3027 + struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; 3028 + struct i40e_cloud_filter *cfilter = NULL; 3029 + struct i40e_pf *pf = vf->pf; 3030 + struct i40e_vsi *vsi = NULL; 3031 + i40e_status aq_ret = 0; 3032 + int i, ret; 3033 + 3034 + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3035 + aq_ret = I40E_ERR_PARAM; 3036 + goto err; 3037 + } 3038 + 3039 + if (!vf->adq_enabled) { 3040 + dev_info(&pf->pdev->dev, 3041 + "VF %d: ADq is not enabled, can't apply cloud filter\n", 3042 + vf->vf_id); 3043 + aq_ret = I40E_ERR_PARAM; 3044 + goto err; 3045 + } 3046 + 3047 + if (i40e_validate_cloud_filter(vf, vcf)) { 3048 + dev_info(&pf->pdev->dev, 3049 + "VF %d: Invalid input/s, can't apply cloud filter\n", 3050 + vf->vf_id); 3051 + aq_ret = I40E_ERR_PARAM; 3052 + goto err; 3053 + } 3054 + 3055 + cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); 3056 + if (!cfilter) 3057 + return -ENOMEM; 3058 + 3059 + /* parse destination mac address */ 3060 + for (i = 0; i < ETH_ALEN; i++) 3061 + cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; 3062 + 3063 + /* parse source mac address */ 3064 + for (i = 0; i < ETH_ALEN; i++) 3065 + cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; 3066 + 3067 + cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; 3068 + cfilter->dst_port = mask.dst_port & tcf.dst_port; 3069 + cfilter->src_port = mask.src_port & tcf.src_port; 3070 + 3071 + switch (vcf->flow_type) { 3072 + case VIRTCHNL_TCP_V4_FLOW: 3073 + cfilter->n_proto = ETH_P_IP; 3074 + if (mask.dst_ip[0] & tcf.dst_ip[0]) 3075 + memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3076 + ARRAY_SIZE(tcf.dst_ip)); 3077 + else if (mask.src_ip[0] & tcf.dst_ip[0]) 3078 + memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3079 + ARRAY_SIZE(tcf.dst_ip)); 3080 + break; 3081 + case VIRTCHNL_TCP_V6_FLOW: 3082 + cfilter->n_proto = ETH_P_IPV6; 3083 + if (mask.dst_ip[3] & tcf.dst_ip[3]) 3084 + memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip, 3085 + 
sizeof(cfilter->ip.v6.dst_ip6)); 3086 + if (mask.src_ip[3] & tcf.src_ip[3]) 3087 + memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip, 3088 + sizeof(cfilter->ip.v6.src_ip6)); 3089 + break; 3090 + default: 3091 + /* TC filter can be configured based on different combinations 3092 + * and in this case IP is not a part of filter config 3093 + */ 3094 + dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n", 3095 + vf->vf_id); 3096 + } 3097 + 3098 + /* get the VSI to which the TC belongs to */ 3099 + vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; 3100 + cfilter->seid = vsi->seid; 3101 + cfilter->flags = vcf->field_flags; 3102 + 3103 + /* Adding cloud filter programmed as TC filter */ 3104 + if (tcf.dst_port) 3105 + ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); 3106 + else 3107 + ret = i40e_add_del_cloud_filter(vsi, cfilter, true); 3108 + if (ret) { 3109 + dev_err(&pf->pdev->dev, 3110 + "VF %d: Failed to add cloud filter, err %s aq_err %s\n", 3111 + vf->vf_id, i40e_stat_str(&pf->hw, ret), 3112 + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); 3113 + goto err; 3114 + } 3115 + 3116 + INIT_HLIST_NODE(&cfilter->cloud_node); 3117 + hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); 3118 + vf->num_cloud_filters++; 3119 + err: 3120 + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, 3121 + aq_ret); 3122 + } 3123 + 3124 + /** 3125 + * i40e_vc_add_qch_msg: Add queue channel and enable ADq 3126 + * @vf: pointer to the VF info 3127 + * @msg: pointer to the msg buffer 3128 + **/ 3129 + static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) 3130 + { 3131 + struct virtchnl_tc_info *tci = 3132 + (struct virtchnl_tc_info *)msg; 3133 + struct i40e_pf *pf = vf->pf; 3134 + struct i40e_link_status *ls = &pf->hw.phy.link_info; 3135 + int i, adq_request_qps = 0, speed = 0; 3136 + i40e_status aq_ret = 0; 3137 + 3138 + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3139 + aq_ret = I40E_ERR_PARAM; 3140 + goto err; 3141 + } 3142 + 3143 + /* 
ADq cannot be applied if spoof check is ON */ 3144 + if (vf->spoofchk) { 3145 + dev_err(&pf->pdev->dev, 3146 + "Spoof check is ON, turn it OFF to enable ADq\n"); 3147 + aq_ret = I40E_ERR_PARAM; 3148 + goto err; 3149 + } 3150 + 3151 + if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { 3152 + dev_err(&pf->pdev->dev, 3153 + "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n", 3154 + vf->vf_id); 3155 + aq_ret = I40E_ERR_PARAM; 3156 + goto err; 3157 + } 3158 + 3159 + /* max number of traffic classes for VF currently capped at 4 */ 3160 + if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { 3161 + dev_err(&pf->pdev->dev, 3162 + "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n", 3163 + vf->vf_id, tci->num_tc); 3164 + aq_ret = I40E_ERR_PARAM; 3165 + goto err; 3166 + } 3167 + 3168 + /* validate queues for each TC */ 3169 + for (i = 0; i < tci->num_tc; i++) 3170 + if (!tci->list[i].count || 3171 + tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { 3172 + dev_err(&pf->pdev->dev, 3173 + "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n", 3174 + vf->vf_id, i, tci->list[i].count); 3175 + aq_ret = I40E_ERR_PARAM; 3176 + goto err; 3177 + } 3178 + 3179 + /* need Max VF queues but already have default number of queues */ 3180 + adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; 3181 + 3182 + if (pf->queues_left < adq_request_qps) { 3183 + dev_err(&pf->pdev->dev, 3184 + "No queues left to allocate to VF %d\n", 3185 + vf->vf_id); 3186 + aq_ret = I40E_ERR_PARAM; 3187 + goto err; 3188 + } else { 3189 + /* we need to allocate max VF queues to enable ADq so as to 3190 + * make sure ADq enabled VF always gets back queues when it 3191 + * goes through a reset. 
3192 + */ 3193 + vf->num_queue_pairs = I40E_MAX_VF_QUEUES; 3194 + } 3195 + 3196 + /* get link speed in MB to validate rate limit */ 3197 + switch (ls->link_speed) { 3198 + case VIRTCHNL_LINK_SPEED_100MB: 3199 + speed = SPEED_100; 3200 + break; 3201 + case VIRTCHNL_LINK_SPEED_1GB: 3202 + speed = SPEED_1000; 3203 + break; 3204 + case VIRTCHNL_LINK_SPEED_10GB: 3205 + speed = SPEED_10000; 3206 + break; 3207 + case VIRTCHNL_LINK_SPEED_20GB: 3208 + speed = SPEED_20000; 3209 + break; 3210 + case VIRTCHNL_LINK_SPEED_25GB: 3211 + speed = SPEED_25000; 3212 + break; 3213 + case VIRTCHNL_LINK_SPEED_40GB: 3214 + speed = SPEED_40000; 3215 + break; 3216 + default: 3217 + dev_err(&pf->pdev->dev, 3218 + "Cannot detect link speed\n"); 3219 + aq_ret = I40E_ERR_PARAM; 3220 + goto err; 3221 + } 3222 + 3223 + /* parse data from the queue channel info */ 3224 + vf->num_tc = tci->num_tc; 3225 + for (i = 0; i < vf->num_tc; i++) { 3226 + if (tci->list[i].max_tx_rate) { 3227 + if (tci->list[i].max_tx_rate > speed) { 3228 + dev_err(&pf->pdev->dev, 3229 + "Invalid max tx rate %llu specified for VF %d.", 3230 + tci->list[i].max_tx_rate, 3231 + vf->vf_id); 3232 + aq_ret = I40E_ERR_PARAM; 3233 + goto err; 3234 + } else { 3235 + vf->ch[i].max_tx_rate = 3236 + tci->list[i].max_tx_rate; 3237 + } 3238 + } 3239 + vf->ch[i].num_qps = tci->list[i].count; 3240 + } 3241 + 3242 + /* set this flag only after making sure all inputs are sane */ 3243 + vf->adq_enabled = true; 3244 + /* num_req_queues is set when user changes number of queues via ethtool 3245 + * and this causes issue for default VSI(which depends on this variable) 3246 + * when ADq is enabled, hence reset it. 
3247 + */ 3248 + vf->num_req_queues = 0; 3249 + 3250 + /* reset the VF in order to allocate resources */ 3251 + i40e_vc_notify_vf_reset(vf); 3252 + i40e_reset_vf(vf, false); 3253 + 3254 + return I40E_SUCCESS; 3255 + 3256 + /* send the response to the VF */ 3257 + err: 3258 + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS, 3259 + aq_ret); 3260 + } 3261 + 3262 + /** 3263 + * i40e_vc_del_qch_msg 3264 + * @vf: pointer to the VF info 3265 + * @msg: pointer to the msg buffer 3266 + **/ 3267 + static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) 3268 + { 3269 + struct i40e_pf *pf = vf->pf; 3270 + i40e_status aq_ret = 0; 3271 + 3272 + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { 3273 + aq_ret = I40E_ERR_PARAM; 3274 + goto err; 3275 + } 3276 + 3277 + if (vf->adq_enabled) { 3278 + i40e_del_all_cloud_filters(vf); 3279 + i40e_del_qch(vf); 3280 + vf->adq_enabled = false; 3281 + vf->num_tc = 0; 3282 + dev_info(&pf->pdev->dev, 3283 + "Deleting Queue Channels and cloud filters for ADq on VF %d\n", 3284 + vf->vf_id); 3285 + } else { 3286 + dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", 3287 + vf->vf_id); 3288 + aq_ret = I40E_ERR_PARAM; 3289 + } 3290 + 3291 + /* reset the VF in order to allocate resources */ 3292 + i40e_vc_notify_vf_reset(vf); 3293 + i40e_reset_vf(vf, false); 3294 + 3295 + return I40E_SUCCESS; 3296 + 3297 + err: 3298 + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, 3299 + aq_ret); 3300 + } 3301 + 3302 + /** 2920 3303 * i40e_vc_process_vf_msg 2921 3304 * @pf: pointer to the PF structure 2922 3305 * @vf_id: source VF id ··· 3657 2816 case VIRTCHNL_OP_REQUEST_QUEUES: 3658 2817 ret = i40e_vc_request_queues_msg(vf, msg, msglen); 3659 2818 break; 3660 - 2819 + case VIRTCHNL_OP_ENABLE_CHANNELS: 2820 + ret = i40e_vc_add_qch_msg(vf, msg); 2821 + break; 2822 + case VIRTCHNL_OP_DISABLE_CHANNELS: 2823 + ret = i40e_vc_del_qch_msg(vf, msg); 2824 + break; 2825 + case 
VIRTCHNL_OP_ADD_CLOUD_FILTER: 2826 + ret = i40e_vc_add_cloud_filter(vf, msg); 2827 + break; 2828 + case VIRTCHNL_OP_DEL_CLOUD_FILTER: 2829 + ret = i40e_vc_del_cloud_filter(vf, msg); 2830 + break; 3661 2831 case VIRTCHNL_OP_UNKNOWN: 3662 2832 default: 3663 2833 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", ··· 4234 3382 i40e_vc_disable_vf(vf); 4235 3383 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", 4236 3384 vf_id, setting ? "" : "un"); 3385 + 3386 + if (vf->adq_enabled) { 3387 + if (!vf->trusted) { 3388 + dev_info(&pf->pdev->dev, 3389 + "VF %u no longer Trusted, deleting all cloud filters\n", 3390 + vf_id); 3391 + i40e_del_all_cloud_filters(vf); 3392 + } 3393 + } 3394 + 4237 3395 out: 4238 3396 return ret; 4239 3397 }
+20
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
··· 69 69 I40E_VIRTCHNL_VF_CAP_IWARP, 70 70 }; 71 71 72 + /* In ADq, max 4 VSI's can be allocated per VF including primary VF VSI. 73 + * These variables are used to store indices, id's and number of queues 74 + * for each VSI including that of primary VF VSI. Each Traffic class is 75 + * termed as channel and each channel can in-turn have 4 queues which 76 + * means max 16 queues overall per VF. 77 + */ 78 + struct i40evf_channel { 79 + u16 vsi_idx; /* index in PF struct for all channel VSIs */ 80 + u16 vsi_id; /* VSI ID used by firmware */ 81 + u16 num_qps; /* number of queue pairs requested by user */ 82 + u64 max_tx_rate; /* bandwidth rate allocation for VSIs */ 83 + }; 84 + 72 85 /* VF information structure */ 73 86 struct i40e_vf { 74 87 struct i40e_pf *pf; ··· 123 110 bool spoofchk; 124 111 u16 num_mac; 125 112 u16 num_vlan; 113 + 114 + /* ADq related variables */ 115 + bool adq_enabled; /* flag to enable adq */ 116 + u8 num_tc; 117 + struct i40evf_channel ch[I40E_MAX_VF_VSI]; 118 + struct hlist_head cloud_filter_list; 119 + u16 num_cloud_filters; 126 120 127 121 /* RDMA Client */ 128 122 struct virtchnl_iwarp_qvlist_info *qvlist_info;
+72
drivers/net/ethernet/intel/i40evf/i40evf.h
··· 52 52 #include <linux/socket.h> 53 53 #include <linux/jiffies.h> 54 54 #include <net/ip6_checksum.h> 55 + #include <net/pkt_cls.h> 55 56 #include <net/udp.h> 57 + #include <net/tc_act/tc_gact.h> 58 + #include <net/tc_act/tc_mirred.h> 56 59 57 60 #include "i40e_type.h" 58 61 #include <linux/avf/virtchnl.h> ··· 109 106 110 107 #define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) 111 108 #define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4) 109 + #define I40EVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ 112 110 113 111 /* MAX_MSIX_Q_VECTORS of these are allocated, 114 112 * but we only use one per queue-specific vector. ··· 172 168 bool add; /* filter needs to be added */ 173 169 }; 174 170 171 + #define I40EVF_MAX_TRAFFIC_CLASS 4 172 + /* State of traffic class creation */ 173 + enum i40evf_tc_state_t { 174 + __I40EVF_TC_INVALID, /* no traffic class, default state */ 175 + __I40EVF_TC_RUNNING, /* traffic classes have been created */ 176 + }; 177 + 178 + /* channel info */ 179 + struct i40evf_channel_config { 180 + struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS]; 181 + enum i40evf_tc_state_t state; 182 + u8 total_qps; 183 + }; 184 + 185 + /* State of cloud filter */ 186 + enum i40evf_cloud_filter_state_t { 187 + __I40EVF_CF_INVALID, /* cloud filter not added */ 188 + __I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */ 189 + __I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */ 190 + __I40EVF_CF_ACTIVE, /* cloud filter is active */ 191 + }; 192 + 175 193 /* Driver state. The order of these is important! 
*/ 176 194 enum i40evf_state_t { 177 195 __I40EVF_STARTUP, /* driver loaded, probe complete */ ··· 213 187 __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ 214 188 __I40EVF_IN_CLIENT_TASK, 215 189 __I40EVF_IN_REMOVE_TASK, /* device being removed */ 190 + }; 191 + 192 + #define I40EVF_CLOUD_FIELD_OMAC 0x01 193 + #define I40EVF_CLOUD_FIELD_IMAC 0x02 194 + #define I40EVF_CLOUD_FIELD_IVLAN 0x04 195 + #define I40EVF_CLOUD_FIELD_TEN_ID 0x08 196 + #define I40EVF_CLOUD_FIELD_IIP 0x10 197 + 198 + #define I40EVF_CF_FLAGS_OMAC I40EVF_CLOUD_FIELD_OMAC 199 + #define I40EVF_CF_FLAGS_IMAC I40EVF_CLOUD_FIELD_IMAC 200 + #define I40EVF_CF_FLAGS_IMAC_IVLAN (I40EVF_CLOUD_FIELD_IMAC |\ 201 + I40EVF_CLOUD_FIELD_IVLAN) 202 + #define I40EVF_CF_FLAGS_IMAC_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\ 203 + I40EVF_CLOUD_FIELD_TEN_ID) 204 + #define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC (I40EVF_CLOUD_FIELD_OMAC |\ 205 + I40EVF_CLOUD_FIELD_IMAC |\ 206 + I40EVF_CLOUD_FIELD_TEN_ID) 207 + #define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\ 208 + I40EVF_CLOUD_FIELD_IVLAN |\ 209 + I40EVF_CLOUD_FIELD_TEN_ID) 210 + #define I40EVF_CF_FLAGS_IIP I40E_CLOUD_FIELD_IIP 211 + 212 + /* bookkeeping of cloud filters */ 213 + struct i40evf_cloud_filter { 214 + enum i40evf_cloud_filter_state_t state; 215 + struct list_head list; 216 + struct virtchnl_filter f; 217 + unsigned long cookie; 218 + bool del; /* filter needs to be deleted */ 219 + bool add; /* filter needs to be added */ 216 220 }; 217 221 218 222 /* board specific private data structure */ ··· 296 240 #define I40EVF_FLAG_ALLMULTI_ON BIT(14) 297 241 #define I40EVF_FLAG_LEGACY_RX BIT(15) 298 242 #define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16) 243 + #define I40EVF_FLAG_QUEUES_DISABLED BIT(17) 299 244 /* duplicates for common code */ 300 245 #define I40E_FLAG_DCB_ENABLED 0 301 246 #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED ··· 325 268 #define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) 326 269 #define 
I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) 327 270 #define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) 271 + #define I40EVF_FLAG_AQ_ENABLE_CHANNELS BIT(21) 272 + #define I40EVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) 273 + #define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) 274 + #define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) 328 275 329 276 /* OS defined structs */ 330 277 struct net_device *netdev; ··· 374 313 u16 rss_lut_size; 375 314 u8 *rss_key; 376 315 u8 *rss_lut; 316 + /* ADQ related members */ 317 + struct i40evf_channel_config ch_config; 318 + u8 num_tc; 319 + struct list_head cloud_filter_list; 320 + /* lock to protest access to the cloud filter list */ 321 + spinlock_t cloud_filter_list_lock; 322 + u16 num_cloud_filters; 377 323 }; 378 324 379 325 ··· 447 379 void i40evf_notify_client_l2_params(struct i40e_vsi *vsi); 448 380 void i40evf_notify_client_open(struct i40e_vsi *vsi); 449 381 void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset); 382 + void i40evf_enable_channels(struct i40evf_adapter *adapter); 383 + void i40evf_disable_channels(struct i40evf_adapter *adapter); 384 + void i40evf_add_cloud_filter(struct i40evf_adapter *adapter); 385 + void i40evf_del_cloud_filter(struct i40evf_adapter *adapter); 450 386 #endif /* _I40EVF_H_ */
+6
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
··· 695 695 return -EINVAL; 696 696 } 697 697 698 + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 699 + adapter->num_tc) { 700 + dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n"); 701 + return -EINVAL; 702 + } 703 + 698 704 /* All of these should have already been checked by ethtool before this 699 705 * even gets to us, but just to be sure. 700 706 */
+801 -2
drivers/net/ethernet/intel/i40evf/i40evf_main.c
··· 1041 1041 struct net_device *netdev = adapter->netdev; 1042 1042 struct i40evf_vlan_filter *vlf; 1043 1043 struct i40evf_mac_filter *f; 1044 + struct i40evf_cloud_filter *cf; 1044 1045 1045 1046 if (adapter->state <= __I40EVF_DOWN_PENDING) 1046 1047 return; ··· 1065 1064 1066 1065 /* remove all VLAN filters */ 1067 1066 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { 1068 - f->remove = true; 1067 + vlf->remove = true; 1069 1068 } 1070 1069 1071 1070 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1071 + 1072 + /* remove all cloud filters */ 1073 + spin_lock_bh(&adapter->cloud_filter_list_lock); 1074 + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1075 + cf->del = true; 1076 + } 1077 + spin_unlock_bh(&adapter->cloud_filter_list_lock); 1072 1078 1073 1079 if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && 1074 1080 adapter->state != __I40EVF_RESETTING) { ··· 1087 1079 */ 1088 1080 adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER; 1089 1081 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 1082 + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; 1090 1083 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; 1091 1084 } 1092 1085 ··· 1173 1164 */ 1174 1165 if (adapter->num_req_queues) 1175 1166 num_active_queues = adapter->num_req_queues; 1167 + else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1168 + adapter->num_tc) 1169 + num_active_queues = adapter->ch_config.total_qps; 1176 1170 else 1177 1171 num_active_queues = min_t(int, 1178 1172 adapter->vsi_res->num_queue_pairs, ··· 1503 1491 goto err_alloc_q_vectors; 1504 1492 } 1505 1493 1494 + /* If we've made it so far while ADq flag being ON, then we haven't 1495 + * bailed out anywhere in middle. And ADq isn't just enabled but actual 1496 + * resources have been allocated in the reset path. 1497 + * Now we can truly claim that ADq is enabled. 
1498 + */ 1499 + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1500 + adapter->num_tc) 1501 + dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", 1502 + adapter->num_tc); 1503 + 1506 1504 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", 1507 1505 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", 1508 1506 adapter->num_active_queues); ··· 1754 1732 i40evf_set_promiscuous(adapter, 0); 1755 1733 goto watchdog_done; 1756 1734 } 1735 + 1736 + if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) { 1737 + i40evf_enable_channels(adapter); 1738 + goto watchdog_done; 1739 + } 1740 + 1741 + if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) { 1742 + i40evf_disable_channels(adapter); 1743 + goto watchdog_done; 1744 + } 1745 + 1746 + if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) { 1747 + i40evf_add_cloud_filter(adapter); 1748 + goto watchdog_done; 1749 + } 1750 + 1751 + if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) { 1752 + i40evf_del_cloud_filter(adapter); 1753 + goto watchdog_done; 1754 + } 1755 + 1757 1756 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); 1758 1757 1759 1758 if (adapter->state == __I40EVF_RUNNING) ··· 1798 1755 { 1799 1756 struct i40evf_mac_filter *f, *ftmp; 1800 1757 struct i40evf_vlan_filter *fv, *fvtmp; 1758 + struct i40evf_cloud_filter *cf, *cftmp; 1801 1759 1802 1760 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; 1803 1761 ··· 1820 1776 1821 1777 spin_lock_bh(&adapter->mac_vlan_list_lock); 1822 1778 1823 - /* Delete all of the filters, both MAC and VLAN. 
*/ 1779 + /* Delete all of the filters */ 1824 1780 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 1825 1781 list_del(&f->list); 1826 1782 kfree(f); ··· 1832 1788 } 1833 1789 1834 1790 spin_unlock_bh(&adapter->mac_vlan_list_lock); 1791 + 1792 + spin_lock_bh(&adapter->cloud_filter_list_lock); 1793 + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 1794 + list_del(&cf->list); 1795 + kfree(cf); 1796 + adapter->num_cloud_filters--; 1797 + } 1798 + spin_unlock_bh(&adapter->cloud_filter_list_lock); 1835 1799 1836 1800 i40evf_free_misc_irq(adapter); 1837 1801 i40evf_reset_interrupt_capability(adapter); ··· 1870 1818 struct i40evf_adapter *adapter = container_of(work, 1871 1819 struct i40evf_adapter, 1872 1820 reset_task); 1821 + struct virtchnl_vf_resource *vfres = adapter->vf_res; 1873 1822 struct net_device *netdev = adapter->netdev; 1874 1823 struct i40e_hw *hw = &adapter->hw; 1875 1824 struct i40evf_vlan_filter *vlf; 1825 + struct i40evf_cloud_filter *cf; 1876 1826 struct i40evf_mac_filter *f; 1877 1827 u32 reg_val; 1878 1828 int i = 0, err; ··· 1967 1913 i40evf_free_all_rx_resources(adapter); 1968 1914 i40evf_free_all_tx_resources(adapter); 1969 1915 1916 + adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED; 1970 1917 /* kill and reinit the admin queue */ 1971 1918 i40evf_shutdown_adminq(hw); 1972 1919 adapter->current_op = VIRTCHNL_OP_UNKNOWN; ··· 1999 1944 2000 1945 spin_unlock_bh(&adapter->mac_vlan_list_lock); 2001 1946 1947 + /* check if TCs are running and re-add all cloud filters */ 1948 + spin_lock_bh(&adapter->cloud_filter_list_lock); 1949 + if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1950 + adapter->num_tc) { 1951 + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1952 + cf->add = true; 1953 + } 1954 + } 1955 + spin_unlock_bh(&adapter->cloud_filter_list_lock); 1956 + 2002 1957 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; 2003 1958 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; 
1959 + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; 2004 1960 i40evf_misc_irq_enable(adapter); 2005 1961 2006 1962 mod_timer(&adapter->watchdog_timer, jiffies + 2); ··· 2274 2208 for (i = 0; i < adapter->num_active_queues; i++) 2275 2209 if (adapter->rx_rings[i].desc) 2276 2210 i40evf_free_rx_resources(&adapter->rx_rings[i]); 2211 + } 2212 + 2213 + /** 2214 + * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth 2215 + * @adapter: board private structure 2216 + * @max_tx_rate: max Tx bw for a tc 2217 + **/ 2218 + static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter, 2219 + u64 max_tx_rate) 2220 + { 2221 + int speed = 0, ret = 0; 2222 + 2223 + switch (adapter->link_speed) { 2224 + case I40E_LINK_SPEED_40GB: 2225 + speed = 40000; 2226 + break; 2227 + case I40E_LINK_SPEED_25GB: 2228 + speed = 25000; 2229 + break; 2230 + case I40E_LINK_SPEED_20GB: 2231 + speed = 20000; 2232 + break; 2233 + case I40E_LINK_SPEED_10GB: 2234 + speed = 10000; 2235 + break; 2236 + case I40E_LINK_SPEED_1GB: 2237 + speed = 1000; 2238 + break; 2239 + case I40E_LINK_SPEED_100MB: 2240 + speed = 100; 2241 + break; 2242 + default: 2243 + break; 2244 + } 2245 + 2246 + if (max_tx_rate > speed) { 2247 + dev_err(&adapter->pdev->dev, 2248 + "Invalid tx rate specified\n"); 2249 + ret = -EINVAL; 2250 + } 2251 + 2252 + return ret; 2253 + } 2254 + 2255 + /** 2256 + * i40evf_validate_channel_config - validate queue mapping info 2257 + * @adapter: board private structure 2258 + * @mqprio_qopt: queue parameters 2259 + * 2260 + * This function validates if the config provided by the user to 2261 + * configure queue channels is valid or not. Returns 0 on a valid 2262 + * config. 
2263 + **/ 2264 + static int i40evf_validate_ch_config(struct i40evf_adapter *adapter, 2265 + struct tc_mqprio_qopt_offload *mqprio_qopt) 2266 + { 2267 + u64 total_max_rate = 0; 2268 + int i, num_qps = 0; 2269 + u64 tx_rate = 0; 2270 + int ret = 0; 2271 + 2272 + if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS || 2273 + mqprio_qopt->qopt.num_tc < 1) 2274 + return -EINVAL; 2275 + 2276 + for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { 2277 + if (!mqprio_qopt->qopt.count[i] || 2278 + mqprio_qopt->qopt.offset[i] != num_qps) 2279 + return -EINVAL; 2280 + if (mqprio_qopt->min_rate[i]) { 2281 + dev_err(&adapter->pdev->dev, 2282 + "Invalid min tx rate (greater than 0) specified\n"); 2283 + return -EINVAL; 2284 + } 2285 + /*convert to Mbps */ 2286 + tx_rate = div_u64(mqprio_qopt->max_rate[i], 2287 + I40EVF_MBPS_DIVISOR); 2288 + total_max_rate += tx_rate; 2289 + num_qps += mqprio_qopt->qopt.count[i]; 2290 + } 2291 + if (num_qps > MAX_QUEUES) 2292 + return -EINVAL; 2293 + 2294 + ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate); 2295 + return ret; 2296 + } 2297 + 2298 + /** 2299 + * i40evf_del_all_cloud_filters - delete all cloud filters 2300 + * on the traffic classes 2301 + **/ 2302 + static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter) 2303 + { 2304 + struct i40evf_cloud_filter *cf, *cftmp; 2305 + 2306 + spin_lock_bh(&adapter->cloud_filter_list_lock); 2307 + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 2308 + list) { 2309 + list_del(&cf->list); 2310 + kfree(cf); 2311 + adapter->num_cloud_filters--; 2312 + } 2313 + spin_unlock_bh(&adapter->cloud_filter_list_lock); 2314 + } 2315 + 2316 + /** 2317 + * __i40evf_setup_tc - configure multiple traffic classes 2318 + * @netdev: network interface device structure 2319 + * @type_date: tc offload data 2320 + * 2321 + * This function processes the config information provided by the 2322 + * user to configure traffic classes/queue channels and packages the 2323 + * 
information to request the PF to setup traffic classes. 2324 + * 2325 + * Returns 0 on success. 2326 + **/ 2327 + static int __i40evf_setup_tc(struct net_device *netdev, void *type_data) 2328 + { 2329 + struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 2330 + struct i40evf_adapter *adapter = netdev_priv(netdev); 2331 + struct virtchnl_vf_resource *vfres = adapter->vf_res; 2332 + u8 num_tc = 0, total_qps = 0; 2333 + int ret = 0, netdev_tc = 0; 2334 + u64 max_tx_rate; 2335 + u16 mode; 2336 + int i; 2337 + 2338 + num_tc = mqprio_qopt->qopt.num_tc; 2339 + mode = mqprio_qopt->mode; 2340 + 2341 + /* delete queue_channel */ 2342 + if (!mqprio_qopt->qopt.hw) { 2343 + if (adapter->ch_config.state == __I40EVF_TC_RUNNING) { 2344 + /* reset the tc configuration */ 2345 + netdev_reset_tc(netdev); 2346 + adapter->num_tc = 0; 2347 + netif_tx_stop_all_queues(netdev); 2348 + netif_tx_disable(netdev); 2349 + i40evf_del_all_cloud_filters(adapter); 2350 + adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS; 2351 + goto exit; 2352 + } else { 2353 + return -EINVAL; 2354 + } 2355 + } 2356 + 2357 + /* add queue channel */ 2358 + if (mode == TC_MQPRIO_MODE_CHANNEL) { 2359 + if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) { 2360 + dev_err(&adapter->pdev->dev, "ADq not supported\n"); 2361 + return -EOPNOTSUPP; 2362 + } 2363 + if (adapter->ch_config.state != __I40EVF_TC_INVALID) { 2364 + dev_err(&adapter->pdev->dev, "TC configuration already exists\n"); 2365 + return -EINVAL; 2366 + } 2367 + 2368 + ret = i40evf_validate_ch_config(adapter, mqprio_qopt); 2369 + if (ret) 2370 + return ret; 2371 + /* Return if same TC config is requested */ 2372 + if (adapter->num_tc == num_tc) 2373 + return 0; 2374 + adapter->num_tc = num_tc; 2375 + 2376 + for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { 2377 + if (i < num_tc) { 2378 + adapter->ch_config.ch_info[i].count = 2379 + mqprio_qopt->qopt.count[i]; 2380 + adapter->ch_config.ch_info[i].offset = 2381 + mqprio_qopt->qopt.offset[i]; 2382 + 
total_qps += mqprio_qopt->qopt.count[i]; 2383 + max_tx_rate = mqprio_qopt->max_rate[i]; 2384 + /* convert to Mbps */ 2385 + max_tx_rate = div_u64(max_tx_rate, 2386 + I40EVF_MBPS_DIVISOR); 2387 + adapter->ch_config.ch_info[i].max_tx_rate = 2388 + max_tx_rate; 2389 + } else { 2390 + adapter->ch_config.ch_info[i].count = 1; 2391 + adapter->ch_config.ch_info[i].offset = 0; 2392 + } 2393 + } 2394 + adapter->ch_config.total_qps = total_qps; 2395 + netif_tx_stop_all_queues(netdev); 2396 + netif_tx_disable(netdev); 2397 + adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS; 2398 + netdev_reset_tc(netdev); 2399 + /* Report the tc mapping up the stack */ 2400 + netdev_set_num_tc(adapter->netdev, num_tc); 2401 + for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { 2402 + u16 qcount = mqprio_qopt->qopt.count[i]; 2403 + u16 qoffset = mqprio_qopt->qopt.offset[i]; 2404 + 2405 + if (i < num_tc) 2406 + netdev_set_tc_queue(netdev, netdev_tc++, qcount, 2407 + qoffset); 2408 + } 2409 + } 2410 + exit: 2411 + return ret; 2412 + } 2413 + 2414 + /** 2415 + * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel 2416 + * @adapter: board private structure 2417 + * @cls_flower: pointer to struct tc_cls_flower_offload 2418 + * @filter: pointer to cloud filter structure 2419 + */ 2420 + static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, 2421 + struct tc_cls_flower_offload *f, 2422 + struct i40evf_cloud_filter *filter) 2423 + { 2424 + u16 n_proto_mask = 0; 2425 + u16 n_proto_key = 0; 2426 + u8 field_flags = 0; 2427 + u16 addr_type = 0; 2428 + u16 n_proto = 0; 2429 + int i = 0; 2430 + 2431 + if (f->dissector->used_keys & 2432 + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 2433 + BIT(FLOW_DISSECTOR_KEY_BASIC) | 2434 + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 2435 + BIT(FLOW_DISSECTOR_KEY_VLAN) | 2436 + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 2437 + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 2438 + BIT(FLOW_DISSECTOR_KEY_PORTS) | 2439 + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { 2440 + 
dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", 2441 + f->dissector->used_keys); 2442 + return -EOPNOTSUPP; 2443 + } 2444 + 2445 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 2446 + struct flow_dissector_key_keyid *mask = 2447 + skb_flow_dissector_target(f->dissector, 2448 + FLOW_DISSECTOR_KEY_ENC_KEYID, 2449 + f->mask); 2450 + 2451 + if (mask->keyid != 0) 2452 + field_flags |= I40EVF_CLOUD_FIELD_TEN_ID; 2453 + } 2454 + 2455 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { 2456 + struct flow_dissector_key_basic *key = 2457 + skb_flow_dissector_target(f->dissector, 2458 + FLOW_DISSECTOR_KEY_BASIC, 2459 + f->key); 2460 + 2461 + struct flow_dissector_key_basic *mask = 2462 + skb_flow_dissector_target(f->dissector, 2463 + FLOW_DISSECTOR_KEY_BASIC, 2464 + f->mask); 2465 + n_proto_key = ntohs(key->n_proto); 2466 + n_proto_mask = ntohs(mask->n_proto); 2467 + 2468 + if (n_proto_key == ETH_P_ALL) { 2469 + n_proto_key = 0; 2470 + n_proto_mask = 0; 2471 + } 2472 + n_proto = n_proto_key & n_proto_mask; 2473 + if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) 2474 + return -EINVAL; 2475 + if (n_proto == ETH_P_IPV6) { 2476 + /* specify flow type as TCP IPv6 */ 2477 + filter->f.flow_type = VIRTCHNL_TCP_V6_FLOW; 2478 + } 2479 + 2480 + if (key->ip_proto != IPPROTO_TCP) { 2481 + dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); 2482 + return -EINVAL; 2483 + } 2484 + } 2485 + 2486 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 2487 + struct flow_dissector_key_eth_addrs *key = 2488 + skb_flow_dissector_target(f->dissector, 2489 + FLOW_DISSECTOR_KEY_ETH_ADDRS, 2490 + f->key); 2491 + 2492 + struct flow_dissector_key_eth_addrs *mask = 2493 + skb_flow_dissector_target(f->dissector, 2494 + FLOW_DISSECTOR_KEY_ETH_ADDRS, 2495 + f->mask); 2496 + /* use is_broadcast and is_zero to check for all 0xf or 0 */ 2497 + if (!is_zero_ether_addr(mask->dst)) { 2498 + if 
(is_broadcast_ether_addr(mask->dst)) { 2499 + field_flags |= I40EVF_CLOUD_FIELD_OMAC; 2500 + } else { 2501 + dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", 2502 + mask->dst); 2503 + return I40E_ERR_CONFIG; 2504 + } 2505 + } 2506 + 2507 + if (!is_zero_ether_addr(mask->src)) { 2508 + if (is_broadcast_ether_addr(mask->src)) { 2509 + field_flags |= I40EVF_CLOUD_FIELD_IMAC; 2510 + } else { 2511 + dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", 2512 + mask->src); 2513 + return I40E_ERR_CONFIG; 2514 + } 2515 + } 2516 + 2517 + if (!is_zero_ether_addr(key->dst)) 2518 + if (is_valid_ether_addr(key->dst) || 2519 + is_multicast_ether_addr(key->dst)) { 2520 + /* set the mask if a valid dst_mac address */ 2521 + for (i = 0; i < ETH_ALEN; i++) 2522 + filter->f.mask.tcp_spec.dst_mac[i] |= 2523 + 0xff; 2524 + ether_addr_copy(filter->f.data.tcp_spec.dst_mac, 2525 + key->dst); 2526 + } 2527 + 2528 + if (!is_zero_ether_addr(key->src)) 2529 + if (is_valid_ether_addr(key->src) || 2530 + is_multicast_ether_addr(key->src)) { 2531 + /* set the mask if a valid dst_mac address */ 2532 + for (i = 0; i < ETH_ALEN; i++) 2533 + filter->f.mask.tcp_spec.src_mac[i] |= 2534 + 0xff; 2535 + ether_addr_copy(filter->f.data.tcp_spec.src_mac, 2536 + key->src); 2537 + } 2538 + } 2539 + 2540 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { 2541 + struct flow_dissector_key_vlan *key = 2542 + skb_flow_dissector_target(f->dissector, 2543 + FLOW_DISSECTOR_KEY_VLAN, 2544 + f->key); 2545 + struct flow_dissector_key_vlan *mask = 2546 + skb_flow_dissector_target(f->dissector, 2547 + FLOW_DISSECTOR_KEY_VLAN, 2548 + f->mask); 2549 + 2550 + if (mask->vlan_id) { 2551 + if (mask->vlan_id == VLAN_VID_MASK) { 2552 + field_flags |= I40EVF_CLOUD_FIELD_IVLAN; 2553 + } else { 2554 + dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", 2555 + mask->vlan_id); 2556 + return I40E_ERR_CONFIG; 2557 + } 2558 + } 2559 + filter->f.mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); 2560 + 
filter->f.data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id); 2561 + } 2562 + 2563 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { 2564 + struct flow_dissector_key_control *key = 2565 + skb_flow_dissector_target(f->dissector, 2566 + FLOW_DISSECTOR_KEY_CONTROL, 2567 + f->key); 2568 + 2569 + addr_type = key->addr_type; 2570 + } 2571 + 2572 + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 2573 + struct flow_dissector_key_ipv4_addrs *key = 2574 + skb_flow_dissector_target(f->dissector, 2575 + FLOW_DISSECTOR_KEY_IPV4_ADDRS, 2576 + f->key); 2577 + struct flow_dissector_key_ipv4_addrs *mask = 2578 + skb_flow_dissector_target(f->dissector, 2579 + FLOW_DISSECTOR_KEY_IPV4_ADDRS, 2580 + f->mask); 2581 + 2582 + if (mask->dst) { 2583 + if (mask->dst == cpu_to_be32(0xffffffff)) { 2584 + field_flags |= I40EVF_CLOUD_FIELD_IIP; 2585 + } else { 2586 + dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", 2587 + be32_to_cpu(mask->dst)); 2588 + return I40E_ERR_CONFIG; 2589 + } 2590 + } 2591 + 2592 + if (mask->src) { 2593 + if (mask->src == cpu_to_be32(0xffffffff)) { 2594 + field_flags |= I40EVF_CLOUD_FIELD_IIP; 2595 + } else { 2596 + dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", 2597 + be32_to_cpu(mask->dst)); 2598 + return I40E_ERR_CONFIG; 2599 + } 2600 + } 2601 + 2602 + if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) { 2603 + dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); 2604 + return I40E_ERR_CONFIG; 2605 + } 2606 + if (key->dst) { 2607 + filter->f.mask.tcp_spec.dst_ip[0] |= 2608 + cpu_to_be32(0xffffffff); 2609 + filter->f.data.tcp_spec.dst_ip[0] = key->dst; 2610 + } 2611 + if (key->src) { 2612 + filter->f.mask.tcp_spec.src_ip[0] |= 2613 + cpu_to_be32(0xffffffff); 2614 + filter->f.data.tcp_spec.src_ip[0] = key->src; 2615 + } 2616 + } 2617 + 2618 + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 2619 + struct flow_dissector_key_ipv6_addrs *key = 2620 + skb_flow_dissector_target(f->dissector, 2621 + 
FLOW_DISSECTOR_KEY_IPV6_ADDRS, 2622 + f->key); 2623 + struct flow_dissector_key_ipv6_addrs *mask = 2624 + skb_flow_dissector_target(f->dissector, 2625 + FLOW_DISSECTOR_KEY_IPV6_ADDRS, 2626 + f->mask); 2627 + 2628 + /* validate mask, make sure it is not IPV6_ADDR_ANY */ 2629 + if (ipv6_addr_any(&mask->dst)) { 2630 + dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", 2631 + IPV6_ADDR_ANY); 2632 + return I40E_ERR_CONFIG; 2633 + } 2634 + 2635 + /* src and dest IPv6 address should not be LOOPBACK 2636 + * (0:0:0:0:0:0:0:1) which can be represented as ::1 2637 + */ 2638 + if (ipv6_addr_loopback(&key->dst) || 2639 + ipv6_addr_loopback(&key->src)) { 2640 + dev_err(&adapter->pdev->dev, 2641 + "ipv6 addr should not be loopback\n"); 2642 + return I40E_ERR_CONFIG; 2643 + } 2644 + if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) 2645 + field_flags |= I40EVF_CLOUD_FIELD_IIP; 2646 + 2647 + if (key->dst.s6_addr) { 2648 + for (i = 0; i < 4; i++) 2649 + filter->f.mask.tcp_spec.dst_ip[i] |= 2650 + cpu_to_be32(0xffffffff); 2651 + memcpy(&filter->f.data.tcp_spec.dst_ip, 2652 + &key->dst.s6_addr32, 2653 + sizeof(filter->f.data.tcp_spec.dst_ip)); 2654 + } 2655 + if (key->src.s6_addr) { 2656 + for (i = 0; i < 4; i++) 2657 + filter->f.mask.tcp_spec.src_ip[i] |= 2658 + cpu_to_be32(0xffffffff); 2659 + memcpy(&filter->f.data.tcp_spec.src_ip, 2660 + &key->src.s6_addr32, 2661 + sizeof(filter->f.data.tcp_spec.src_ip)); 2662 + } 2663 + } 2664 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { 2665 + struct flow_dissector_key_ports *key = 2666 + skb_flow_dissector_target(f->dissector, 2667 + FLOW_DISSECTOR_KEY_PORTS, 2668 + f->key); 2669 + struct flow_dissector_key_ports *mask = 2670 + skb_flow_dissector_target(f->dissector, 2671 + FLOW_DISSECTOR_KEY_PORTS, 2672 + f->mask); 2673 + 2674 + if (mask->src) { 2675 + if (mask->src == cpu_to_be16(0xffff)) { 2676 + field_flags |= I40EVF_CLOUD_FIELD_IIP; 2677 + } else { 2678 + dev_err(&adapter->pdev->dev, "Bad src port 
mask %u\n", 2679 + be16_to_cpu(mask->src)); 2680 + return I40E_ERR_CONFIG; 2681 + } 2682 + } 2683 + 2684 + if (mask->dst) { 2685 + if (mask->dst == cpu_to_be16(0xffff)) { 2686 + field_flags |= I40EVF_CLOUD_FIELD_IIP; 2687 + } else { 2688 + dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", 2689 + be16_to_cpu(mask->dst)); 2690 + return I40E_ERR_CONFIG; 2691 + } 2692 + } 2693 + if (key->dst) { 2694 + filter->f.mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); 2695 + filter->f.data.tcp_spec.dst_port = key->dst; 2696 + } 2697 + 2698 + if (key->src) { 2699 + filter->f.mask.tcp_spec.src_port |= cpu_to_be16(0xffff); 2700 + filter->f.data.tcp_spec.src_port = key->dst; 2701 + } 2702 + } 2703 + filter->f.field_flags = field_flags; 2704 + 2705 + return 0; 2706 + } 2707 + 2708 + /** 2709 + * i40evf_handle_tclass - Forward to a traffic class on the device 2710 + * @adapter: board private structure 2711 + * @tc: traffic class index on the device 2712 + * @filter: pointer to cloud filter structure 2713 + */ 2714 + static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc, 2715 + struct i40evf_cloud_filter *filter) 2716 + { 2717 + if (tc == 0) 2718 + return 0; 2719 + if (tc < adapter->num_tc) { 2720 + if (!filter->f.data.tcp_spec.dst_port) { 2721 + dev_err(&adapter->pdev->dev, 2722 + "Specify destination port to redirect to traffic class other than TC0\n"); 2723 + return -EINVAL; 2724 + } 2725 + } 2726 + /* redirect to a traffic class on the same device */ 2727 + filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; 2728 + filter->f.action_meta = tc; 2729 + return 0; 2730 + } 2731 + 2732 + /** 2733 + * i40evf_configure_clsflower - Add tc flower filters 2734 + * @adapter: board private structure 2735 + * @cls_flower: Pointer to struct tc_cls_flower_offload 2736 + */ 2737 + static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, 2738 + struct tc_cls_flower_offload *cls_flower) 2739 + { 2740 + int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); 
2741 + struct i40evf_cloud_filter *filter = NULL; 2742 + int err = 0, count = 50; 2743 + 2744 + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, 2745 + &adapter->crit_section)) { 2746 + udelay(1); 2747 + if (--count == 0) 2748 + return -EINVAL; 2749 + } 2750 + 2751 + if (tc < 0) { 2752 + dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); 2753 + return -EINVAL; 2754 + } 2755 + 2756 + filter = kzalloc(sizeof(*filter), GFP_KERNEL); 2757 + if (!filter) { 2758 + err = -ENOMEM; 2759 + goto clearout; 2760 + } 2761 + filter->cookie = cls_flower->cookie; 2762 + 2763 + /* set the mask to all zeroes to begin with */ 2764 + memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); 2765 + /* start out with flow type and eth type IPv4 to begin with */ 2766 + filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; 2767 + err = i40evf_parse_cls_flower(adapter, cls_flower, filter); 2768 + if (err < 0) 2769 + goto err; 2770 + 2771 + err = i40evf_handle_tclass(adapter, tc, filter); 2772 + if (err < 0) 2773 + goto err; 2774 + 2775 + /* add filter to the list */ 2776 + spin_lock_bh(&adapter->cloud_filter_list_lock); 2777 + list_add_tail(&filter->list, &adapter->cloud_filter_list); 2778 + adapter->num_cloud_filters++; 2779 + filter->add = true; 2780 + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; 2781 + spin_unlock_bh(&adapter->cloud_filter_list_lock); 2782 + err: 2783 + if (err) 2784 + kfree(filter); 2785 + clearout: 2786 + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); 2787 + return err; 2788 + } 2789 + 2790 + /* i40evf_find_cf - Find the cloud filter in the list 2791 + * @adapter: Board private structure 2792 + * @cookie: filter specific cookie 2793 + * 2794 + * Returns ptr to the filter object or NULL. Must be called while holding the 2795 + * cloud_filter_list_lock. 
2796 + */ 2797 + static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter, 2798 + unsigned long *cookie) 2799 + { 2800 + struct i40evf_cloud_filter *filter = NULL; 2801 + 2802 + if (!cookie) 2803 + return NULL; 2804 + 2805 + list_for_each_entry(filter, &adapter->cloud_filter_list, list) { 2806 + if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) 2807 + return filter; 2808 + } 2809 + return NULL; 2810 + } 2811 + 2812 + /** 2813 + * i40evf_delete_clsflower - Remove tc flower filters 2814 + * @adapter: board private structure 2815 + * @cls_flower: Pointer to struct tc_cls_flower_offload 2816 + */ 2817 + static int i40evf_delete_clsflower(struct i40evf_adapter *adapter, 2818 + struct tc_cls_flower_offload *cls_flower) 2819 + { 2820 + struct i40evf_cloud_filter *filter = NULL; 2821 + int err = 0; 2822 + 2823 + spin_lock_bh(&adapter->cloud_filter_list_lock); 2824 + filter = i40evf_find_cf(adapter, &cls_flower->cookie); 2825 + if (filter) { 2826 + filter->del = true; 2827 + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; 2828 + } else { 2829 + err = -EINVAL; 2830 + } 2831 + spin_unlock_bh(&adapter->cloud_filter_list_lock); 2832 + 2833 + return err; 2834 + } 2835 + 2836 + /** 2837 + * i40evf_setup_tc_cls_flower - flower classifier offloads 2838 + * @netdev: net device to configure 2839 + * @type_data: offload data 2840 + */ 2841 + static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, 2842 + struct tc_cls_flower_offload *cls_flower) 2843 + { 2844 + if (cls_flower->common.chain_index) 2845 + return -EOPNOTSUPP; 2846 + 2847 + switch (cls_flower->command) { 2848 + case TC_CLSFLOWER_REPLACE: 2849 + return i40evf_configure_clsflower(adapter, cls_flower); 2850 + case TC_CLSFLOWER_DESTROY: 2851 + return i40evf_delete_clsflower(adapter, cls_flower); 2852 + case TC_CLSFLOWER_STATS: 2853 + return -EOPNOTSUPP; 2854 + default: 2855 + return -EINVAL; 2856 + } 2857 + } 2858 + 2859 + /** 2860 + * i40evf_setup_tc_block_cb - block 
callback for tc 2861 + * @type: type of offload 2862 + * @type_data: offload data 2863 + * @cb_priv: 2864 + * 2865 + * This function is the block callback for traffic classes 2866 + **/ 2867 + static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 2868 + void *cb_priv) 2869 + { 2870 + switch (type) { 2871 + case TC_SETUP_CLSFLOWER: 2872 + return i40evf_setup_tc_cls_flower(cb_priv, type_data); 2873 + default: 2874 + return -EOPNOTSUPP; 2875 + } 2876 + } 2877 + 2878 + /** 2879 + * i40evf_setup_tc_block - register callbacks for tc 2880 + * @netdev: network interface device structure 2881 + * @f: tc offload data 2882 + * 2883 + * This function registers block callbacks for tc 2884 + * offloads 2885 + **/ 2886 + static int i40evf_setup_tc_block(struct net_device *dev, 2887 + struct tc_block_offload *f) 2888 + { 2889 + struct i40evf_adapter *adapter = netdev_priv(dev); 2890 + 2891 + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 2892 + return -EOPNOTSUPP; 2893 + 2894 + switch (f->command) { 2895 + case TC_BLOCK_BIND: 2896 + return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb, 2897 + adapter, adapter); 2898 + case TC_BLOCK_UNBIND: 2899 + tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, 2900 + adapter); 2901 + return 0; 2902 + default: 2903 + return -EOPNOTSUPP; 2904 + } 2905 + } 2906 + 2907 + /** 2908 + * i40evf_setup_tc - configure multiple traffic classes 2909 + * @netdev: network interface device structure 2910 + * @type: type of offload 2911 + * @type_date: tc offload data 2912 + * 2913 + * This function is the callback to ndo_setup_tc in the 2914 + * netdev_ops. 
2915 + * 2916 + * Returns 0 on success 2917 + **/ 2918 + static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type, 2919 + void *type_data) 2920 + { 2921 + switch (type) { 2922 + case TC_SETUP_QDISC_MQPRIO: 2923 + return __i40evf_setup_tc(netdev, type_data); 2924 + case TC_SETUP_BLOCK: 2925 + return i40evf_setup_tc_block(netdev, type_data); 2926 + default: 2927 + return -EOPNOTSUPP; 2928 + } 2277 2929 } 2278 2930 2279 2931 /** ··· 3261 2477 #ifdef CONFIG_NET_POLL_CONTROLLER 3262 2478 .ndo_poll_controller = i40evf_netpoll, 3263 2479 #endif 2480 + .ndo_setup_tc = i40evf_setup_tc, 3264 2481 }; 3265 2482 3266 2483 /** ··· 3376 2591 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3377 2592 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | 3378 2593 NETIF_F_HW_VLAN_CTAG_RX); 2594 + /* Enable cloud filter if ADQ is supported */ 2595 + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) 2596 + hw_features |= NETIF_F_HW_TC; 3379 2597 3380 2598 netdev->hw_features |= hw_features; 3381 2599 ··· 3746 2958 mutex_init(&hw->aq.arq_mutex); 3747 2959 3748 2960 spin_lock_init(&adapter->mac_vlan_list_lock); 2961 + spin_lock_init(&adapter->cloud_filter_list_lock); 3749 2962 3750 2963 INIT_LIST_HEAD(&adapter->mac_filter_list); 3751 2964 INIT_LIST_HEAD(&adapter->vlan_filter_list); 2965 + INIT_LIST_HEAD(&adapter->cloud_filter_list); 3752 2966 3753 2967 INIT_WORK(&adapter->reset_task, i40evf_reset_task); 3754 2968 INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); ··· 3877 3087 struct i40evf_adapter *adapter = netdev_priv(netdev); 3878 3088 struct i40evf_vlan_filter *vlf, *vlftmp; 3879 3089 struct i40evf_mac_filter *f, *ftmp; 3090 + struct i40evf_cloud_filter *cf, *cftmp; 3880 3091 struct i40e_hw *hw = &adapter->hw; 3881 3092 int err; 3882 3093 /* Indicate we are in remove and not to run reset_task */ ··· 3899 3108 /* Shut down all the garbage mashers on the detention level */ 3900 3109 adapter->state = __I40EVF_REMOVE; 3901 3110 adapter->aq_required = 0; 3111 + 
adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; 3902 3112 i40evf_request_reset(adapter); 3903 3113 msleep(50); 3904 3114 /* If the FW isn't responding, kick it once, but only once. */ ··· 3947 3155 } 3948 3156 3949 3157 spin_unlock_bh(&adapter->mac_vlan_list_lock); 3158 + 3159 + spin_lock_bh(&adapter->cloud_filter_list_lock); 3160 + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 3161 + list_del(&cf->list); 3162 + kfree(cf); 3163 + } 3164 + spin_unlock_bh(&adapter->cloud_filter_list_lock); 3950 3165 3951 3166 free_netdev(netdev); 3952 3167
+291 -9
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
··· 161 161 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | 162 162 VIRTCHNL_VF_OFFLOAD_ENCAP | 163 163 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | 164 - VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; 164 + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | 165 + VIRTCHNL_VF_OFFLOAD_ADQ; 165 166 166 167 adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; 167 168 adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; ··· 974 973 } 975 974 976 975 /** 976 + * i40evf_enable_channel 977 + * @adapter: adapter structure 978 + * 979 + * Request that the PF enable channels as specified by 980 + * the user via tc tool. 981 + **/ 982 + void i40evf_enable_channels(struct i40evf_adapter *adapter) 983 + { 984 + struct virtchnl_tc_info *vti = NULL; 985 + u16 len; 986 + int i; 987 + 988 + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 989 + /* bail because we already have a command pending */ 990 + dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", 991 + adapter->current_op); 992 + return; 993 + } 994 + 995 + len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) + 996 + sizeof(struct virtchnl_tc_info); 997 + 998 + vti = kzalloc(len, GFP_KERNEL); 999 + if (!vti) 1000 + return; 1001 + vti->num_tc = adapter->num_tc; 1002 + for (i = 0; i < vti->num_tc; i++) { 1003 + vti->list[i].count = adapter->ch_config.ch_info[i].count; 1004 + vti->list[i].offset = adapter->ch_config.ch_info[i].offset; 1005 + vti->list[i].pad = 0; 1006 + vti->list[i].max_tx_rate = 1007 + adapter->ch_config.ch_info[i].max_tx_rate; 1008 + } 1009 + 1010 + adapter->ch_config.state = __I40EVF_TC_RUNNING; 1011 + adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; 1012 + adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS; 1013 + adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS; 1014 + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, 1015 + (u8 *)vti, len); 1016 + kfree(vti); 1017 + } 1018 + 1019 + /** 1020 + * i40evf_disable_channel 1021 + * @adapter: adapter structure 1022 + * 1023 + * Request that the PF disable channels that 
are configured 1024 + **/ 1025 + void i40evf_disable_channels(struct i40evf_adapter *adapter) 1026 + { 1027 + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1028 + /* bail because we already have a command pending */ 1029 + dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", 1030 + adapter->current_op); 1031 + return; 1032 + } 1033 + 1034 + adapter->ch_config.state = __I40EVF_TC_INVALID; 1035 + adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; 1036 + adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS; 1037 + adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS; 1038 + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, 1039 + NULL, 0); 1040 + } 1041 + 1042 + /** 1043 + * i40evf_print_cloud_filter 1044 + * @adapter: adapter structure 1045 + * @f: cloud filter to print 1046 + * 1047 + * Print the cloud filter 1048 + **/ 1049 + static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter, 1050 + struct virtchnl_filter f) 1051 + { 1052 + switch (f.flow_type) { 1053 + case VIRTCHNL_TCP_V4_FLOW: 1054 + dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n", 1055 + &f.data.tcp_spec.dst_mac, &f.data.tcp_spec.src_mac, 1056 + ntohs(f.data.tcp_spec.vlan_id), 1057 + &f.data.tcp_spec.dst_ip[0], &f.data.tcp_spec.src_ip[0], 1058 + ntohs(f.data.tcp_spec.dst_port), 1059 + ntohs(f.data.tcp_spec.src_port)); 1060 + break; 1061 + case VIRTCHNL_TCP_V6_FLOW: 1062 + dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n", 1063 + &f.data.tcp_spec.dst_mac, &f.data.tcp_spec.src_mac, 1064 + ntohs(f.data.tcp_spec.vlan_id), 1065 + &f.data.tcp_spec.dst_ip, &f.data.tcp_spec.src_ip, 1066 + ntohs(f.data.tcp_spec.dst_port), 1067 + ntohs(f.data.tcp_spec.src_port)); 1068 + break; 1069 + } 1070 + } 1071 + 1072 + /** 1073 + * i40evf_add_cloud_filter 1074 + * @adapter: adapter structure 1075 + * 1076 + * Request that the PF 
add cloud filters as specified 1077 + * by the user via tc tool. 1078 + **/ 1079 + void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) 1080 + { 1081 + struct i40evf_cloud_filter *cf; 1082 + struct virtchnl_filter *f; 1083 + int len = 0, count = 0; 1084 + 1085 + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1086 + /* bail because we already have a command pending */ 1087 + dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n", 1088 + adapter->current_op); 1089 + return; 1090 + } 1091 + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1092 + if (cf->add) { 1093 + count++; 1094 + break; 1095 + } 1096 + } 1097 + if (!count) { 1098 + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; 1099 + return; 1100 + } 1101 + adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; 1102 + 1103 + len = sizeof(struct virtchnl_filter); 1104 + f = kzalloc(len, GFP_KERNEL); 1105 + if (!f) 1106 + return; 1107 + 1108 + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1109 + if (cf->add) { 1110 + memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1111 + cf->add = false; 1112 + cf->state = __I40EVF_CF_ADD_PENDING; 1113 + i40evf_send_pf_msg(adapter, 1114 + VIRTCHNL_OP_ADD_CLOUD_FILTER, 1115 + (u8 *)f, len); 1116 + } 1117 + } 1118 + kfree(f); 1119 + } 1120 + 1121 + /** 1122 + * i40evf_del_cloud_filter 1123 + * @adapter: adapter structure 1124 + * 1125 + * Request that the PF delete cloud filters as specified 1126 + * by the user via tc tool. 
1127 + **/ 1128 + void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) 1129 + { 1130 + struct i40evf_cloud_filter *cf, *cftmp; 1131 + struct virtchnl_filter *f; 1132 + int len = 0, count = 0; 1133 + 1134 + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { 1135 + /* bail because we already have a command pending */ 1136 + dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", 1137 + adapter->current_op); 1138 + return; 1139 + } 1140 + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1141 + if (cf->del) { 1142 + count++; 1143 + break; 1144 + } 1145 + } 1146 + if (!count) { 1147 + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; 1148 + return; 1149 + } 1150 + adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; 1151 + 1152 + len = sizeof(struct virtchnl_filter); 1153 + f = kzalloc(len, GFP_KERNEL); 1154 + if (!f) 1155 + return; 1156 + 1157 + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { 1158 + if (cf->del) { 1159 + memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); 1160 + cf->del = false; 1161 + cf->state = __I40EVF_CF_DEL_PENDING; 1162 + i40evf_send_pf_msg(adapter, 1163 + VIRTCHNL_OP_DEL_CLOUD_FILTER, 1164 + (u8 *)f, len); 1165 + } 1166 + } 1167 + kfree(f); 1168 + } 1169 + 1170 + /** 977 1171 * i40evf_request_reset 978 1172 * @adapter: adapter structure 979 1173 * ··· 1213 1017 if (adapter->link_up == link_up) 1214 1018 break; 1215 1019 1216 - /* If we get link up message and start queues before 1217 - * our queues are configured it will trigger a TX hang. 1218 - * In that case, just ignore the link status message, 1219 - * we'll get another one after we enable queues and 1220 - * actually prepared to send traffic. 1221 - */ 1222 - if (link_up && adapter->state != __I40EVF_RUNNING) 1223 - break; 1020 + if (link_up) { 1021 + /* If we get link up message and start queues 1022 + * before our queues are configured it will 1023 + * trigger a TX hang. 
In that case, just ignore 1024 + * the link status message,we'll get another one 1025 + * after we enable queues and actually prepared 1026 + * to send traffic. 1027 + */ 1028 + if (adapter->state != __I40EVF_RUNNING) 1029 + break; 1030 + 1031 + /* For ADq enabled VF, we reconfigure VSIs and 1032 + * re-allocate queues. Hence wait till all 1033 + * queues are enabled. 1034 + */ 1035 + if (adapter->flags & 1036 + I40EVF_FLAG_QUEUES_DISABLED) 1037 + break; 1038 + } 1224 1039 1225 1040 adapter->link_up = link_up; 1226 1041 if (link_up) { ··· 1276 1069 dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", 1277 1070 i40evf_stat_str(&adapter->hw, v_retval)); 1278 1071 break; 1072 + case VIRTCHNL_OP_ENABLE_CHANNELS: 1073 + dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", 1074 + i40evf_stat_str(&adapter->hw, v_retval)); 1075 + adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; 1076 + adapter->ch_config.state = __I40EVF_TC_INVALID; 1077 + netdev_reset_tc(netdev); 1078 + netif_tx_start_all_queues(netdev); 1079 + break; 1080 + case VIRTCHNL_OP_DISABLE_CHANNELS: 1081 + dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", 1082 + i40evf_stat_str(&adapter->hw, v_retval)); 1083 + adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; 1084 + adapter->ch_config.state = __I40EVF_TC_RUNNING; 1085 + netif_tx_start_all_queues(netdev); 1086 + break; 1087 + case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 1088 + struct i40evf_cloud_filter *cf, *cftmp; 1089 + 1090 + list_for_each_entry_safe(cf, cftmp, 1091 + &adapter->cloud_filter_list, 1092 + list) { 1093 + if (cf->state == __I40EVF_CF_ADD_PENDING) { 1094 + cf->state = __I40EVF_CF_INVALID; 1095 + dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", 1096 + i40evf_stat_str(&adapter->hw, 1097 + v_retval)); 1098 + i40evf_print_cloud_filter(adapter, 1099 + cf->f); 1100 + list_del(&cf->list); 1101 + kfree(cf); 1102 + adapter->num_cloud_filters--; 1103 + } 1104 + } 1105 + } 
1106 + break; 1107 + case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 1108 + struct i40evf_cloud_filter *cf; 1109 + 1110 + list_for_each_entry(cf, &adapter->cloud_filter_list, 1111 + list) { 1112 + if (cf->state == __I40EVF_CF_DEL_PENDING) { 1113 + cf->state = __I40EVF_CF_ACTIVE; 1114 + dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", 1115 + i40evf_stat_str(&adapter->hw, 1116 + v_retval)); 1117 + i40evf_print_cloud_filter(adapter, 1118 + cf->f); 1119 + } 1120 + } 1121 + } 1122 + break; 1279 1123 default: 1280 1124 dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", 1281 1125 v_retval, ··· 1366 1108 case VIRTCHNL_OP_ENABLE_QUEUES: 1367 1109 /* enable transmits */ 1368 1110 i40evf_irq_enable(adapter, true); 1111 + adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED; 1369 1112 break; 1370 1113 case VIRTCHNL_OP_DISABLE_QUEUES: 1371 1114 i40evf_free_all_tx_resources(adapter); ··· 1418 1159 vfres->num_queue_pairs); 1419 1160 adapter->num_req_queues = 0; 1420 1161 adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; 1162 + } 1163 + } 1164 + break; 1165 + case VIRTCHNL_OP_ADD_CLOUD_FILTER: { 1166 + struct i40evf_cloud_filter *cf; 1167 + 1168 + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { 1169 + if (cf->state == __I40EVF_CF_ADD_PENDING) 1170 + cf->state = __I40EVF_CF_ACTIVE; 1171 + } 1172 + } 1173 + break; 1174 + case VIRTCHNL_OP_DEL_CLOUD_FILTER: { 1175 + struct i40evf_cloud_filter *cf, *cftmp; 1176 + 1177 + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, 1178 + list) { 1179 + if (cf->state == __I40EVF_CF_DEL_PENDING) { 1180 + cf->state = __I40EVF_CF_INVALID; 1181 + list_del(&cf->list); 1182 + kfree(cf); 1183 + adapter->num_cloud_filters--; 1184 + } 1421 1185 } 1422 1186 } 1423 1187 break;
+104 -3
include/linux/avf/virtchnl.h
··· 136 136 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, 137 137 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, 138 138 VIRTCHNL_OP_REQUEST_QUEUES = 29, 139 + VIRTCHNL_OP_ENABLE_CHANNELS = 30, 140 + VIRTCHNL_OP_DISABLE_CHANNELS = 31, 141 + VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, 142 + VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, 139 143 }; 140 144 141 - /* This macro is used to generate a compilation error if a structure 145 + /* These macros are used to generate compilation errors if a structure/union 142 146 * is not exactly the correct length. It gives a divide by zero error if the 143 - * structure is not of the correct size, otherwise it creates an enum that is 144 - * never used. 147 + * structure/union is not of the correct size, otherwise it creates an enum 148 + * that is never used. 145 149 */ 146 150 #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ 147 151 { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } 152 + #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ 153 + { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) } 148 154 149 155 /* Virtual channel message descriptor. This overlays the admin queue 150 156 * descriptor. All other data is passed in external buffers. ··· 250 244 #define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 251 245 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 252 246 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 247 + #define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 253 248 254 249 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ 255 250 VIRTCHNL_VF_OFFLOAD_VLAN | \ ··· 503 496 504 497 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); 505 498 499 + /* VIRTCHNL_OP_ENABLE_CHANNELS 500 + * VIRTCHNL_OP_DISABLE_CHANNELS 501 + * VF sends these messages to enable or disable channels based on 502 + * the user specified queue count and queue offset for each traffic class. 
503 + * This struct encompasses all the information that the PF needs from 504 + * VF to create a channel. 505 + */ 506 + struct virtchnl_channel_info { 507 + u16 count; /* number of queues in a channel */ 508 + u16 offset; /* queues in a channel start from 'offset' */ 509 + u32 pad; 510 + u64 max_tx_rate; 511 + }; 512 + 513 + VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info); 514 + 515 + struct virtchnl_tc_info { 516 + u32 num_tc; 517 + u32 pad; 518 + struct virtchnl_channel_info list[1]; 519 + }; 520 + 521 + VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info); 522 + 523 + /* VIRTCHNL_ADD_CLOUD_FILTER 524 + * VIRTCHNL_DEL_CLOUD_FILTER 525 + * VF sends these messages to add or delete a cloud filter based on the 526 + * user specified match and action filters. These structures encompass 527 + * all the information that the PF needs from the VF to add/delete a 528 + * cloud filter. 529 + */ 530 + 531 + struct virtchnl_l4_spec { 532 + u8 src_mac[ETH_ALEN]; 533 + u8 dst_mac[ETH_ALEN]; 534 + __be16 vlan_id; 535 + __be16 pad; /* reserved for future use */ 536 + __be32 src_ip[4]; 537 + __be32 dst_ip[4]; 538 + __be16 src_port; 539 + __be16 dst_port; 540 + }; 541 + 542 + VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec); 543 + 544 + union virtchnl_flow_spec { 545 + struct virtchnl_l4_spec tcp_spec; 546 + u8 buffer[128]; /* reserved for future use */ 547 + }; 548 + 549 + VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec); 550 + 551 + enum virtchnl_action { 552 + /* action types */ 553 + VIRTCHNL_ACTION_DROP = 0, 554 + VIRTCHNL_ACTION_TC_REDIRECT, 555 + }; 556 + 557 + enum virtchnl_flow_type { 558 + /* flow types */ 559 + VIRTCHNL_TCP_V4_FLOW = 0, 560 + VIRTCHNL_TCP_V6_FLOW, 561 + }; 562 + 563 + struct virtchnl_filter { 564 + union virtchnl_flow_spec data; 565 + union virtchnl_flow_spec mask; 566 + enum virtchnl_flow_type flow_type; 567 + enum virtchnl_action action; 568 + u32 action_meta; 569 + __u8 field_flags; 570 + }; 571 + 572 + VIRTCHNL_CHECK_STRUCT_LEN(272, 
virtchnl_filter); 573 + 506 574 /* VIRTCHNL_OP_EVENT 507 575 * PF sends this message to inform the VF driver of events that may affect it. 508 576 * No direct response is expected from the VF, though it may generate other ··· 792 710 break; 793 711 case VIRTCHNL_OP_REQUEST_QUEUES: 794 712 valid_len = sizeof(struct virtchnl_vf_res_request); 713 + break; 714 + case VIRTCHNL_OP_ENABLE_CHANNELS: 715 + valid_len = sizeof(struct virtchnl_tc_info); 716 + if (msglen >= valid_len) { 717 + struct virtchnl_tc_info *vti = 718 + (struct virtchnl_tc_info *)msg; 719 + valid_len += vti->num_tc * 720 + sizeof(struct virtchnl_channel_info); 721 + if (vti->num_tc == 0) 722 + err_msg_format = true; 723 + } 724 + break; 725 + case VIRTCHNL_OP_DISABLE_CHANNELS: 726 + break; 727 + case VIRTCHNL_OP_ADD_CLOUD_FILTER: 728 + valid_len = sizeof(struct virtchnl_filter); 729 + break; 730 + case VIRTCHNL_OP_DEL_CLOUD_FILTER: 731 + valid_len = sizeof(struct virtchnl_filter); 795 732 break; 796 733 /* These are always errors coming from the VF. */ 797 734 case VIRTCHNL_OP_EVENT: