Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: updates for -next

This patchset adds support for tc mqprio offload, hw tc
offload of tc flower, and adaptation for max rss size changes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+762 -154
+25 -11
drivers/net/ethernet/hisilicon/hns3/hnae3.h
··· 29 29 #include <linux/module.h> 30 30 #include <linux/netdevice.h> 31 31 #include <linux/pci.h> 32 + #include <linux/pkt_sched.h> 32 33 #include <linux/types.h> 34 + #include <net/pkt_cls.h> 33 35 34 36 #define HNAE3_MOD_VERSION "1.0" 35 37 ··· 459 457 * Configure the default MAC for specified VF 460 458 * get_module_eeprom 461 459 * Get the optical module eeprom info. 460 + * add_cls_flower 461 + * Add clsflower rule 462 + * del_cls_flower 463 + * Delete clsflower rule 464 + * cls_flower_active 465 + * Check if any cls flower rule exist 462 466 */ 463 467 struct hnae3_ae_ops { 464 468 int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); ··· 642 634 int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset, 643 635 u32 len, u8 *data); 644 636 bool (*get_cmdq_stat)(struct hnae3_handle *handle); 637 + int (*add_cls_flower)(struct hnae3_handle *handle, 638 + struct flow_cls_offload *cls_flower, int tc); 639 + int (*del_cls_flower)(struct hnae3_handle *handle, 640 + struct flow_cls_offload *cls_flower); 641 + bool (*cls_flower_active)(struct hnae3_handle *handle); 645 642 }; 646 643 647 644 struct hnae3_dcb_ops { ··· 660 647 u8 (*getdcbx)(struct hnae3_handle *); 661 648 u8 (*setdcbx)(struct hnae3_handle *, u8); 662 649 663 - int (*setup_tc)(struct hnae3_handle *, u8, u8 *); 650 + int (*setup_tc)(struct hnae3_handle *handle, 651 + struct tc_mqprio_qopt_offload *mqprio_qopt); 664 652 }; 665 653 666 654 struct hnae3_ae_algo { ··· 673 659 #define HNAE3_INT_NAME_LEN 32 674 660 #define HNAE3_ITR_COUNTDOWN_START 100 675 661 676 - struct hnae3_tc_info { 677 - u16 tqp_offset; /* TQP offset from base TQP */ 678 - u16 tqp_count; /* Total TQPs */ 679 - u8 tc; /* TC index */ 680 - bool enable; /* If this TC is enable or not */ 681 - }; 682 - 683 662 #define HNAE3_MAX_TC 8 684 663 #define HNAE3_MAX_USER_PRIO 8 664 + struct hnae3_tc_info { 665 + u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ 666 + u16 tqp_count[HNAE3_MAX_TC]; 667 + u16 tqp_offset[HNAE3_MAX_TC]; 668 
+ unsigned long tc_en; /* bitmap of TC enabled */ 669 + u8 num_tc; /* Total number of enabled TCs */ 670 + bool mqprio_active; 671 + }; 672 + 685 673 struct hnae3_knic_private_info { 686 674 struct net_device *netdev; /* Set by KNIC client when init instance */ 687 675 u16 rss_size; /* Allocated RSS queues */ ··· 692 676 u16 num_tx_desc; 693 677 u16 num_rx_desc; 694 678 695 - u8 num_tc; /* Total number of enabled TCs */ 696 - u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ 697 - struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ 679 + struct hnae3_tc_info tc_info; 698 680 699 681 u16 num_tqps; /* total number of TQPs in this handle */ 700 682 struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
+2 -1
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
··· 385 385 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); 386 386 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); 387 387 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); 388 - dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); 388 + dev_info(priv->dev, "Total number of enabled TCs: %u\n", 389 + kinfo->tc_info.num_tc); 389 390 dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max); 390 391 dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl); 391 392 }
+92 -22
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 323 323 { 324 324 struct hnae3_handle *h = hns3_get_handle(netdev); 325 325 struct hnae3_knic_private_info *kinfo = &h->kinfo; 326 - unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; 326 + struct hnae3_tc_info *tc_info = &kinfo->tc_info; 327 + unsigned int queue_size = kinfo->num_tqps; 327 328 int i, ret; 328 329 329 - if (kinfo->num_tc <= 1) { 330 + if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { 330 331 netdev_reset_tc(netdev); 331 332 } else { 332 - ret = netdev_set_num_tc(netdev, kinfo->num_tc); 333 + ret = netdev_set_num_tc(netdev, tc_info->num_tc); 333 334 if (ret) { 334 335 netdev_err(netdev, 335 336 "netdev_set_num_tc fail, ret=%d!\n", ret); ··· 338 337 } 339 338 340 339 for (i = 0; i < HNAE3_MAX_TC; i++) { 341 - if (!kinfo->tc_info[i].enable) 340 + if (!test_bit(i, &tc_info->tc_en)) 342 341 continue; 343 342 344 - netdev_set_tc_queue(netdev, 345 - kinfo->tc_info[i].tc, 346 - kinfo->tc_info[i].tqp_count, 347 - kinfo->tc_info[i].tqp_offset); 343 + netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], 344 + tc_info->tqp_offset[i]); 348 345 } 349 346 } 350 347 ··· 368 369 u16 alloc_tqps, max_rss_size, rss_size; 369 370 370 371 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); 371 - rss_size = alloc_tqps / h->kinfo.num_tc; 372 + rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; 372 373 373 374 return min_t(u16, rss_size, max_rss_size); 374 375 } ··· 507 508 508 509 kinfo = &h->kinfo; 509 510 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 510 - netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]); 511 + netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); 511 512 512 513 if (h->ae_algo->ops->set_timer_task) 513 514 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); ··· 1668 1669 h->ae_algo->ops->enable_fd(h, enable); 1669 1670 } 1670 1671 1672 + if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && 1673 + h->ae_algo->ops->cls_flower_active(h)) { 1674 + netdev_err(netdev, 1675 + "there are 
offloaded TC filters active, cannot disable HW TC offload"); 1676 + return -EINVAL; 1677 + } 1678 + 1671 1679 netdev->features = features; 1672 1680 return 0; 1673 1681 } ··· 1800 1794 static int hns3_setup_tc(struct net_device *netdev, void *type_data) 1801 1795 { 1802 1796 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 1803 - u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; 1804 1797 struct hnae3_knic_private_info *kinfo; 1805 1798 u8 tc = mqprio_qopt->qopt.num_tc; 1806 1799 u16 mode = mqprio_qopt->mode; ··· 1822 1817 netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); 1823 1818 1824 1819 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 1825 - kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP; 1820 + kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; 1826 1821 } 1822 + 1823 + static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv, 1824 + struct flow_cls_offload *flow) 1825 + { 1826 + int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); 1827 + struct hnae3_handle *h = hns3_get_handle(priv->netdev); 1828 + 1829 + switch (flow->command) { 1830 + case FLOW_CLS_REPLACE: 1831 + if (h->ae_algo->ops->add_cls_flower) 1832 + return h->ae_algo->ops->add_cls_flower(h, flow, tc); 1833 + break; 1834 + case FLOW_CLS_DESTROY: 1835 + if (h->ae_algo->ops->del_cls_flower) 1836 + return h->ae_algo->ops->del_cls_flower(h, flow); 1837 + break; 1838 + default: 1839 + break; 1840 + } 1841 + 1842 + return -EOPNOTSUPP; 1843 + } 1844 + 1845 + static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 1846 + void *cb_priv) 1847 + { 1848 + struct hns3_nic_priv *priv = cb_priv; 1849 + 1850 + if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) 1851 + return -EOPNOTSUPP; 1852 + 1853 + switch (type) { 1854 + case TC_SETUP_CLSFLOWER: 1855 + return hns3_setup_tc_cls_flower(priv, type_data); 1856 + default: 1857 + return -EOPNOTSUPP; 1858 + } 1859 + } 1860 + 1861 + static LIST_HEAD(hns3_block_cb_list); 1827 1862 1828 1863 
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, 1829 1864 void *type_data) 1830 1865 { 1831 - if (type != TC_SETUP_QDISC_MQPRIO) 1832 - return -EOPNOTSUPP; 1866 + struct hns3_nic_priv *priv = netdev_priv(dev); 1867 + int ret; 1833 1868 1834 - return hns3_setup_tc(dev, type_data); 1869 + switch (type) { 1870 + case TC_SETUP_QDISC_MQPRIO: 1871 + ret = hns3_setup_tc(dev, type_data); 1872 + break; 1873 + case TC_SETUP_BLOCK: 1874 + ret = flow_block_cb_setup_simple(type_data, 1875 + &hns3_block_cb_list, 1876 + hns3_setup_tc_block_cb, 1877 + priv, priv, true); 1878 + break; 1879 + default: 1880 + return -EOPNOTSUPP; 1881 + } 1882 + 1883 + return ret; 1835 1884 } 1836 1885 1837 1886 static int hns3_vlan_rx_add_vid(struct net_device *netdev, ··· 2481 2422 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2482 2423 netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2483 2424 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2425 + } 2426 + 2427 + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { 2428 + netdev->hw_features |= NETIF_F_HW_TC; 2429 + netdev->features |= NETIF_F_HW_TC; 2484 2430 } 2485 2431 } 2486 2432 ··· 4044 3980 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) 4045 3981 { 4046 3982 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; 3983 + struct hnae3_tc_info *tc_info = &kinfo->tc_info; 4047 3984 int i; 4048 3985 4049 3986 for (i = 0; i < HNAE3_MAX_TC; i++) { 4050 - struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; 4051 3987 int j; 4052 3988 4053 - if (!tc_info->enable) 3989 + if (!test_bit(i, &tc_info->tc_en)) 4054 3990 continue; 4055 3991 4056 - for (j = 0; j < tc_info->tqp_count; j++) { 3992 + for (j = 0; j < tc_info->tqp_count[i]; j++) { 4057 3993 struct hnae3_queue *q; 4058 3994 4059 - q = priv->ring[tc_info->tqp_offset + j].tqp; 4060 - hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, 4061 - tc_info->tc); 3995 + q = priv->ring[tc_info->tqp_offset[i] + j].tqp; 3996 + 
hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i); 4062 3997 } 4063 3998 } 4064 3999 } ··· 4184 4121 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); 4185 4122 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); 4186 4123 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); 4187 - dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); 4124 + dev_info(priv->dev, "Total number of enabled TCs: %u\n", 4125 + kinfo->tc_info.num_tc); 4188 4126 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); 4189 4127 } 4190 4128 ··· 4755 4691 4756 4692 if (ch->rx_count || ch->tx_count) 4757 4693 return -EINVAL; 4694 + 4695 + if (kinfo->tc_info.mqprio_active) { 4696 + dev_err(&netdev->dev, 4697 + "it's not allowed to set channels via ethtool when MQPRIO mode is on\n"); 4698 + return -EINVAL; 4699 + } 4758 4700 4759 4701 if (new_tqp_num > hns3_get_max_available_channels(h) || 4760 4702 new_tqp_num < 1) {
+2
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
··· 359 359 set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps); 360 360 if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B)) 361 361 set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); 362 + if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B)) 363 + set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps); 362 364 } 363 365 364 366 static enum hclge_cmd_status
+13 -3
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
··· 518 518 #define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10) 519 519 #define HCLGE_CFG_UMV_TBL_SPACE_S 16 520 520 #define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) 521 + #define HCLGE_CFG_PF_RSS_SIZE_S 0 522 + #define HCLGE_CFG_PF_RSS_SIZE_M GENMASK(3, 0) 521 523 522 524 #define HCLGE_CFG_CMD_CNT 4 523 525 ··· 560 558 }; 561 559 562 560 #define HCLGE_RSS_CFG_TBL_SIZE 16 561 + #define HCLGE_RSS_CFG_TBL_SIZE_H 4 562 + #define HCLGE_RSS_CFG_TBL_BW_H 2U 563 + #define HCLGE_RSS_CFG_TBL_BW_L 8U 563 564 564 565 struct hclge_rss_indirection_table_cmd { 565 566 __le16 start_table_index; 566 567 __le16 rss_set_bitmap; 567 - u8 rsv[4]; 568 - u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE]; 568 + u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H]; 569 + u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE]; 569 570 }; 570 571 571 572 #define HCLGE_RSS_TC_OFFSET_S 0 572 - #define HCLGE_RSS_TC_OFFSET_M GENMASK(9, 0) 573 + #define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0) 574 + #define HCLGE_RSS_TC_SIZE_MSB_B 11 573 575 #define HCLGE_RSS_TC_SIZE_S 12 574 576 #define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12) 577 + #define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3 575 578 #define HCLGE_RSS_TC_VALID_B 15 576 579 struct hclge_rss_tc_mode_cmd { 577 580 __le16 rss_tc_mode[HCLGE_MAX_TC_NUM]; ··· 1058 1051 #define HCLGE_FD_AD_WR_RULE_ID_B 0 1059 1052 #define HCLGE_FD_AD_RULE_ID_S 1 1060 1053 #define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1) 1054 + #define HCLGE_FD_AD_TC_OVRD_B 16 1055 + #define HCLGE_FD_AD_TC_SIZE_S 17 1056 + #define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17) 1061 1057 1062 1058 struct hclge_fd_ad_config_cmd { 1063 1059 u8 stage;
+115 -11
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
··· 397 397 return 0; 398 398 } 399 399 400 + static int hclge_mqprio_qopt_check(struct hclge_dev *hdev, 401 + struct tc_mqprio_qopt_offload *mqprio_qopt) 402 + { 403 + u16 queue_sum = 0; 404 + int ret; 405 + int i; 406 + 407 + if (!mqprio_qopt->qopt.num_tc) { 408 + mqprio_qopt->qopt.num_tc = 1; 409 + return 0; 410 + } 411 + 412 + ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc, 413 + mqprio_qopt->qopt.prio_tc_map); 414 + if (ret) 415 + return ret; 416 + 417 + for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) { 418 + if (!is_power_of_2(mqprio_qopt->qopt.count[i])) { 419 + dev_err(&hdev->pdev->dev, 420 + "qopt queue count must be power of 2\n"); 421 + return -EINVAL; 422 + } 423 + 424 + if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) { 425 + dev_err(&hdev->pdev->dev, 426 + "qopt queue count should be no more than %u\n", 427 + hdev->pf_rss_size_max); 428 + return -EINVAL; 429 + } 430 + 431 + if (mqprio_qopt->qopt.offset[i] != queue_sum) { 432 + dev_err(&hdev->pdev->dev, 433 + "qopt queue offset must start from 0, and being continuous\n"); 434 + return -EINVAL; 435 + } 436 + 437 + if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) { 438 + dev_err(&hdev->pdev->dev, 439 + "qopt tx_rate is not supported\n"); 440 + return -EOPNOTSUPP; 441 + } 442 + 443 + queue_sum = mqprio_qopt->qopt.offset[i]; 444 + queue_sum += mqprio_qopt->qopt.count[i]; 445 + } 446 + if (hdev->vport[0].alloc_tqps < queue_sum) { 447 + dev_err(&hdev->pdev->dev, 448 + "qopt queue count sum should be less than %u\n", 449 + hdev->vport[0].alloc_tqps); 450 + return -EINVAL; 451 + } 452 + 453 + return 0; 454 + } 455 + 456 + static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info, 457 + struct tc_mqprio_qopt_offload *mqprio_qopt) 458 + { 459 + int i; 460 + 461 + memset(tc_info, 0, sizeof(*tc_info)); 462 + tc_info->num_tc = mqprio_qopt->qopt.num_tc; 463 + memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map, 464 + sizeof_field(struct hnae3_tc_info, prio_tc)); 465 + 
memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count, 466 + sizeof_field(struct hnae3_tc_info, tqp_count)); 467 + memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset, 468 + sizeof_field(struct hnae3_tc_info, tqp_offset)); 469 + 470 + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 471 + set_bit(tc_info->prio_tc[i], &tc_info->tc_en); 472 + } 473 + 474 + static int hclge_config_tc(struct hclge_dev *hdev, 475 + struct hnae3_tc_info *tc_info) 476 + { 477 + int i; 478 + 479 + hclge_tm_schd_info_update(hdev, tc_info->num_tc); 480 + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 481 + hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i]; 482 + 483 + return hclge_map_update(hdev); 484 + } 485 + 400 486 /* Set up TC for hardware offloaded mqprio in channel mode */ 401 - static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) 487 + static int hclge_setup_tc(struct hnae3_handle *h, 488 + struct tc_mqprio_qopt_offload *mqprio_qopt) 402 489 { 403 490 struct hclge_vport *vport = hclge_get_vport(h); 491 + struct hnae3_knic_private_info *kinfo; 404 492 struct hclge_dev *hdev = vport->back; 493 + struct hnae3_tc_info old_tc_info; 494 + u8 tc = mqprio_qopt->qopt.num_tc; 405 495 int ret; 496 + 497 + /* if client unregistered, it's not allowed to change 498 + * mqprio configuration, which may cause uninit ring 499 + * fail. 
500 + */ 501 + if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) 502 + return -EBUSY; 406 503 407 504 if (hdev->flag & HCLGE_FLAG_DCB_ENABLE) 408 505 return -EINVAL; 409 506 410 - ret = hclge_dcb_common_validate(hdev, tc, prio_tc); 411 - if (ret) 412 - return -EINVAL; 507 + ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt); 508 + if (ret) { 509 + dev_err(&hdev->pdev->dev, 510 + "failed to check mqprio qopt params, ret = %d\n", ret); 511 + return ret; 512 + } 413 513 414 514 ret = hclge_notify_down_uinit(hdev); 415 515 if (ret) 416 516 return ret; 417 517 418 - hclge_tm_schd_info_update(hdev, tc); 419 - hclge_tm_prio_tc_info_update(hdev, prio_tc); 518 + kinfo = &vport->nic.kinfo; 519 + memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info)); 520 + hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt); 521 + kinfo->tc_info.mqprio_active = tc > 0; 420 522 421 - ret = hclge_tm_init_hw(hdev, false); 422 - if (ret) 423 - goto err_out; 424 - 425 - ret = hclge_client_setup_tc(hdev); 523 + ret = hclge_config_tc(hdev, &kinfo->tc_info); 426 524 if (ret) 427 525 goto err_out; 428 526 ··· 534 436 return hclge_notify_init_up(hdev); 535 437 536 438 err_out: 439 + /* roll-back */ 440 + memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info)); 441 + if (hclge_config_tc(hdev, &kinfo->tc_info)) 442 + dev_err(&hdev->pdev->dev, 443 + "failed to roll back tc configuration\n"); 444 + 537 445 hclge_notify_init_up(hdev); 538 446 539 447 return ret;
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
··· 1454 1454 1455 1455 dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id); 1456 1456 1457 - for (i = 0; i < kinfo->num_tc; i++) { 1457 + for (i = 0; i < kinfo->tc_info.num_tc; i++) { 1458 1458 u16 qsid = vport->qs_offset + i; 1459 1459 1460 1460 hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+408 -60
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 1285 1285 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), 1286 1286 HCLGE_CFG_DEFAULT_SPEED_M, 1287 1287 HCLGE_CFG_DEFAULT_SPEED_S); 1288 - cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), 1289 - HCLGE_CFG_RSS_SIZE_M, 1290 - HCLGE_CFG_RSS_SIZE_S); 1288 + cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), 1289 + HCLGE_CFG_RSS_SIZE_M, 1290 + HCLGE_CFG_RSS_SIZE_S); 1291 1291 1292 1292 for (i = 0; i < ETH_ALEN; i++) 1293 1293 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; ··· 1308 1308 HCLGE_CFG_UMV_TBL_SPACE_S); 1309 1309 if (!cfg->umv_space) 1310 1310 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; 1311 + 1312 + cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), 1313 + HCLGE_CFG_PF_RSS_SIZE_M, 1314 + HCLGE_CFG_PF_RSS_SIZE_S); 1315 + 1316 + /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a 1317 + * power of 2, instead of reading out directly. This would 1318 + * be more flexible for future changes and expansions. 1319 + * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S, 1320 + * it does not make sense if PF's field is 0. In this case, PF and VF 1321 + * has the same max rss size filed: HCLGE_CFG_RSS_SIZE_S. 1322 + */ 1323 + cfg->pf_rss_size_max = cfg->pf_rss_size_max ? 
1324 + 1U << cfg->pf_rss_size_max : 1325 + cfg->vf_rss_size_max; 1311 1326 } 1312 1327 1313 1328 /* hclge_get_cfg: query the static parameter from flash ··· 1484 1469 1485 1470 hdev->num_vmdq_vport = cfg.vmdq_vport_num; 1486 1471 hdev->base_tqp_pid = 0; 1487 - hdev->rss_size_max = cfg.rss_size_max; 1472 + hdev->vf_rss_size_max = cfg.vf_rss_size_max; 1473 + hdev->pf_rss_size_max = cfg.pf_rss_size_max; 1488 1474 hdev->rx_buf_len = cfg.rx_buf_len; 1489 1475 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); 1490 1476 hdev->hw.mac.media_type = cfg.media_type; ··· 1668 1652 } 1669 1653 } 1670 1654 vport->alloc_tqps = alloced; 1671 - kinfo->rss_size = min_t(u16, hdev->rss_size_max, 1655 + kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, 1672 1656 vport->alloc_tqps / hdev->tm_info.num_tc); 1673 1657 1674 1658 /* ensure one to one mapping between irq and queue at default */ ··· 4278 4262 return 0; 4279 4263 } 4280 4264 4281 - static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) 4265 + static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir) 4282 4266 { 4283 4267 struct hclge_rss_indirection_table_cmd *req; 4284 4268 struct hclge_desc desc; 4285 - int i, j; 4269 + u8 rss_msb_oft; 4270 + u8 rss_msb_val; 4286 4271 int ret; 4272 + u16 qid; 4273 + int i; 4274 + u32 j; 4287 4275 4288 4276 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 4289 4277 ··· 4298 4278 req->start_table_index = 4299 4279 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 4300 4280 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 4301 - 4302 - for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 4303 - req->rss_result[j] = 4304 - indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 4305 - 4281 + for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) { 4282 + qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 4283 + req->rss_qid_l[j] = qid & 0xff; 4284 + rss_msb_oft = 4285 + j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE; 4286 + rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) << 
4287 + (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE); 4288 + req->rss_qid_h[rss_msb_oft] |= rss_msb_val; 4289 + } 4306 4290 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4307 4291 if (ret) { 4308 4292 dev_err(&hdev->pdev->dev, ··· 4335 4311 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 4336 4312 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, 4337 4313 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 4314 + hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B, 4315 + tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1); 4338 4316 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 4339 4317 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 4340 4318 ··· 4627 4601 struct hclge_vport *vport = hclge_get_vport(handle); 4628 4602 struct hclge_dev *hdev = vport->back; 4629 4603 4630 - return hdev->rss_size_max; 4604 + return hdev->pf_rss_size_max; 4605 + } 4606 + 4607 + static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) 4608 + { 4609 + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 4610 + struct hclge_vport *vport = hdev->vport; 4611 + u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 4612 + u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; 4613 + u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 4614 + struct hnae3_tc_info *tc_info; 4615 + u16 roundup_size; 4616 + u16 rss_size; 4617 + int i; 4618 + 4619 + tc_info = &vport->nic.kinfo.tc_info; 4620 + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 4621 + rss_size = tc_info->tqp_count[i]; 4622 + tc_valid[i] = 0; 4623 + 4624 + if (!(hdev->hw_tc_map & BIT(i))) 4625 + continue; 4626 + 4627 + /* tc_size set to hardware is the log2 of roundup power of two 4628 + * of rss_size, the acutal queue size is limited by indirection 4629 + * table. 
4630 + */ 4631 + if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || 4632 + rss_size == 0) { 4633 + dev_err(&hdev->pdev->dev, 4634 + "Configure rss tc size failed, invalid TC_SIZE = %u\n", 4635 + rss_size); 4636 + return -EINVAL; 4637 + } 4638 + 4639 + roundup_size = roundup_pow_of_two(rss_size); 4640 + roundup_size = ilog2(roundup_size); 4641 + 4642 + tc_valid[i] = 1; 4643 + tc_size[i] = roundup_size; 4644 + tc_offset[i] = tc_info->tqp_offset[i]; 4645 + } 4646 + 4647 + return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 4631 4648 } 4632 4649 4633 4650 int hclge_rss_init_hw(struct hclge_dev *hdev) 4634 4651 { 4635 4652 struct hclge_vport *vport = hdev->vport; 4636 - u8 *rss_indir = vport[0].rss_indirection_tbl; 4637 - u16 rss_size = vport[0].alloc_rss_size; 4638 - u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 4639 - u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 4653 + u16 *rss_indir = vport[0].rss_indirection_tbl; 4640 4654 u8 *key = vport[0].rss_hash_key; 4641 4655 u8 hfunc = vport[0].rss_algo; 4642 - u16 tc_valid[HCLGE_MAX_TC_NUM]; 4643 - u16 roundup_size; 4644 - unsigned int i; 4645 4656 int ret; 4646 4657 4647 4658 ret = hclge_set_rss_indir_table(hdev, rss_indir); ··· 4693 4630 if (ret) 4694 4631 return ret; 4695 4632 4696 - /* Each TC have the same queue size, and tc_size set to hardware is 4697 - * the log2 of roundup power of two of rss_size, the acutal queue 4698 - * size is limited by indirection table. 
4699 - */ 4700 - if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 4701 - dev_err(&hdev->pdev->dev, 4702 - "Configure rss tc size failed, invalid TC_SIZE = %u\n", 4703 - rss_size); 4704 - return -EINVAL; 4705 - } 4706 - 4707 - roundup_size = roundup_pow_of_two(rss_size); 4708 - roundup_size = ilog2(roundup_size); 4709 - 4710 - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 4711 - tc_valid[i] = 0; 4712 - 4713 - if (!(hdev->hw_tc_map & BIT(i))) 4714 - continue; 4715 - 4716 - tc_valid[i] = 1; 4717 - tc_size[i] = roundup_size; 4718 - tc_offset[i] = rss_size * i; 4719 - } 4720 - 4721 - return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 4633 + return hclge_init_rss_tc_mode(hdev); 4722 4634 } 4723 4635 4724 4636 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) ··· 5125 5087 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, 5126 5088 struct hclge_fd_ad_data *action) 5127 5089 { 5090 + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 5128 5091 struct hclge_fd_ad_config_cmd *req; 5129 5092 struct hclge_desc desc; 5130 5093 u64 ad_data = 0; ··· 5141 5102 action->write_rule_id_to_bd); 5142 5103 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, 5143 5104 action->rule_id); 5105 + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { 5106 + hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B, 5107 + action->override_tc); 5108 + hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M, 5109 + HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); 5110 + } 5144 5111 ad_data <<= 32; 5145 5112 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); 5146 5113 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, ··· 5390 5345 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, 5391 5346 struct hclge_fd_rule *rule) 5392 5347 { 5348 + struct hclge_vport *vport = hdev->vport; 5349 + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5393 5350 struct hclge_fd_ad_data ad_data; 5394 5351 5352 + 
memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data)); 5395 5353 ad_data.ad_id = rule->location; 5396 5354 5397 5355 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 5398 5356 ad_data.drop_packet = true; 5399 - ad_data.forward_to_direct_queue = false; 5400 - ad_data.queue_id = 0; 5357 + } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { 5358 + ad_data.override_tc = true; 5359 + ad_data.queue_id = 5360 + kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; 5361 + ad_data.tc_size = 5362 + ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); 5401 5363 } else { 5402 - ad_data.drop_packet = false; 5403 5364 ad_data.forward_to_direct_queue = true; 5404 5365 ad_data.queue_id = rule->queue_id; 5405 5366 } ··· 5922 5871 return ret; 5923 5872 } 5924 5873 5874 + static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) 5875 + { 5876 + struct hclge_vport *vport = hclge_get_vport(handle); 5877 + struct hclge_dev *hdev = vport->back; 5878 + 5879 + return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; 5880 + } 5881 + 5925 5882 static int hclge_add_fd_entry(struct hnae3_handle *handle, 5926 5883 struct ethtool_rxnfc *cmd) 5927 5884 { ··· 5952 5893 dev_err(&hdev->pdev->dev, 5953 5894 "please enable flow director first\n"); 5954 5895 return -EOPNOTSUPP; 5896 + } 5897 + 5898 + if (hclge_is_cls_flower_active(handle)) { 5899 + dev_err(&hdev->pdev->dev, 5900 + "please delete all exist cls flower rules first\n"); 5901 + return -EINVAL; 5955 5902 } 5956 5903 5957 5904 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; ··· 5990 5925 return -EINVAL; 5991 5926 } 5992 5927 5993 - action = HCLGE_FD_ACTION_ACCEPT_PACKET; 5928 + action = HCLGE_FD_ACTION_SELECT_QUEUE; 5994 5929 q_index = ring; 5995 5930 } 5996 5931 ··· 6041 5976 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 6042 5977 return -EINVAL; 6043 5978 6044 - if (!hclge_fd_rule_exist(hdev, fs->location)) { 5979 + if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num || 5980 + 
!hclge_fd_rule_exist(hdev, fs->location)) { 6045 5981 dev_err(&hdev->pdev->dev, 6046 5982 "Delete fail, rule %u is inexistent\n", fs->location); 6047 5983 return -ENOENT; ··· 6142 6076 struct hclge_vport *vport = hclge_get_vport(handle); 6143 6077 struct hclge_dev *hdev = vport->back; 6144 6078 6145 - if (!hnae3_dev_fd_supported(hdev)) 6079 + if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle)) 6146 6080 return -EOPNOTSUPP; 6147 6081 6148 6082 cmd->rule_cnt = hdev->hclge_fd_rule_num; ··· 6485 6419 * arfs should not work 6486 6420 */ 6487 6421 spin_lock_bh(&hdev->fd_rule_lock); 6488 - if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { 6422 + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE || 6423 + hdev->fd_active_type != HCLGE_FD_RULE_NONE) { 6489 6424 spin_unlock_bh(&hdev->fd_rule_lock); 6490 6425 return -EOPNOTSUPP; 6491 6426 } ··· 6514 6447 6515 6448 set_bit(bit_id, hdev->fd_bmap); 6516 6449 rule->location = bit_id; 6517 - rule->flow_id = flow_id; 6450 + rule->arfs.flow_id = flow_id; 6518 6451 rule->queue_id = queue_id; 6519 6452 hclge_fd_build_arfs_rule(&new_tuples, rule); 6520 6453 ret = hclge_fd_config_rule(hdev, rule); ··· 6558 6491 } 6559 6492 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 6560 6493 if (rps_may_expire_flow(handle->netdev, rule->queue_id, 6561 - rule->flow_id, rule->location)) { 6494 + rule->arfs.flow_id, rule->location)) { 6562 6495 hlist_del_init(&rule->rule_node); 6563 6496 hlist_add_head(&rule->rule_node, &del_list); 6564 6497 hdev->hclge_fd_rule_num--; ··· 6585 6518 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE) 6586 6519 hclge_del_all_fd_entries(handle, true); 6587 6520 #endif 6521 + } 6522 + 6523 + static void hclge_get_cls_key_basic(const struct flow_rule *flow, 6524 + struct hclge_fd_rule *rule) 6525 + { 6526 + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) { 6527 + struct flow_match_basic match; 6528 + u16 ethtype_key, ethtype_mask; 6529 + 6530 + flow_rule_match_basic(flow, 
&match); 6531 + ethtype_key = ntohs(match.key->n_proto); 6532 + ethtype_mask = ntohs(match.mask->n_proto); 6533 + 6534 + if (ethtype_key == ETH_P_ALL) { 6535 + ethtype_key = 0; 6536 + ethtype_mask = 0; 6537 + } 6538 + rule->tuples.ether_proto = ethtype_key; 6539 + rule->tuples_mask.ether_proto = ethtype_mask; 6540 + rule->tuples.ip_proto = match.key->ip_proto; 6541 + rule->tuples_mask.ip_proto = match.mask->ip_proto; 6542 + } else { 6543 + rule->unused_tuple |= BIT(INNER_IP_PROTO); 6544 + rule->unused_tuple |= BIT(INNER_ETH_TYPE); 6545 + } 6546 + } 6547 + 6548 + static void hclge_get_cls_key_mac(const struct flow_rule *flow, 6549 + struct hclge_fd_rule *rule) 6550 + { 6551 + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 6552 + struct flow_match_eth_addrs match; 6553 + 6554 + flow_rule_match_eth_addrs(flow, &match); 6555 + ether_addr_copy(rule->tuples.dst_mac, match.key->dst); 6556 + ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); 6557 + ether_addr_copy(rule->tuples.src_mac, match.key->src); 6558 + ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); 6559 + } else { 6560 + rule->unused_tuple |= BIT(INNER_DST_MAC); 6561 + rule->unused_tuple |= BIT(INNER_SRC_MAC); 6562 + } 6563 + } 6564 + 6565 + static void hclge_get_cls_key_vlan(const struct flow_rule *flow, 6566 + struct hclge_fd_rule *rule) 6567 + { 6568 + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { 6569 + struct flow_match_vlan match; 6570 + 6571 + flow_rule_match_vlan(flow, &match); 6572 + rule->tuples.vlan_tag1 = match.key->vlan_id | 6573 + (match.key->vlan_priority << VLAN_PRIO_SHIFT); 6574 + rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | 6575 + (match.mask->vlan_priority << VLAN_PRIO_SHIFT); 6576 + } else { 6577 + rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); 6578 + } 6579 + } 6580 + 6581 + static void hclge_get_cls_key_ip(const struct flow_rule *flow, 6582 + struct hclge_fd_rule *rule) 6583 + { 6584 + u16 addr_type = 0; 6585 + 6586 + if 
(flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) { 6587 + struct flow_match_control match; 6588 + 6589 + flow_rule_match_control(flow, &match); 6590 + addr_type = match.key->addr_type; 6591 + } 6592 + 6593 + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 6594 + struct flow_match_ipv4_addrs match; 6595 + 6596 + flow_rule_match_ipv4_addrs(flow, &match); 6597 + rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); 6598 + rule->tuples_mask.src_ip[IPV4_INDEX] = 6599 + be32_to_cpu(match.mask->src); 6600 + rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); 6601 + rule->tuples_mask.dst_ip[IPV4_INDEX] = 6602 + be32_to_cpu(match.mask->dst); 6603 + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 6604 + struct flow_match_ipv6_addrs match; 6605 + 6606 + flow_rule_match_ipv6_addrs(flow, &match); 6607 + be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, 6608 + IPV6_SIZE); 6609 + be32_to_cpu_array(rule->tuples_mask.src_ip, 6610 + match.mask->src.s6_addr32, IPV6_SIZE); 6611 + be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, 6612 + IPV6_SIZE); 6613 + be32_to_cpu_array(rule->tuples_mask.dst_ip, 6614 + match.mask->dst.s6_addr32, IPV6_SIZE); 6615 + } else { 6616 + rule->unused_tuple |= BIT(INNER_SRC_IP); 6617 + rule->unused_tuple |= BIT(INNER_DST_IP); 6618 + } 6619 + } 6620 + 6621 + static void hclge_get_cls_key_port(const struct flow_rule *flow, 6622 + struct hclge_fd_rule *rule) 6623 + { 6624 + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { 6625 + struct flow_match_ports match; 6626 + 6627 + flow_rule_match_ports(flow, &match); 6628 + 6629 + rule->tuples.src_port = be16_to_cpu(match.key->src); 6630 + rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); 6631 + rule->tuples.dst_port = be16_to_cpu(match.key->dst); 6632 + rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); 6633 + } else { 6634 + rule->unused_tuple |= BIT(INNER_SRC_PORT); 6635 + rule->unused_tuple |= BIT(INNER_DST_PORT); 6636 + 
} 6637 + } 6638 + 6639 + static int hclge_parse_cls_flower(struct hclge_dev *hdev, 6640 + struct flow_cls_offload *cls_flower, 6641 + struct hclge_fd_rule *rule) 6642 + { 6643 + struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower); 6644 + struct flow_dissector *dissector = flow->match.dissector; 6645 + 6646 + if (dissector->used_keys & 6647 + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 6648 + BIT(FLOW_DISSECTOR_KEY_BASIC) | 6649 + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 6650 + BIT(FLOW_DISSECTOR_KEY_VLAN) | 6651 + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 6652 + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 6653 + BIT(FLOW_DISSECTOR_KEY_PORTS))) { 6654 + dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n", 6655 + dissector->used_keys); 6656 + return -EOPNOTSUPP; 6657 + } 6658 + 6659 + hclge_get_cls_key_basic(flow, rule); 6660 + hclge_get_cls_key_mac(flow, rule); 6661 + hclge_get_cls_key_vlan(flow, rule); 6662 + hclge_get_cls_key_ip(flow, rule); 6663 + hclge_get_cls_key_port(flow, rule); 6664 + 6665 + return 0; 6666 + } 6667 + 6668 + static int hclge_check_cls_flower(struct hclge_dev *hdev, 6669 + struct flow_cls_offload *cls_flower, int tc) 6670 + { 6671 + u32 prio = cls_flower->common.prio; 6672 + 6673 + if (tc < 0 || tc > hdev->tc_max) { 6674 + dev_err(&hdev->pdev->dev, "invalid traffic class\n"); 6675 + return -EINVAL; 6676 + } 6677 + 6678 + if (prio == 0 || 6679 + prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 6680 + dev_err(&hdev->pdev->dev, 6681 + "prio %u should be in range[1, %u]\n", 6682 + prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); 6683 + return -EINVAL; 6684 + } 6685 + 6686 + if (test_bit(prio - 1, hdev->fd_bmap)) { 6687 + dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); 6688 + return -EINVAL; 6689 + } 6690 + return 0; 6691 + } 6692 + 6693 + static int hclge_add_cls_flower(struct hnae3_handle *handle, 6694 + struct flow_cls_offload *cls_flower, 6695 + int tc) 6696 + { 6697 + struct hclge_vport *vport = hclge_get_vport(handle); 6698 + struct 
hclge_dev *hdev = vport->back; 6699 + struct hclge_fd_rule *rule; 6700 + int ret; 6701 + 6702 + if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { 6703 + dev_err(&hdev->pdev->dev, 6704 + "please remove all exist fd rules via ethtool first\n"); 6705 + return -EINVAL; 6706 + } 6707 + 6708 + ret = hclge_check_cls_flower(hdev, cls_flower, tc); 6709 + if (ret) { 6710 + dev_err(&hdev->pdev->dev, 6711 + "failed to check cls flower params, ret = %d\n", ret); 6712 + return ret; 6713 + } 6714 + 6715 + rule = kzalloc(sizeof(*rule), GFP_KERNEL); 6716 + if (!rule) 6717 + return -ENOMEM; 6718 + 6719 + ret = hclge_parse_cls_flower(hdev, cls_flower, rule); 6720 + if (ret) 6721 + goto err; 6722 + 6723 + rule->action = HCLGE_FD_ACTION_SELECT_TC; 6724 + rule->cls_flower.tc = tc; 6725 + rule->location = cls_flower->common.prio - 1; 6726 + rule->vf_id = 0; 6727 + rule->cls_flower.cookie = cls_flower->cookie; 6728 + rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; 6729 + 6730 + spin_lock_bh(&hdev->fd_rule_lock); 6731 + hclge_clear_arfs_rules(handle); 6732 + 6733 + ret = hclge_fd_config_rule(hdev, rule); 6734 + 6735 + spin_unlock_bh(&hdev->fd_rule_lock); 6736 + 6737 + if (ret) { 6738 + dev_err(&hdev->pdev->dev, 6739 + "failed to add cls flower rule, ret = %d\n", ret); 6740 + goto err; 6741 + } 6742 + 6743 + return 0; 6744 + err: 6745 + kfree(rule); 6746 + return ret; 6747 + } 6748 + 6749 + static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, 6750 + unsigned long cookie) 6751 + { 6752 + struct hclge_fd_rule *rule; 6753 + struct hlist_node *node; 6754 + 6755 + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 6756 + if (rule->cls_flower.cookie == cookie) 6757 + return rule; 6758 + } 6759 + 6760 + return NULL; 6761 + } 6762 + 6763 + static int hclge_del_cls_flower(struct hnae3_handle *handle, 6764 + struct flow_cls_offload *cls_flower) 6765 + { 6766 + struct hclge_vport *vport = hclge_get_vport(handle); 6767 + struct hclge_dev *hdev = vport->back; 6768 
+ struct hclge_fd_rule *rule; 6769 + int ret; 6770 + 6771 + spin_lock_bh(&hdev->fd_rule_lock); 6772 + 6773 + rule = hclge_find_cls_flower(hdev, cls_flower->cookie); 6774 + if (!rule) { 6775 + spin_unlock_bh(&hdev->fd_rule_lock); 6776 + return -EINVAL; 6777 + } 6778 + 6779 + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, 6780 + NULL, false); 6781 + if (ret) { 6782 + dev_err(&hdev->pdev->dev, 6783 + "failed to delete cls flower rule %u, ret = %d\n", 6784 + rule->location, ret); 6785 + spin_unlock_bh(&hdev->fd_rule_lock); 6786 + return ret; 6787 + } 6788 + 6789 + ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false); 6790 + if (ret) { 6791 + dev_err(&hdev->pdev->dev, 6792 + "failed to delete cls flower rule %u in list, ret = %d\n", 6793 + rule->location, ret); 6794 + spin_unlock_bh(&hdev->fd_rule_lock); 6795 + return ret; 6796 + } 6797 + 6798 + spin_unlock_bh(&hdev->fd_rule_lock); 6799 + 6800 + return 0; 6588 6801 } 6589 6802 6590 6803 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) ··· 11041 10694 11042 10695 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 11043 10696 { 11044 - struct hnae3_knic_private_info *kinfo = &handle->kinfo; 11045 10697 struct hclge_vport *vport = hclge_get_vport(handle); 11046 10698 struct hclge_dev *hdev = vport->back; 11047 10699 11048 - return min_t(u32, hdev->rss_size_max, 11049 - vport->alloc_tqps / kinfo->num_tc); 10700 + return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); 11050 10701 } 11051 10702 11052 10703 static void hclge_get_channels(struct hnae3_handle *handle, ··· 11063 10718 struct hclge_dev *hdev = vport->back; 11064 10719 11065 10720 *alloc_tqps = vport->alloc_tqps; 11066 - *max_rss_size = hdev->rss_size_max; 10721 + *max_rss_size = hdev->pf_rss_size_max; 11067 10722 } 11068 10723 11069 10724 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, ··· 11131 10786 dev_info(&hdev->pdev->dev, 11132 10787 "Channels changed, 
rss_size from %u to %u, tqps from %u to %u", 11133 10788 cur_rss_size, kinfo->rss_size, 11134 - cur_tqps, kinfo->rss_size * kinfo->num_tc); 10789 + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 11135 10790 11136 10791 return ret; 11137 10792 } ··· 11864 11519 .set_vf_mac = hclge_set_vf_mac, 11865 11520 .get_module_eeprom = hclge_get_module_eeprom, 11866 11521 .get_cmdq_stat = hclge_get_cmdq_stat, 11522 + .add_cls_flower = hclge_add_cls_flower, 11523 + .del_cls_flower = hclge_del_cls_flower, 11524 + .cls_flower_active = hclge_is_cls_flower_active, 11867 11525 }; 11868 11526 11869 11527 static struct hnae3_ae_algo ae_algo = {
+21 -7
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
··· 348 348 u8 tc_num; 349 349 u16 tqp_desc_num; 350 350 u16 rx_buf_len; 351 - u16 rss_size_max; 351 + u16 vf_rss_size_max; 352 + u16 pf_rss_size_max; 352 353 u8 phy_addr; 353 354 u8 media_type; 354 355 u8 mac_addr[ETH_ALEN]; ··· 565 564 HCLGE_FD_RULE_NONE, 566 565 HCLGE_FD_ARFS_ACTIVE, 567 566 HCLGE_FD_EP_ACTIVE, 567 + HCLGE_FD_TC_FLOWER_ACTIVE, 568 568 }; 569 569 570 570 enum HCLGE_FD_PACKET_TYPE { ··· 574 572 }; 575 573 576 574 enum HCLGE_FD_ACTION { 577 - HCLGE_FD_ACTION_ACCEPT_PACKET, 575 + HCLGE_FD_ACTION_SELECT_QUEUE, 578 576 HCLGE_FD_ACTION_DROP_PACKET, 577 + HCLGE_FD_ACTION_SELECT_TC, 579 578 }; 580 579 581 580 struct hclge_fd_key_cfg { ··· 621 618 struct hclge_fd_rule_tuples tuples_mask; 622 619 u32 unused_tuple; 623 620 u32 flow_type; 624 - u8 action; 625 - u16 vf_id; 621 + union { 622 + struct { 623 + unsigned long cookie; 624 + u8 tc; 625 + } cls_flower; 626 + struct { 627 + u16 flow_id; /* only used for arfs */ 628 + } arfs; 629 + }; 626 630 u16 queue_id; 631 + u16 vf_id; 627 632 u16 location; 628 - u16 flow_id; /* only used for arfs */ 629 633 enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type; 634 + u8 action; 630 635 }; 631 636 632 637 struct hclge_fd_ad_data { ··· 648 637 u8 write_rule_id_to_bd; 649 638 u8 next_input_key; 650 639 u16 rule_id; 640 + u16 tc_size; 641 + u8 override_tc; 651 642 }; 652 643 653 644 enum HCLGE_MAC_NODE_STATE { ··· 758 745 759 746 u16 base_tqp_pid; /* Base task tqp physical id of this PF */ 760 747 u16 alloc_rss_size; /* Allocated RSS task queue */ 761 - u16 rss_size_max; /* HW defined max RSS task queue */ 748 + u16 vf_rss_size_max; /* HW defined VF max RSS task queue */ 749 + u16 pf_rss_size_max; /* HW defined PF max RSS task queue */ 762 750 763 751 u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */ 764 752 u16 num_alloc_vport; /* Num vports this driver supports */ ··· 920 906 921 907 u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ 922 908 /* User configured lookup table entries */ 923 - 
u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; 909 + u16 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; 924 910 int rss_algo; /* User configured hash algorithm */ 925 911 /* User configured rss tuple sets */ 926 912 struct hclge_rss_tuple_cfg rss_tuple_sets;
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
··· 414 414 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 415 415 unsigned int i; 416 416 417 - for (i = 0; i < kinfo->num_tc; i++) 417 + for (i = 0; i < kinfo->tc_info.num_tc; i++) 418 418 resp_msg->data[0] |= BIT(i); 419 419 420 420 resp_msg->len = sizeof(u8);
+71 -27
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
··· 565 565 HCLGE_SHAPER_BS_U_DEF, 566 566 HCLGE_SHAPER_BS_S_DEF); 567 567 568 - for (i = 0; i < kinfo->num_tc; i++) { 568 + for (i = 0; i < kinfo->tc_info.num_tc; i++) { 569 569 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, 570 570 false); 571 571 ··· 589 589 return 0; 590 590 } 591 591 592 + static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport) 593 + { 594 + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 595 + struct hnae3_tc_info *tc_info = &kinfo->tc_info; 596 + struct hclge_dev *hdev = vport->back; 597 + u16 max_rss_size = 0; 598 + int i; 599 + 600 + if (!tc_info->mqprio_active) 601 + return vport->alloc_tqps / tc_info->num_tc; 602 + 603 + for (i = 0; i < HNAE3_MAX_TC; i++) { 604 + if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc) 605 + continue; 606 + if (max_rss_size < tc_info->tqp_count[i]) 607 + max_rss_size = tc_info->tqp_count[i]; 608 + } 609 + 610 + return max_rss_size; 611 + } 612 + 613 + static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport) 614 + { 615 + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 616 + struct hnae3_tc_info *tc_info = &kinfo->tc_info; 617 + struct hclge_dev *hdev = vport->back; 618 + int sum = 0; 619 + int i; 620 + 621 + if (!tc_info->mqprio_active) 622 + return kinfo->rss_size * tc_info->num_tc; 623 + 624 + for (i = 0; i < HNAE3_MAX_TC; i++) { 625 + if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc) 626 + sum += tc_info->tqp_count[i]; 627 + } 628 + 629 + return sum; 630 + } 631 + 592 632 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) 593 633 { 594 634 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 595 635 struct hclge_dev *hdev = vport->back; 636 + u16 vport_max_rss_size; 596 637 u16 max_rss_size; 597 638 u8 i; 598 639 599 640 /* TC configuration is shared by PF/VF in one port, only allow 600 641 * one tc for VF for simplicity. VF's vport_id is non zero. 601 642 */ 602 - kinfo->num_tc = vport->vport_id ? 
1 : 643 + kinfo->tc_info.num_tc = vport->vport_id ? 1 : 603 644 min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); 604 645 vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) + 605 646 (vport->vport_id ? (vport->vport_id - 1) : 0); 606 647 607 - max_rss_size = min_t(u16, hdev->rss_size_max, 608 - vport->alloc_tqps / kinfo->num_tc); 648 + vport_max_rss_size = vport->vport_id ? hdev->vf_rss_size_max : 649 + hdev->pf_rss_size_max; 650 + max_rss_size = min_t(u16, vport_max_rss_size, 651 + hclge_vport_get_max_rss_size(vport)); 609 652 610 653 /* Set to user value, no larger than max_rss_size. */ 611 654 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && ··· 665 622 if (!kinfo->req_rss_size) 666 623 max_rss_size = min_t(u16, max_rss_size, 667 624 (hdev->num_nic_msi - 1) / 668 - kinfo->num_tc); 625 + kinfo->tc_info.num_tc); 669 626 670 627 /* Set to the maximum specification value (max_rss_size). */ 671 628 kinfo->rss_size = max_rss_size; 672 629 } 673 630 674 - kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; 631 + kinfo->num_tqps = hclge_vport_get_tqp_num(vport); 675 632 vport->dwrr = 100; /* 100 percent as init */ 676 633 vport->alloc_rss_size = kinfo->rss_size; 677 634 vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; 678 635 636 + /* when enable mqprio, the tc_info has been updated. 
*/ 637 + if (kinfo->tc_info.mqprio_active) 638 + return; 639 + 679 640 for (i = 0; i < HNAE3_MAX_TC; i++) { 680 - if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) { 681 - kinfo->tc_info[i].enable = true; 682 - kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; 683 - kinfo->tc_info[i].tqp_count = kinfo->rss_size; 684 - kinfo->tc_info[i].tc = i; 641 + if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) { 642 + set_bit(i, &kinfo->tc_info.tc_en); 643 + kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size; 644 + kinfo->tc_info.tqp_count[i] = kinfo->rss_size; 685 645 } else { 686 646 /* Set to default queue if TC is disable */ 687 - kinfo->tc_info[i].enable = false; 688 - kinfo->tc_info[i].tqp_offset = 0; 689 - kinfo->tc_info[i].tqp_count = 1; 690 - kinfo->tc_info[i].tc = 0; 647 + clear_bit(i, &kinfo->tc_info.tc_en); 648 + kinfo->tc_info.tqp_offset[i] = 0; 649 + kinfo->tc_info.tqp_count[i] = 1; 691 650 } 692 651 } 693 652 694 - memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc, 695 - sizeof_field(struct hnae3_knic_private_info, prio_tc)); 653 + memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc, 654 + sizeof_field(struct hnae3_tc_info, prio_tc)); 696 655 } 697 656 698 657 static void hclge_tm_vport_info_update(struct hclge_dev *hdev) ··· 899 854 struct hclge_vport *vport) 900 855 { 901 856 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 857 + struct hnae3_tc_info *tc_info = &kinfo->tc_info; 902 858 struct hnae3_queue **tqp = kinfo->tqp; 903 - struct hnae3_tc_info *v_tc_info; 904 859 u32 i, j; 905 860 int ret; 906 861 907 - for (i = 0; i < kinfo->num_tc; i++) { 908 - v_tc_info = &kinfo->tc_info[i]; 909 - for (j = 0; j < v_tc_info->tqp_count; j++) { 910 - struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j]; 862 + for (i = 0; i < tc_info->num_tc; i++) { 863 + for (j = 0; j < tc_info->tqp_count[i]; j++) { 864 + struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j]; 911 865 912 866 ret = hclge_tm_q_to_qs_map_cfg(hdev, 913 867 hclge_get_queue_id(q), ··· 
931 887 struct hnae3_knic_private_info *kinfo = 932 888 &vport[k].nic.kinfo; 933 889 934 - for (i = 0; i < kinfo->num_tc; i++) { 890 + for (i = 0; i < kinfo->tc_info.num_tc; i++) { 935 891 ret = hclge_tm_qs_to_pri_map_cfg( 936 892 hdev, vport[k].qs_offset + i, i); 937 893 if (ret) ··· 1045 1001 u32 i; 1046 1002 int ret; 1047 1003 1048 - for (i = 0; i < kinfo->num_tc; i++) { 1004 + for (i = 0; i < kinfo->tc_info.num_tc; i++) { 1049 1005 ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit, 1050 1006 HCLGE_SHAPER_LVL_QSET, 1051 1007 &ir_para, max_tm_rate); ··· 1167 1123 return ret; 1168 1124 1169 1125 /* Qset dwrr */ 1170 - for (i = 0; i < kinfo->num_tc; i++) { 1126 + for (i = 0; i < kinfo->tc_info.num_tc; i++) { 1171 1127 ret = hclge_tm_qs_weight_cfg( 1172 1128 hdev, vport->qs_offset + i, 1173 1129 hdev->tm_info.pg_info[0].tc_dwrr[i]); ··· 1298 1254 if (ret) 1299 1255 return ret; 1300 1256 1301 - for (i = 0; i < kinfo->num_tc; i++) { 1257 + for (i = 0; i < kinfo->tc_info.num_tc; i++) { 1302 1258 u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode; 1303 1259 1304 1260 ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i, ··· 1528 1484 1529 1485 for (k = 0; k < hdev->num_alloc_vport; k++) { 1530 1486 kinfo = &vport[k].nic.kinfo; 1531 - kinfo->prio_tc[i] = prio_tc[i]; 1487 + kinfo->tc_info.prio_tc[i] = prio_tc[i]; 1532 1488 } 1533 1489 } 1534 1490 }
+11 -10
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 433 433 struct hnae3_knic_private_info *kinfo; 434 434 u16 new_tqps = hdev->num_tqps; 435 435 unsigned int i; 436 + u8 num_tc = 0; 436 437 437 438 kinfo = &nic->kinfo; 438 - kinfo->num_tc = 0; 439 439 kinfo->num_tx_desc = hdev->num_tx_desc; 440 440 kinfo->num_rx_desc = hdev->num_rx_desc; 441 441 kinfo->rx_buf_len = hdev->rx_buf_len; 442 442 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) 443 443 if (hdev->hw_tc_map & BIT(i)) 444 - kinfo->num_tc++; 444 + num_tc++; 445 445 446 - kinfo->rss_size 447 - = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); 448 - new_tqps = kinfo->rss_size * kinfo->num_tc; 446 + num_tc = num_tc ? num_tc : 1; 447 + kinfo->tc_info.num_tc = num_tc; 448 + kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); 449 + new_tqps = kinfo->rss_size * num_tc; 449 450 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 450 451 451 452 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, ··· 464 463 * and rss size with the actual vector numbers 465 464 */ 466 465 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 467 - kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, 466 + kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, 468 467 kinfo->rss_size); 469 468 470 469 return 0; ··· 3361 3360 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3362 3361 3363 3362 return min_t(u32, hdev->rss_size_max, 3364 - hdev->num_tqps / kinfo->num_tc); 3363 + hdev->num_tqps / kinfo->tc_info.num_tc); 3365 3364 } 3366 3365 3367 3366 /** ··· 3404 3403 kinfo->req_rss_size = new_tqps_num; 3405 3404 3406 3405 max_rss_size = min_t(u16, hdev->rss_size_max, 3407 - hdev->num_tqps / kinfo->num_tc); 3406 + hdev->num_tqps / kinfo->tc_info.num_tc); 3408 3407 3409 3408 /* Use the user's configuration when it is not larger than 3410 3409 * max_rss_size, otherwise, use the maximum specification value. 
··· 3416 3415 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3417 3416 kinfo->rss_size = max_rss_size; 3418 3417 3419 - kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; 3418 + kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3420 3419 } 3421 3420 3422 3421 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, ··· 3462 3461 dev_info(&hdev->pdev->dev, 3463 3462 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3464 3463 cur_rss_size, kinfo->rss_size, 3465 - cur_tqps, kinfo->rss_size * kinfo->num_tc); 3464 + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3466 3465 3467 3466 return ret; 3468 3467 }