Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'hns3-next'

Peng Li says:

====================
fix some bugs for hns3 driver

This patchset fixes some bugs in the hns3 driver.
[Patch 1/6 - Patch 3/6] fix bugs related to the VF driver.
[Patch 4/6 - Patch 6/6] fix bugs in ethtool_ops.set_channels.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+54 -44
+13 -14
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 249 249 return 0; 250 250 } 251 251 252 + static u16 hns3_get_max_available_channels(struct hnae3_handle *h) 253 + { 254 + u16 free_tqps, max_rss_size, max_tqps; 255 + 256 + h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); 257 + max_tqps = h->kinfo.num_tc * max_rss_size; 258 + 259 + return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); 260 + } 261 + 252 262 static int hns3_nic_net_up(struct net_device *netdev) 253 263 { 254 264 struct hns3_nic_priv *priv = netdev_priv(netdev); ··· 3023 3013 int ret; 3024 3014 3025 3015 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), 3026 - handle->kinfo.num_tqps); 3016 + hns3_get_max_available_channels(handle)); 3027 3017 if (!netdev) 3028 3018 return -ENOMEM; 3029 3019 ··· 3346 3336 return ret; 3347 3337 } 3348 3338 3349 - static u16 hns3_get_max_available_channels(struct net_device *netdev) 3350 - { 3351 - struct hnae3_handle *h = hns3_get_handle(netdev); 3352 - u16 free_tqps, max_rss_size, max_tqps; 3353 - 3354 - h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); 3355 - max_tqps = h->kinfo.num_tc * max_rss_size; 3356 - 3357 - return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); 3358 - } 3359 - 3360 3339 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) 3361 3340 { 3362 3341 struct hns3_nic_priv *priv = netdev_priv(netdev); ··· 3396 3397 if (ch->rx_count || ch->tx_count) 3397 3398 return -EINVAL; 3398 3399 3399 - if (new_tqp_num > hns3_get_max_available_channels(netdev) || 3400 + if (new_tqp_num > hns3_get_max_available_channels(h) || 3400 3401 new_tqp_num < kinfo->num_tc) { 3401 3402 dev_err(&netdev->dev, 3402 3403 "Change tqps fail, the tqp range is from %d to %d", 3403 3404 kinfo->num_tc, 3404 - hns3_get_max_available_channels(netdev)); 3405 + hns3_get_max_available_channels(h)); 3405 3406 return -EINVAL; 3406 3407 } 3407 3408
+24 -26
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 3717 3717 { 3718 3718 struct hclge_vport *vport = hclge_get_vport(handle); 3719 3719 struct hclge_dev *hdev = vport->back; 3720 - int i, queue_id, ret; 3720 + int i, ret; 3721 3721 3722 - for (i = 0; i < vport->alloc_tqps; i++) { 3723 - /* todo clear interrupt */ 3724 - /* ring enable */ 3725 - queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); 3726 - if (queue_id < 0) { 3727 - dev_warn(&hdev->pdev->dev, 3728 - "Get invalid queue id, ignore it\n"); 3729 - continue; 3730 - } 3722 + for (i = 0; i < vport->alloc_tqps; i++) 3723 + hclge_tqp_enable(hdev, i, 0, true); 3731 3724 3732 - hclge_tqp_enable(hdev, queue_id, 0, true); 3733 - } 3734 3725 /* mac enable */ 3735 3726 hclge_cfg_mac_mode(hdev, true); 3736 3727 clear_bit(HCLGE_STATE_DOWN, &hdev->state); ··· 3741 3750 { 3742 3751 struct hclge_vport *vport = hclge_get_vport(handle); 3743 3752 struct hclge_dev *hdev = vport->back; 3744 - int i, queue_id; 3753 + int i; 3745 3754 3746 - for (i = 0; i < vport->alloc_tqps; i++) { 3747 - /* Ring disable */ 3748 - queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); 3749 - if (queue_id < 0) { 3750 - dev_warn(&hdev->pdev->dev, 3751 - "Get invalid queue id, ignore it\n"); 3752 - continue; 3753 - } 3755 + for (i = 0; i < vport->alloc_tqps; i++) 3756 + hclge_tqp_enable(hdev, i, 0, false); 3754 3757 3755 - hclge_tqp_enable(hdev, queue_id, 0, false); 3756 - } 3757 3758 /* Mac disable */ 3758 3759 hclge_cfg_mac_mode(hdev, false); 3759 3760 ··· 4831 4848 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 4832 4849 } 4833 4850 4851 + static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, 4852 + u16 queue_id) 4853 + { 4854 + struct hnae3_queue *queue; 4855 + struct hclge_tqp *tqp; 4856 + 4857 + queue = handle->kinfo.tqp[queue_id]; 4858 + tqp = container_of(queue, struct hclge_tqp, q); 4859 + 4860 + return tqp->index; 4861 + } 4862 + 4834 4863 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 4835 4864 { 4836 4865 struct hclge_vport *vport = 
hclge_get_vport(handle); 4837 4866 struct hclge_dev *hdev = vport->back; 4838 4867 int reset_try_times = 0; 4839 4868 int reset_status; 4869 + u16 queue_gid; 4840 4870 int ret; 4871 + 4872 + queue_gid = hclge_covert_handle_qid_global(handle, queue_id); 4841 4873 4842 4874 ret = hclge_tqp_enable(hdev, queue_id, 0, false); 4843 4875 if (ret) { ··· 4860 4862 return; 4861 4863 } 4862 4864 4863 - ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true); 4865 + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 4864 4866 if (ret) { 4865 4867 dev_warn(&hdev->pdev->dev, 4866 4868 "Send reset tqp cmd fail, ret = %d\n", ret); ··· 4871 4873 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 4872 4874 /* Wait for tqp hw reset */ 4873 4875 msleep(20); 4874 - reset_status = hclge_get_reset_status(hdev, queue_id); 4876 + reset_status = hclge_get_reset_status(hdev, queue_gid); 4875 4877 if (reset_status) 4876 4878 break; 4877 4879 } ··· 4881 4883 return; 4882 4884 } 4883 4885 4884 - ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false); 4886 + ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 4885 4887 if (ret) { 4886 4888 dev_warn(&hdev->pdev->dev, 4887 4889 "Deassert the soft reset fail, ret = %d\n", ret);
+6 -4
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
··· 291 291 292 292 /* get the queue related info */ 293 293 memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16)); 294 - memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16)); 294 + memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16)); 295 295 memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16)); 296 296 memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16)); 297 297 ··· 333 333 struct hclge_mbx_vf_to_pf_cmd *req; 334 334 struct hclge_vport *vport; 335 335 struct hclge_desc *desc; 336 - int ret; 336 + int ret, flag; 337 337 338 + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); 338 339 /* handle all the mailbox requests in the queue */ 339 - while (hnae_get_bit(crq->desc[crq->next_to_use].flag, 340 - HCLGE_CMDQ_RX_OUTVLD_B)) { 340 + while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) { 341 341 desc = &crq->desc[crq->next_to_use]; 342 342 req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; 343 343 ··· 410 410 req->msg[0]); 411 411 break; 412 412 } 413 + crq->desc[crq->next_to_use].flag = 0; 413 414 hclge_mbx_ring_ptr_move_crq(crq); 415 + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); 414 416 } 415 417 416 418 /* Write back CMDQ_RQ header pointer, M7 need this pointer */
+10
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 1447 1447 ch->combined_count = hdev->num_tqps; 1448 1448 } 1449 1449 1450 + static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 1451 + u16 *free_tqps, u16 *max_rss_size) 1452 + { 1453 + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1454 + 1455 + *free_tqps = 0; 1456 + *max_rss_size = hdev->rss_size_max; 1457 + } 1458 + 1450 1459 static const struct hnae3_ae_ops hclgevf_ops = { 1451 1460 .init_ae_dev = hclgevf_init_ae_dev, 1452 1461 .uninit_ae_dev = hclgevf_uninit_ae_dev, ··· 1486 1477 .get_fw_version = hclgevf_get_fw_version, 1487 1478 .set_vlan_filter = hclgevf_set_vlan_filter, 1488 1479 .get_channels = hclgevf_get_channels, 1480 + .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 1489 1481 }; 1490 1482 1491 1483 static struct hnae3_ae_algo ae_algovf = {
+1
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
··· 171 171 req->msg[0]); 172 172 break; 173 173 } 174 + crq->desc[crq->next_to_use].flag = 0; 174 175 hclge_mbx_ring_ptr_move_crq(crq); 175 176 flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); 176 177 }