Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'hns3-dcb'

Yunsheng Lin says:

====================
Add support for DCB feature in hns3 driver

The patchset contains some enhancements related to DCB before
adding support for the DCB feature.

This patchset depends on the following patchset:
https://patchwork.ozlabs.org/cover/815646/
https://patchwork.ozlabs.org/cover/816145/

High Level Architecture:

[ lldpad ]
|
|
|
[ hns3_dcbnl ]
|
|
|
[ hclge_dcb ]
/ \
/ \
/ \
[ hclge_main ] [ hclge_tm ]

The current patch-set supports the following functionality:
Use of lldptool to configure the tc schedule mode, tc
bandwidth(if schedule mode is ETS), prio_tc_map and
PFC parameter.

V3: Drop mqprio support

V2: Fix for not defining variables in local loop.

V1: Initial Submit.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+927 -119
+9
drivers/net/ethernet/hisilicon/Kconfig
··· 103 103 family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 104 104 devices and their associated operations. 105 105 106 + config HNS3_DCB 107 + bool "Hisilicon HNS3 Data Center Bridge Support" 108 + default n 109 + depends on HNS3 && HNS3_HCLGE && DCB 110 + ---help--- 111 + Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver. 112 + 113 + If unsure, say N. 114 + 106 115 endif # NET_VENDOR_HISILICON
+17
drivers/net/ethernet/hisilicon/hns3/hnae3.h
··· 28 28 */ 29 29 30 30 #include <linux/acpi.h> 31 + #include <linux/dcbnl.h> 31 32 #include <linux/delay.h> 32 33 #include <linux/device.h> 33 34 #include <linux/module.h> ··· 132 131 int (*init_instance)(struct hnae3_handle *handle); 133 132 void (*uninit_instance)(struct hnae3_handle *handle, bool reset); 134 133 void (*link_status_change)(struct hnae3_handle *handle, bool state); 134 + int (*setup_tc)(struct hnae3_handle *handle, u8 tc); 135 135 }; 136 136 137 137 #define HNAE3_CLIENT_NAME_LENGTH 16 ··· 365 363 u16 vlan, u8 qos, __be16 proto); 366 364 }; 367 365 366 + struct hnae3_dcb_ops { 367 + /* IEEE 802.1Qaz std */ 368 + int (*ieee_getets)(struct hnae3_handle *, struct ieee_ets *); 369 + int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *); 370 + int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *); 371 + int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *); 372 + 373 + /* DCBX configuration */ 374 + u8 (*getdcbx)(struct hnae3_handle *); 375 + u8 (*setdcbx)(struct hnae3_handle *, u8); 376 + 377 + int (*map_update)(struct hnae3_handle *); 378 + }; 379 + 368 380 struct hnae3_ae_algo { 369 381 const struct hnae3_ae_ops *ops; 370 382 struct list_head node; ··· 410 394 411 395 u16 num_tqps; /* total number of TQPs in this handle */ 412 396 struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ 397 + const struct hnae3_dcb_ops *dcb_ops; 413 398 }; 414 399 415 400 struct hnae3_roce_private_info {
+4
drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
··· 7 7 obj-$(CONFIG_HNS3_HCLGE) += hclge.o 8 8 hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o 9 9 10 + hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o 11 + 10 12 obj-$(CONFIG_HNS3_ENET) += hns3.o 11 13 hns3-objs = hns3_enet.o hns3_ethtool.o 14 + 15 + hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
+6
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
··· 311 311 struct hclge_priv_buf { 312 312 struct hclge_waterline wl; /* Waterline for low and high*/ 313 313 u32 buf_size; /* TC private buffer size */ 314 + u32 tx_buf_size; 314 315 u32 enable; /* Enable TC private buffer or not */ 315 316 }; 316 317 ··· 320 319 struct hclge_waterline self; 321 320 struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM]; 322 321 u32 buf_size; 322 + }; 323 + 324 + struct hclge_pkt_buf_alloc { 325 + struct hclge_priv_buf priv_buf[HCLGE_MAX_TC_NUM]; 326 + struct hclge_shared_buf s_buf; 323 327 }; 324 328 325 329 #define HCLGE_RX_COM_WL_EN_B 15
+304
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
··· 1 + /* 2 + * Copyright (c) 2016-2017 Hisilicon Limited. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + */ 9 + 10 + #include "hclge_main.h" 11 + #include "hclge_tm.h" 12 + #include "hnae3.h" 13 + 14 + #define BW_PERCENT 100 15 + 16 + static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev, 17 + struct ieee_ets *ets) 18 + { 19 + u8 i; 20 + 21 + for (i = 0; i < HNAE3_MAX_TC; i++) { 22 + switch (ets->tc_tsa[i]) { 23 + case IEEE_8021QAZ_TSA_STRICT: 24 + hdev->tm_info.tc_info[i].tc_sch_mode = 25 + HCLGE_SCH_MODE_SP; 26 + hdev->tm_info.pg_info[0].tc_dwrr[i] = 0; 27 + break; 28 + case IEEE_8021QAZ_TSA_ETS: 29 + hdev->tm_info.tc_info[i].tc_sch_mode = 30 + HCLGE_SCH_MODE_DWRR; 31 + hdev->tm_info.pg_info[0].tc_dwrr[i] = 32 + ets->tc_tx_bw[i]; 33 + break; 34 + default: 35 + /* Hardware only supports SP (strict priority) 36 + * or ETS (enhanced transmission selection) 37 + * algorithms, if we receive some other value 38 + * from dcbnl, then throw an error. 
39 + */ 40 + return -EINVAL; 41 + } 42 + } 43 + 44 + return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc); 45 + } 46 + 47 + static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev, 48 + struct ieee_ets *ets) 49 + { 50 + u32 i; 51 + 52 + memset(ets, 0, sizeof(*ets)); 53 + ets->willing = 1; 54 + ets->ets_cap = hdev->tc_max; 55 + 56 + for (i = 0; i < HNAE3_MAX_TC; i++) { 57 + ets->prio_tc[i] = hdev->tm_info.prio_tc[i]; 58 + ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; 59 + 60 + if (hdev->tm_info.tc_info[i].tc_sch_mode == 61 + HCLGE_SCH_MODE_SP) 62 + ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT; 63 + else 64 + ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; 65 + } 66 + } 67 + 68 + /* IEEE std */ 69 + static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) 70 + { 71 + struct hclge_vport *vport = hclge_get_vport(h); 72 + struct hclge_dev *hdev = vport->back; 73 + 74 + hclge_tm_info_to_ieee_ets(hdev, ets); 75 + 76 + return 0; 77 + } 78 + 79 + static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, 80 + u8 *tc, bool *changed) 81 + { 82 + u32 total_ets_bw = 0; 83 + u8 max_tc = 0; 84 + u8 i; 85 + 86 + for (i = 0; i < HNAE3_MAX_TC; i++) { 87 + if (ets->prio_tc[i] >= hdev->tc_max || 88 + i >= hdev->tc_max) 89 + return -EINVAL; 90 + 91 + if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i]) 92 + *changed = true; 93 + 94 + if (ets->prio_tc[i] > max_tc) 95 + max_tc = ets->prio_tc[i]; 96 + 97 + switch (ets->tc_tsa[i]) { 98 + case IEEE_8021QAZ_TSA_STRICT: 99 + if (hdev->tm_info.tc_info[i].tc_sch_mode != 100 + HCLGE_SCH_MODE_SP) 101 + *changed = true; 102 + break; 103 + case IEEE_8021QAZ_TSA_ETS: 104 + if (hdev->tm_info.tc_info[i].tc_sch_mode != 105 + HCLGE_SCH_MODE_DWRR) 106 + *changed = true; 107 + 108 + total_ets_bw += ets->tc_tx_bw[i]; 109 + break; 110 + default: 111 + return -EINVAL; 112 + } 113 + } 114 + 115 + if (total_ets_bw != BW_PERCENT) 116 + return -EINVAL; 117 + 118 + *tc = max_tc + 1; 119 + if (*tc != hdev->tm_info.num_tc) 120 + 
*changed = true; 121 + 122 + return 0; 123 + } 124 + 125 + static int hclge_map_update(struct hnae3_handle *h) 126 + { 127 + struct hclge_vport *vport = hclge_get_vport(h); 128 + struct hclge_dev *hdev = vport->back; 129 + int ret; 130 + 131 + ret = hclge_tm_map_cfg(hdev); 132 + if (ret) 133 + return ret; 134 + 135 + ret = hclge_tm_schd_mode_hw(hdev); 136 + if (ret) 137 + return ret; 138 + 139 + ret = hclge_pause_setup_hw(hdev); 140 + if (ret) 141 + return ret; 142 + 143 + ret = hclge_buffer_alloc(hdev); 144 + if (ret) 145 + return ret; 146 + 147 + return hclge_rss_init_hw(hdev); 148 + } 149 + 150 + static int hclge_client_setup_tc(struct hclge_dev *hdev) 151 + { 152 + struct hclge_vport *vport = hdev->vport; 153 + struct hnae3_client *client; 154 + struct hnae3_handle *handle; 155 + int ret; 156 + u32 i; 157 + 158 + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 159 + handle = &vport[i].nic; 160 + client = handle->client; 161 + 162 + if (!client || !client->ops || !client->ops->setup_tc) 163 + continue; 164 + 165 + ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc); 166 + if (ret) 167 + return ret; 168 + } 169 + 170 + return 0; 171 + } 172 + 173 + static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) 174 + { 175 + struct hclge_vport *vport = hclge_get_vport(h); 176 + struct hclge_dev *hdev = vport->back; 177 + bool map_changed = false; 178 + u8 num_tc = 0; 179 + int ret; 180 + 181 + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) 182 + return -EINVAL; 183 + 184 + ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed); 185 + if (ret) 186 + return ret; 187 + 188 + hclge_tm_schd_info_update(hdev, num_tc); 189 + 190 + ret = hclge_ieee_ets_to_tm_info(hdev, ets); 191 + if (ret) 192 + return ret; 193 + 194 + if (map_changed) { 195 + ret = hclge_client_setup_tc(hdev); 196 + if (ret) 197 + return ret; 198 + } 199 + 200 + return hclge_tm_dwrr_cfg(hdev); 201 + } 202 + 203 + static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc 
*pfc) 204 + { 205 + struct hclge_vport *vport = hclge_get_vport(h); 206 + struct hclge_dev *hdev = vport->back; 207 + u8 i, j, pfc_map, *prio_tc; 208 + 209 + memset(pfc, 0, sizeof(*pfc)); 210 + pfc->pfc_cap = hdev->pfc_max; 211 + prio_tc = hdev->tm_info.prio_tc; 212 + pfc_map = hdev->tm_info.hw_pfc_map; 213 + 214 + /* Pfc setting is based on TC */ 215 + for (i = 0; i < hdev->tm_info.num_tc; i++) { 216 + for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) { 217 + if ((prio_tc[j] == i) && (pfc_map & BIT(i))) 218 + pfc->pfc_en |= BIT(j); 219 + } 220 + } 221 + 222 + return 0; 223 + } 224 + 225 + static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) 226 + { 227 + struct hclge_vport *vport = hclge_get_vport(h); 228 + struct hclge_dev *hdev = vport->back; 229 + u8 i, j, pfc_map, *prio_tc; 230 + 231 + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) 232 + return -EINVAL; 233 + 234 + prio_tc = hdev->tm_info.prio_tc; 235 + pfc_map = 0; 236 + 237 + for (i = 0; i < hdev->tm_info.num_tc; i++) { 238 + for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) { 239 + if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) { 240 + pfc_map |= BIT(i); 241 + break; 242 + } 243 + } 244 + } 245 + 246 + if (pfc_map == hdev->tm_info.hw_pfc_map) 247 + return 0; 248 + 249 + hdev->tm_info.hw_pfc_map = pfc_map; 250 + 251 + return hclge_pause_setup_hw(hdev); 252 + } 253 + 254 + /* DCBX configuration */ 255 + static u8 hclge_getdcbx(struct hnae3_handle *h) 256 + { 257 + struct hclge_vport *vport = hclge_get_vport(h); 258 + struct hclge_dev *hdev = vport->back; 259 + 260 + return hdev->dcbx_cap; 261 + } 262 + 263 + static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode) 264 + { 265 + struct hclge_vport *vport = hclge_get_vport(h); 266 + struct hclge_dev *hdev = vport->back; 267 + 268 + /* No support for LLD_MANAGED modes or CEE */ 269 + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || 270 + (mode & DCB_CAP_DCBX_VER_CEE) || 271 + !(mode & DCB_CAP_DCBX_HOST)) 272 + return 1; 273 + 274 + hdev->dcbx_cap = mode; 275 + 
276 + return 0; 277 + } 278 + 279 + static const struct hnae3_dcb_ops hns3_dcb_ops = { 280 + .ieee_getets = hclge_ieee_getets, 281 + .ieee_setets = hclge_ieee_setets, 282 + .ieee_getpfc = hclge_ieee_getpfc, 283 + .ieee_setpfc = hclge_ieee_setpfc, 284 + .getdcbx = hclge_getdcbx, 285 + .setdcbx = hclge_setdcbx, 286 + .map_update = hclge_map_update, 287 + }; 288 + 289 + void hclge_dcb_ops_set(struct hclge_dev *hdev) 290 + { 291 + struct hclge_vport *vport = hdev->vport; 292 + struct hnae3_knic_private_info *kinfo; 293 + 294 + /* Hdev does not support DCB or vport is 295 + * not a pf, then dcb_ops is not set. 296 + */ 297 + if (!hnae3_dev_dcb_supported(hdev) || 298 + vport->vport_id != 0) 299 + return; 300 + 301 + kinfo = &vport->nic.kinfo; 302 + kinfo->dcb_ops = &hns3_dcb_ops; 303 + hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST; 304 + }
+21
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
··· 1 + /* 2 + * Copyright (c) 2016~2017 Hisilicon Limited. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + */ 9 + 10 + #ifndef __HCLGE_DCB_H__ 11 + #define __HCLGE_DCB_H__ 12 + 13 + #include "hclge_main.h" 14 + 15 + #ifdef CONFIG_HNS3_DCB 16 + void hclge_dcb_ops_set(struct hclge_dev *hdev); 17 + #else 18 + static inline void hclge_dcb_ops_set(struct hclge_dev *hdev) {} 19 + #endif 20 + 21 + #endif /* __HCLGE_DCB_H__ */
+142 -72
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 19 19 #include <linux/platform_device.h> 20 20 21 21 #include "hclge_cmd.h" 22 + #include "hclge_dcb.h" 22 23 #include "hclge_main.h" 23 24 #include "hclge_mdio.h" 24 25 #include "hclge_tm.h" ··· 31 30 #define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) 32 31 #define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) 33 32 34 - static int hclge_rss_init_hw(struct hclge_dev *hdev); 35 33 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, 36 34 enum hclge_mta_dmac_sel_type mta_mac_sel, 37 35 bool enable); ··· 1058 1058 hdev->hw.mac.phy_addr = cfg.phy_addr; 1059 1059 hdev->num_desc = cfg.tqp_desc_num; 1060 1060 hdev->tm_info.num_pg = 1; 1061 - hdev->tm_info.num_tc = cfg.tc_num; 1061 + hdev->tc_max = cfg.tc_num; 1062 1062 hdev->tm_info.hw_pfc_map = 0; 1063 1063 1064 1064 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); ··· 1067 1067 return ret; 1068 1068 } 1069 1069 1070 - if ((hdev->tm_info.num_tc > HNAE3_MAX_TC) || 1071 - (hdev->tm_info.num_tc < 1)) { 1070 + if ((hdev->tc_max > HNAE3_MAX_TC) || 1071 + (hdev->tc_max < 1)) { 1072 1072 dev_warn(&hdev->pdev->dev, "TC num = %d.\n", 1073 - hdev->tm_info.num_tc); 1074 - hdev->tm_info.num_tc = 1; 1073 + hdev->tc_max); 1074 + hdev->tc_max = 1; 1075 1075 } 1076 1076 1077 + /* Dev does not support DCB */ 1078 + if (!hnae3_dev_dcb_supported(hdev)) { 1079 + hdev->tc_max = 1; 1080 + hdev->pfc_max = 0; 1081 + } else { 1082 + hdev->pfc_max = hdev->tc_max; 1083 + } 1084 + 1085 + hdev->tm_info.num_tc = hdev->tc_max; 1086 + 1077 1087 /* Currently not support uncontiuous tc */ 1078 - for (i = 0; i < cfg.tc_num; i++) 1088 + for (i = 0; i < hdev->tm_info.num_tc; i++) 1079 1089 hnae_set_bit(hdev->hw_tc_map, i, 1); 1080 1090 1081 1091 if (!hdev->num_vmdq_vport && !hdev->num_req_vfs) ··· 1334 1324 return 0; 1335 1325 } 1336 1326 1337 - static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size) 1327 + static int hclge_cmd_alloc_tx_buff(struct 
hclge_dev *hdev, 1328 + struct hclge_pkt_buf_alloc *buf_alloc) 1338 1329 { 1339 1330 /* TX buffer size is unit by 128 byte */ 1340 1331 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 ··· 1348 1337 req = (struct hclge_tx_buff_alloc *)desc.data; 1349 1338 1350 1339 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1351 - for (i = 0; i < HCLGE_TC_NUM; i++) 1340 + for (i = 0; i < HCLGE_TC_NUM; i++) { 1341 + u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1342 + 1352 1343 req->tx_pkt_buff[i] = 1353 1344 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1354 1345 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1346 + } 1355 1347 1356 1348 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1357 1349 if (ret) { ··· 1366 1352 return 0; 1367 1353 } 1368 1354 1369 - static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, u32 buf_size) 1355 + static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1356 + struct hclge_pkt_buf_alloc *buf_alloc) 1370 1357 { 1371 - int ret = hclge_cmd_alloc_tx_buff(hdev, buf_size); 1358 + int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1372 1359 1373 1360 if (ret) { 1374 1361 dev_err(&hdev->pdev->dev, ··· 1402 1387 } 1403 1388 1404 1389 /* Get the number of pfc enabled TCs, which have private buffer */ 1405 - static int hclge_get_pfc_priv_num(struct hclge_dev *hdev) 1390 + static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1391 + struct hclge_pkt_buf_alloc *buf_alloc) 1406 1392 { 1407 1393 struct hclge_priv_buf *priv; 1408 1394 int i, cnt = 0; 1409 1395 1410 1396 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1411 - priv = &hdev->priv_buf[i]; 1397 + priv = &buf_alloc->priv_buf[i]; 1412 1398 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1413 1399 priv->enable) 1414 1400 cnt++; ··· 1419 1403 } 1420 1404 1421 1405 /* Get the number of pfc disabled TCs, which have private buffer */ 1422 - static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev) 1406 + static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1407 + struct hclge_pkt_buf_alloc *buf_alloc) 
1423 1408 { 1424 1409 struct hclge_priv_buf *priv; 1425 1410 int i, cnt = 0; 1426 1411 1427 1412 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1428 - priv = &hdev->priv_buf[i]; 1413 + priv = &buf_alloc->priv_buf[i]; 1429 1414 if (hdev->hw_tc_map & BIT(i) && 1430 1415 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1431 1416 priv->enable) ··· 1436 1419 return cnt; 1437 1420 } 1438 1421 1439 - static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev) 1422 + static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1440 1423 { 1441 1424 struct hclge_priv_buf *priv; 1442 1425 u32 rx_priv = 0; 1443 1426 int i; 1444 1427 1445 1428 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1446 - priv = &hdev->priv_buf[i]; 1429 + priv = &buf_alloc->priv_buf[i]; 1447 1430 if (priv->enable) 1448 1431 rx_priv += priv->buf_size; 1449 1432 } 1450 1433 return rx_priv; 1451 1434 } 1452 1435 1453 - static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) 1436 + static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1437 + { 1438 + u32 i, total_tx_size = 0; 1439 + 1440 + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1441 + total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 1442 + 1443 + return total_tx_size; 1444 + } 1445 + 1446 + static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, 1447 + struct hclge_pkt_buf_alloc *buf_alloc, 1448 + u32 rx_all) 1454 1449 { 1455 1450 u32 shared_buf_min, shared_buf_tc, shared_std; 1456 1451 int tc_num, pfc_enable_num; ··· 1483 1454 hdev->mps; 1484 1455 shared_std = max_t(u32, shared_buf_min, shared_buf_tc); 1485 1456 1486 - rx_priv = hclge_get_rx_priv_buff_alloced(hdev); 1457 + rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); 1487 1458 if (rx_all <= rx_priv + shared_std) 1488 1459 return false; 1489 1460 1490 1461 shared_buf = rx_all - rx_priv; 1491 - hdev->s_buf.buf_size = shared_buf; 1492 - hdev->s_buf.self.high = shared_buf; 1493 - hdev->s_buf.self.low = 2 * hdev->mps; 1462 + buf_alloc->s_buf.buf_size = 
shared_buf; 1463 + buf_alloc->s_buf.self.high = shared_buf; 1464 + buf_alloc->s_buf.self.low = 2 * hdev->mps; 1494 1465 1495 1466 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1496 1467 if ((hdev->hw_tc_map & BIT(i)) && 1497 1468 (hdev->tm_info.hw_pfc_map & BIT(i))) { 1498 - hdev->s_buf.tc_thrd[i].low = hdev->mps; 1499 - hdev->s_buf.tc_thrd[i].high = 2 * hdev->mps; 1469 + buf_alloc->s_buf.tc_thrd[i].low = hdev->mps; 1470 + buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps; 1500 1471 } else { 1501 - hdev->s_buf.tc_thrd[i].low = 0; 1502 - hdev->s_buf.tc_thrd[i].high = hdev->mps; 1472 + buf_alloc->s_buf.tc_thrd[i].low = 0; 1473 + buf_alloc->s_buf.tc_thrd[i].high = hdev->mps; 1503 1474 } 1504 1475 } 1505 1476 1506 1477 return true; 1507 1478 } 1508 1479 1480 + static int hclge_tx_buffer_calc(struct hclge_dev *hdev, 1481 + struct hclge_pkt_buf_alloc *buf_alloc) 1482 + { 1483 + u32 i, total_size; 1484 + 1485 + total_size = hdev->pkt_buf_size; 1486 + 1487 + /* alloc tx buffer for all enabled tc */ 1488 + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1489 + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1490 + 1491 + if (total_size < HCLGE_DEFAULT_TX_BUF) 1492 + return -ENOMEM; 1493 + 1494 + if (hdev->hw_tc_map & BIT(i)) 1495 + priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF; 1496 + else 1497 + priv->tx_buf_size = 0; 1498 + 1499 + total_size -= priv->tx_buf_size; 1500 + } 1501 + 1502 + return 0; 1503 + } 1504 + 1509 1505 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs 1510 1506 * @hdev: pointer to struct hclge_dev 1511 - * @tx_size: the allocated tx buffer for all TCs 1507 + * @buf_alloc: pointer to buffer calculation data 1512 1508 * @return: 0: calculate sucessful, negative: fail 1513 1509 */ 1514 - int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) 1510 + int hclge_rx_buffer_calc(struct hclge_dev *hdev, 1511 + struct hclge_pkt_buf_alloc *buf_alloc) 1515 1512 { 1516 - u32 rx_all = hdev->pkt_buf_size - tx_size; 1513 + u32 rx_all = 
hdev->pkt_buf_size; 1517 1514 int no_pfc_priv_num, pfc_priv_num; 1518 1515 struct hclge_priv_buf *priv; 1519 1516 int i; 1517 + 1518 + rx_all -= hclge_get_tx_buff_alloced(buf_alloc); 1520 1519 1521 1520 /* When DCB is not supported, rx private 1522 1521 * buffer is not allocated. 1523 1522 */ 1524 1523 if (!hnae3_dev_dcb_supported(hdev)) { 1525 - if (!hclge_is_rx_buf_ok(hdev, rx_all)) 1524 + if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1526 1525 return -ENOMEM; 1527 1526 1528 1527 return 0; ··· 1558 1501 1559 1502 /* step 1, try to alloc private buffer for all enabled tc */ 1560 1503 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1561 - priv = &hdev->priv_buf[i]; 1504 + priv = &buf_alloc->priv_buf[i]; 1562 1505 if (hdev->hw_tc_map & BIT(i)) { 1563 1506 priv->enable = 1; 1564 1507 if (hdev->tm_info.hw_pfc_map & BIT(i)) { ··· 1579 1522 } 1580 1523 } 1581 1524 1582 - if (hclge_is_rx_buf_ok(hdev, rx_all)) 1525 + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1583 1526 return 0; 1584 1527 1585 1528 /* step 2, try to decrease the buffer size of 1586 1529 * no pfc TC's private buffer 1587 1530 */ 1588 1531 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1589 - priv = &hdev->priv_buf[i]; 1532 + priv = &buf_alloc->priv_buf[i]; 1590 1533 1591 1534 priv->enable = 0; 1592 1535 priv->wl.low = 0; ··· 1609 1552 } 1610 1553 } 1611 1554 1612 - if (hclge_is_rx_buf_ok(hdev, rx_all)) 1555 + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1613 1556 return 0; 1614 1557 1615 1558 /* step 3, try to reduce the number of pfc disabled TCs, 1616 1559 * which have private buffer 1617 1560 */ 1618 1561 /* get the total no pfc enable TC number, which have private buffer */ 1619 - no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev); 1562 + no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); 1620 1563 1621 1564 /* let the last to be cleared first */ 1622 1565 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1623 - priv = &hdev->priv_buf[i]; 1566 + priv = &buf_alloc->priv_buf[i]; 1624 1567 1625 
1568 if (hdev->hw_tc_map & BIT(i) && 1626 1569 !(hdev->tm_info.hw_pfc_map & BIT(i))) { ··· 1632 1575 no_pfc_priv_num--; 1633 1576 } 1634 1577 1635 - if (hclge_is_rx_buf_ok(hdev, rx_all) || 1578 + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1636 1579 no_pfc_priv_num == 0) 1637 1580 break; 1638 1581 } 1639 1582 1640 - if (hclge_is_rx_buf_ok(hdev, rx_all)) 1583 + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1641 1584 return 0; 1642 1585 1643 1586 /* step 4, try to reduce the number of pfc enabled TCs 1644 1587 * which have private buffer. 1645 1588 */ 1646 - pfc_priv_num = hclge_get_pfc_priv_num(hdev); 1589 + pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 1647 1590 1648 1591 /* let the last to be cleared first */ 1649 1592 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1650 - priv = &hdev->priv_buf[i]; 1593 + priv = &buf_alloc->priv_buf[i]; 1651 1594 1652 1595 if (hdev->hw_tc_map & BIT(i) && 1653 1596 hdev->tm_info.hw_pfc_map & BIT(i)) { ··· 1659 1602 pfc_priv_num--; 1660 1603 } 1661 1604 1662 - if (hclge_is_rx_buf_ok(hdev, rx_all) || 1605 + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1663 1606 pfc_priv_num == 0) 1664 1607 break; 1665 1608 } 1666 - if (hclge_is_rx_buf_ok(hdev, rx_all)) 1609 + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1667 1610 return 0; 1668 1611 1669 1612 return -ENOMEM; 1670 1613 } 1671 1614 1672 - static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) 1615 + static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 1616 + struct hclge_pkt_buf_alloc *buf_alloc) 1673 1617 { 1674 1618 struct hclge_rx_priv_buff *req; 1675 1619 struct hclge_desc desc; ··· 1682 1624 1683 1625 /* Alloc private buffer TCs */ 1684 1626 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1685 - struct hclge_priv_buf *priv = &hdev->priv_buf[i]; 1627 + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1686 1628 1687 1629 req->buf_num[i] = 1688 1630 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); ··· 1691 1633 } 1692 1634 1693 1635 
req->shared_buf = 1694 - cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 1636 + cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 1695 1637 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 1696 1638 1697 1639 ret = hclge_cmd_send(&hdev->hw, &desc, 1); ··· 1706 1648 1707 1649 #define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) 1708 1650 1709 - static int hclge_rx_priv_wl_config(struct hclge_dev *hdev) 1651 + static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 1652 + struct hclge_pkt_buf_alloc *buf_alloc) 1710 1653 { 1711 1654 struct hclge_rx_priv_wl_buf *req; 1712 1655 struct hclge_priv_buf *priv; ··· 1727 1668 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1728 1669 1729 1670 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1730 - priv = &hdev->priv_buf[i * HCLGE_TC_NUM_ONE_DESC + j]; 1671 + u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 1672 + 1673 + priv = &buf_alloc->priv_buf[idx]; 1731 1674 req->tc_wl[j].high = 1732 1675 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 1733 1676 req->tc_wl[j].high |= ··· 1754 1693 return 0; 1755 1694 } 1756 1695 1757 - static int hclge_common_thrd_config(struct hclge_dev *hdev) 1696 + static int hclge_common_thrd_config(struct hclge_dev *hdev, 1697 + struct hclge_pkt_buf_alloc *buf_alloc) 1758 1698 { 1759 - struct hclge_shared_buf *s_buf = &hdev->s_buf; 1699 + struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 1760 1700 struct hclge_rx_com_thrd *req; 1761 1701 struct hclge_desc desc[2]; 1762 1702 struct hclge_tc_thrd *tc; ··· 1801 1739 return 0; 1802 1740 } 1803 1741 1804 - static int hclge_common_wl_config(struct hclge_dev *hdev) 1742 + static int hclge_common_wl_config(struct hclge_dev *hdev, 1743 + struct hclge_pkt_buf_alloc *buf_alloc) 1805 1744 { 1806 - struct hclge_shared_buf *buf = &hdev->s_buf; 1745 + struct hclge_shared_buf *buf = &buf_alloc->s_buf; 1807 1746 struct hclge_rx_com_wl *req; 1808 1747 struct hclge_desc desc; 1809 1748 int ret; ··· 1834 1771 1835 1772 int hclge_buffer_alloc(struct hclge_dev *hdev) 1836 
1773 { 1837 - u32 tx_buf_size = HCLGE_DEFAULT_TX_BUF; 1774 + struct hclge_pkt_buf_alloc *pkt_buf; 1838 1775 int ret; 1839 1776 1840 - hdev->priv_buf = devm_kmalloc_array(&hdev->pdev->dev, HCLGE_MAX_TC_NUM, 1841 - sizeof(struct hclge_priv_buf), 1842 - GFP_KERNEL | __GFP_ZERO); 1843 - if (!hdev->priv_buf) 1777 + pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 1778 + if (!pkt_buf) 1844 1779 return -ENOMEM; 1845 1780 1846 - ret = hclge_tx_buffer_alloc(hdev, tx_buf_size); 1781 + ret = hclge_tx_buffer_calc(hdev, pkt_buf); 1782 + if (ret) { 1783 + dev_err(&hdev->pdev->dev, 1784 + "could not calc tx buffer size for all TCs %d\n", ret); 1785 + goto out; 1786 + } 1787 + 1788 + ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 1847 1789 if (ret) { 1848 1790 dev_err(&hdev->pdev->dev, 1849 1791 "could not alloc tx buffers %d\n", ret); 1850 - return ret; 1792 + goto out; 1851 1793 } 1852 1794 1853 - ret = hclge_rx_buffer_calc(hdev, tx_buf_size); 1795 + ret = hclge_rx_buffer_calc(hdev, pkt_buf); 1854 1796 if (ret) { 1855 1797 dev_err(&hdev->pdev->dev, 1856 1798 "could not calc rx priv buffer size for all TCs %d\n", 1857 1799 ret); 1858 - return ret; 1800 + goto out; 1859 1801 } 1860 1802 1861 - ret = hclge_rx_priv_buf_alloc(hdev); 1803 + ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 1862 1804 if (ret) { 1863 1805 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 1864 1806 ret); 1865 - return ret; 1807 + goto out; 1866 1808 } 1867 1809 1868 1810 if (hnae3_dev_dcb_supported(hdev)) { 1869 - ret = hclge_rx_priv_wl_config(hdev); 1811 + ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 1870 1812 if (ret) { 1871 1813 dev_err(&hdev->pdev->dev, 1872 1814 "could not configure rx private waterline %d\n", 1873 1815 ret); 1874 - return ret; 1816 + goto out; 1875 1817 } 1876 1818 1877 - ret = hclge_common_thrd_config(hdev); 1819 + ret = hclge_common_thrd_config(hdev, pkt_buf); 1878 1820 if (ret) { 1879 1821 dev_err(&hdev->pdev->dev, 1880 1822 "could not configure common threshold %d\n", 
1881 1823 ret); 1882 - return ret; 1824 + goto out; 1883 1825 } 1884 1826 } 1885 1827 1886 - ret = hclge_common_wl_config(hdev); 1887 - if (ret) { 1828 + ret = hclge_common_wl_config(hdev, pkt_buf); 1829 + if (ret) 1888 1830 dev_err(&hdev->pdev->dev, 1889 1831 "could not configure common waterline %d\n", ret); 1890 - return ret; 1891 - } 1892 1832 1893 - return 0; 1833 + out: 1834 + kfree(pkt_buf); 1835 + return ret; 1894 1836 } 1895 1837 1896 1838 static int hclge_init_roce_base_info(struct hclge_vport *vport) ··· 2665 2597 return hdev->rss_size_max; 2666 2598 } 2667 2599 2668 - static int hclge_rss_init_hw(struct hclge_dev *hdev) 2600 + int hclge_rss_init_hw(struct hclge_dev *hdev) 2669 2601 { 2670 2602 const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 2671 2603 struct hclge_vport *vport = hdev->vport; ··· 4248 4180 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 4249 4181 return ret; 4250 4182 } 4183 + 4184 + hclge_dcb_ops_set(hdev); 4251 4185 4252 4186 setup_timer(&hdev->service_timer, hclge_service_timer, 4253 4187 (unsigned long)hdev);
+6 -2
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
··· 421 421 #define HCLGE_FLAG_TC_BASE_SCH_MODE 1 422 422 #define HCLGE_FLAG_VNET_BASE_SCH_MODE 2 423 423 u8 tx_sch_mode; 424 + u8 tc_max; 425 + u8 pfc_max; 424 426 425 427 u8 default_up; 428 + u8 dcbx_cap; 426 429 struct hclge_tm_info tm_info; 427 430 428 431 u16 num_msi; ··· 466 463 467 464 u32 pkt_buf_size; /* Total pf buf size for tx/rx */ 468 465 u32 mps; /* Max packet size */ 469 - struct hclge_priv_buf *priv_buf; 470 - struct hclge_shared_buf s_buf; 471 466 472 467 enum hclge_mta_dmac_sel_type mta_mac_sel_type; 473 468 bool enable_mta; /* Mutilcast filter enable */ ··· 518 517 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); 519 518 int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, 520 519 bool is_kill, u16 vlan, u8 qos, __be16 proto); 520 + 521 + int hclge_buffer_alloc(struct hclge_dev *hdev); 522 + int hclge_rss_init_hw(struct hclge_dev *hdev); 521 523 #endif
+201 -30
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
··· 124 124 return hclge_cmd_send(&hdev->hw, &desc, 1); 125 125 } 126 126 127 + static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, 128 + u8 pfc_bitmap) 129 + { 130 + struct hclge_desc desc; 131 + struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data; 132 + 133 + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false); 134 + 135 + pfc->tx_rx_en_bitmap = tx_rx_bitmap; 136 + pfc->pri_en_bitmap = pfc_bitmap; 137 + 138 + return hclge_cmd_send(&hdev->hw, &desc, 1); 139 + } 140 + 127 141 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) 128 142 { 129 143 u8 tc; ··· 301 287 return hclge_cmd_send(&hdev->hw, &desc, 1); 302 288 } 303 289 290 + static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) 291 + { 292 + struct hclge_port_shapping_cmd *shap_cfg_cmd; 293 + struct hclge_desc desc; 294 + u32 shapping_para = 0; 295 + u8 ir_u, ir_b, ir_s; 296 + int ret; 297 + 298 + ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE, 299 + HCLGE_SHAPER_LVL_PORT, 300 + &ir_b, &ir_u, &ir_s); 301 + if (ret) 302 + return ret; 303 + 304 + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false); 305 + shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; 306 + 307 + hclge_tm_set_field(shapping_para, IR_B, ir_b); 308 + hclge_tm_set_field(shapping_para, IR_U, ir_u); 309 + hclge_tm_set_field(shapping_para, IR_S, ir_s); 310 + hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF); 311 + hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF); 312 + 313 + shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para); 314 + 315 + return hclge_cmd_send(&hdev->hw, &desc, 1); 316 + } 317 + 304 318 static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, 305 319 enum hclge_shap_bucket bucket, u8 pri_id, 306 320 u8 ir_b, u8 ir_u, u8 ir_s, ··· 388 346 return hclge_cmd_send(&hdev->hw, &desc, 1); 389 347 } 390 348 391 - static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id) 
349 + static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode) 392 350 { 393 351 struct hclge_desc desc; 394 352 395 353 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false); 396 354 397 - if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR) 355 + if (mode == HCLGE_SCH_MODE_DWRR) 398 356 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); 399 357 else 400 358 desc.data[1] = 0; ··· 486 444 hdev->tm_info.prio_tc[i] = 487 445 (i >= hdev->tm_info.num_tc) ? 0 : i; 488 446 489 - hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; 447 + /* DCB is enabled if we have more than 1 TC */ 448 + if (hdev->tm_info.num_tc > 1) 449 + hdev->flag |= HCLGE_FLAG_DCB_ENABLE; 450 + else 451 + hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; 490 452 } 491 453 492 454 static void hclge_tm_pg_info_init(struct hclge_dev *hdev) ··· 516 470 } 517 471 } 518 472 473 + static void hclge_pfc_info_init(struct hclge_dev *hdev) 474 + { 475 + if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) { 476 + if (hdev->fc_mode_last_time == HCLGE_FC_PFC) 477 + dev_warn(&hdev->pdev->dev, 478 + "DCB is disable, but last mode is FC_PFC\n"); 479 + 480 + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 481 + } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { 482 + /* fc_mode_last_time record the last fc_mode when 483 + * DCB is enabled, so that fc_mode can be set to 484 + * the correct value when DCB is disabled. 
485 + */ 486 + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; 487 + hdev->tm_info.fc_mode = HCLGE_FC_PFC; 488 + } 489 + } 490 + 519 491 static int hclge_tm_schd_info_init(struct hclge_dev *hdev) 520 492 { 521 493 if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && ··· 546 482 547 483 hclge_tm_vport_info_update(hdev); 548 484 549 - hdev->tm_info.fc_mode = HCLGE_FC_NONE; 550 - hdev->fc_mode_last_time = hdev->tm_info.fc_mode; 485 + hclge_pfc_info_init(hdev); 551 486 552 487 return 0; 553 488 } ··· 659 596 { 660 597 struct hclge_vport *vport = hdev->vport; 661 598 int ret; 662 - u32 i; 599 + u32 i, k; 663 600 664 601 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { 665 602 /* Cfg qs -> pri mapping, one by one mapping */ 666 - for (i = 0; i < hdev->tm_info.num_tc; i++) { 667 - ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i); 668 - if (ret) 669 - return ret; 670 - } 603 + for (k = 0; k < hdev->num_alloc_vport; k++) 604 + for (i = 0; i < hdev->tm_info.num_tc; i++) { 605 + ret = hclge_tm_qs_to_pri_map_cfg( 606 + hdev, vport[k].qs_offset + i, i); 607 + if (ret) 608 + return ret; 609 + } 671 610 } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) { 672 - int k; 673 611 /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ 674 612 for (k = 0; k < hdev->num_alloc_vport; k++) 675 613 for (i = 0; i < HNAE3_MAX_TC; i++) { ··· 819 755 820 756 static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) 821 757 { 758 + struct hclge_vport *vport = hdev->vport; 822 759 struct hclge_pg_info *pg_info; 823 760 u8 dwrr; 824 761 int ret; 825 - u32 i; 762 + u32 i, k; 826 763 827 764 for (i = 0; i < hdev->tm_info.num_tc; i++) { 828 765 pg_info = ··· 834 769 if (ret) 835 770 return ret; 836 771 837 - ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr); 838 - if (ret) 839 - return ret; 772 + for (k = 0; k < hdev->num_alloc_vport; k++) { 773 + ret = hclge_tm_qs_weight_cfg( 774 + hdev, vport[k].qs_offset + i, 775 + vport[k].dwrr); 776 + if (ret) 777 + return ret; 778 
+ } 840 779 } 841 780 842 781 return 0; ··· 904 835 return 0; 905 836 } 906 837 907 - static int hclge_tm_map_cfg(struct hclge_dev *hdev) 838 + int hclge_tm_map_cfg(struct hclge_dev *hdev) 908 839 { 909 840 int ret; 841 + 842 + ret = hclge_up_to_tc_map(hdev); 843 + if (ret) 844 + return ret; 910 845 911 846 ret = hclge_tm_pg_to_pri_map(hdev); 912 847 if (ret) ··· 922 849 static int hclge_tm_shaper_cfg(struct hclge_dev *hdev) 923 850 { 924 851 int ret; 852 + 853 + ret = hclge_tm_port_shaper_cfg(hdev); 854 + if (ret) 855 + return ret; 925 856 926 857 ret = hclge_tm_pg_shaper_cfg(hdev); 927 858 if (ret) ··· 975 898 return ret; 976 899 977 900 for (i = 0; i < kinfo->num_tc; i++) { 978 - ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i); 901 + u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode; 902 + 903 + ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i, 904 + sch_mode); 979 905 if (ret) 980 906 return ret; 981 907 } ··· 990 910 { 991 911 struct hclge_vport *vport = hdev->vport; 992 912 int ret; 993 - u8 i; 913 + u8 i, k; 994 914 995 915 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { 996 916 for (i = 0; i < hdev->tm_info.num_tc; i++) { ··· 998 918 if (ret) 999 919 return ret; 1000 920 1001 - ret = hclge_tm_qs_schd_mode_cfg(hdev, i); 1002 - if (ret) 1003 - return ret; 921 + for (k = 0; k < hdev->num_alloc_vport; k++) { 922 + ret = hclge_tm_qs_schd_mode_cfg( 923 + hdev, vport[k].qs_offset + i, 924 + HCLGE_SCH_MODE_DWRR); 925 + if (ret) 926 + return ret; 927 + } 1004 928 } 1005 929 } else { 1006 930 for (i = 0; i < hdev->num_alloc_vport; i++) { ··· 1019 935 return 0; 1020 936 } 1021 937 1022 - static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) 938 + int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) 1023 939 { 1024 940 int ret; 1025 941 ··· 1053 969 return hclge_tm_schd_mode_hw(hdev); 1054 970 } 1055 971 972 + static int hclge_pfc_setup_hw(struct hclge_dev *hdev) 973 + { 974 + u8 enable_bitmap = 0; 975 + 976 + if (hdev->tm_info.fc_mode == 
HCLGE_FC_PFC) 977 + enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK | 978 + HCLGE_RX_MAC_PAUSE_EN_MSK; 979 + 980 + return hclge_pfc_pause_en_cfg(hdev, enable_bitmap, 981 + hdev->tm_info.hw_pfc_map); 982 + } 983 + 984 + static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) 985 + { 986 + bool tx_en, rx_en; 987 + 988 + switch (hdev->tm_info.fc_mode) { 989 + case HCLGE_FC_NONE: 990 + tx_en = false; 991 + rx_en = false; 992 + break; 993 + case HCLGE_FC_RX_PAUSE: 994 + tx_en = false; 995 + rx_en = true; 996 + break; 997 + case HCLGE_FC_TX_PAUSE: 998 + tx_en = true; 999 + rx_en = false; 1000 + break; 1001 + case HCLGE_FC_FULL: 1002 + tx_en = true; 1003 + rx_en = true; 1004 + break; 1005 + default: 1006 + tx_en = true; 1007 + rx_en = true; 1008 + } 1009 + 1010 + return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 1011 + } 1012 + 1056 1013 int hclge_pause_setup_hw(struct hclge_dev *hdev) 1057 1014 { 1058 - bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC; 1059 1015 int ret; 1060 1016 u8 i; 1061 1017 1062 - ret = hclge_mac_pause_en_cfg(hdev, en, en); 1063 - if (ret) 1064 - return ret; 1018 + if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) 1019 + return hclge_mac_pause_setup_hw(hdev); 1065 1020 1066 - /* Only DCB-supported dev supports qset back pressure setting */ 1021 + /* Only DCB-supported dev supports qset back pressure and pfc cmd */ 1067 1022 if (!hnae3_dev_dcb_supported(hdev)) 1068 1023 return 0; 1024 + 1025 + /* When MAC is GE Mode, hdev does not support pfc setting */ 1026 + ret = hclge_pfc_setup_hw(hdev); 1027 + if (ret) 1028 + dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret); 1069 1029 1070 1030 for (i = 0; i < hdev->tm_info.num_tc; i++) { 1071 1031 ret = hclge_tm_qs_bp_cfg(hdev, i); ··· 1117 989 return ret; 1118 990 } 1119 991 1120 - return hclge_up_to_tc_map(hdev); 992 + return 0; 993 + } 994 + 995 + int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) 996 + { 997 + struct hclge_vport *vport = hdev->vport; 998 + struct 
hnae3_knic_private_info *kinfo; 999 + u32 i, k; 1000 + 1001 + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { 1002 + if (prio_tc[i] >= hdev->tm_info.num_tc) 1003 + return -EINVAL; 1004 + hdev->tm_info.prio_tc[i] = prio_tc[i]; 1005 + 1006 + for (k = 0; k < hdev->num_alloc_vport; k++) { 1007 + kinfo = &vport[k].nic.kinfo; 1008 + kinfo->prio_tc[i] = prio_tc[i]; 1009 + } 1010 + } 1011 + return 0; 1012 + } 1013 + 1014 + void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) 1015 + { 1016 + u8 i, bit_map = 0; 1017 + 1018 + hdev->tm_info.num_tc = num_tc; 1019 + 1020 + for (i = 0; i < hdev->tm_info.num_tc; i++) 1021 + bit_map |= BIT(i); 1022 + 1023 + if (!bit_map) { 1024 + bit_map = 1; 1025 + hdev->tm_info.num_tc = 1; 1026 + } 1027 + 1028 + hdev->hw_tc_map = bit_map; 1029 + 1030 + hclge_tm_schd_info_init(hdev); 1121 1031 } 1122 1032 1123 1033 int hclge_tm_init_hw(struct hclge_dev *hdev) ··· 1179 1013 1180 1014 int hclge_tm_schd_init(struct hclge_dev *hdev) 1181 1015 { 1182 - int ret = hclge_tm_schd_info_init(hdev); 1016 + int ret; 1183 1017 1018 + /* fc_mode is HCLGE_FC_FULL on reset */ 1019 + hdev->tm_info.fc_mode = HCLGE_FC_FULL; 1020 + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; 1021 + 1022 + ret = hclge_tm_schd_info_init(hdev); 1184 1023 if (ret) 1185 1024 return ret; 1186 1025
+15
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
··· 94 94 u32 rsvd1; 95 95 }; 96 96 97 + struct hclge_pfc_en_cmd { 98 + u8 tx_rx_en_bitmap; 99 + u8 pri_en_bitmap; 100 + }; 101 + 102 + struct hclge_port_shapping_cmd { 103 + __le32 port_shapping_para; 104 + }; 105 + 97 106 #define hclge_tm_set_field(dest, string, val) \ 98 107 hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ 99 108 (HCLGE_TM_SHAP_##string##_LSH), val) ··· 112 103 113 104 int hclge_tm_schd_init(struct hclge_dev *hdev); 114 105 int hclge_pause_setup_hw(struct hclge_dev *hdev); 106 + int hclge_tm_schd_mode_hw(struct hclge_dev *hdev); 107 + int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); 108 + void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); 109 + int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); 110 + int hclge_tm_map_cfg(struct hclge_dev *hdev); 111 + int hclge_tm_init_hw(struct hclge_dev *hdev); 115 112 #endif
+106
drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
··· 1 + /* 2 + * Copyright (c) 2016-2017 Hisilicon Limited. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + */ 9 + 10 + #include "hnae3.h" 11 + #include "hns3_enet.h" 12 + 13 + static 14 + int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) 15 + { 16 + struct hns3_nic_priv *priv = netdev_priv(ndev); 17 + struct hnae3_handle *h = priv->ae_handle; 18 + 19 + if (h->kinfo.dcb_ops->ieee_getets) 20 + return h->kinfo.dcb_ops->ieee_getets(h, ets); 21 + 22 + return -EOPNOTSUPP; 23 + } 24 + 25 + static 26 + int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) 27 + { 28 + struct hns3_nic_priv *priv = netdev_priv(ndev); 29 + struct hnae3_handle *h = priv->ae_handle; 30 + 31 + if (h->kinfo.dcb_ops->ieee_setets) 32 + return h->kinfo.dcb_ops->ieee_setets(h, ets); 33 + 34 + return -EOPNOTSUPP; 35 + } 36 + 37 + static 38 + int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) 39 + { 40 + struct hns3_nic_priv *priv = netdev_priv(ndev); 41 + struct hnae3_handle *h = priv->ae_handle; 42 + 43 + if (h->kinfo.dcb_ops->ieee_getpfc) 44 + return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); 45 + 46 + return -EOPNOTSUPP; 47 + } 48 + 49 + static 50 + int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) 51 + { 52 + struct hns3_nic_priv *priv = netdev_priv(ndev); 53 + struct hnae3_handle *h = priv->ae_handle; 54 + 55 + if (h->kinfo.dcb_ops->ieee_setpfc) 56 + return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); 57 + 58 + return -EOPNOTSUPP; 59 + } 60 + 61 + /* DCBX configuration */ 62 + static u8 hns3_dcbnl_getdcbx(struct net_device *ndev) 63 + { 64 + struct hns3_nic_priv *priv = netdev_priv(ndev); 65 + struct hnae3_handle *h = priv->ae_handle; 66 + 67 + if (h->kinfo.dcb_ops->getdcbx) 68 + return 
h->kinfo.dcb_ops->getdcbx(h); 69 + 70 + return 0; 71 + } 72 + 73 + /* return 0 if successful, otherwise fail */ 74 + static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode) 75 + { 76 + struct hns3_nic_priv *priv = netdev_priv(ndev); 77 + struct hnae3_handle *h = priv->ae_handle; 78 + 79 + if (h->kinfo.dcb_ops->setdcbx) 80 + return h->kinfo.dcb_ops->setdcbx(h, mode); 81 + 82 + return 1; 83 + } 84 + 85 + static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = { 86 + .ieee_getets = hns3_dcbnl_ieee_getets, 87 + .ieee_setets = hns3_dcbnl_ieee_setets, 88 + .ieee_getpfc = hns3_dcbnl_ieee_getpfc, 89 + .ieee_setpfc = hns3_dcbnl_ieee_setpfc, 90 + .getdcbx = hns3_dcbnl_getdcbx, 91 + .setdcbx = hns3_dcbnl_setdcbx, 92 + }; 93 + 94 + /* hclge_dcbnl_setup - DCBNL setup 95 + * @handle: the corresponding vport handle 96 + * Set up DCBNL 97 + */ 98 + void hns3_dcbnl_setup(struct hnae3_handle *handle) 99 + { 100 + struct net_device *dev = handle->kinfo.netdev; 101 + 102 + if (!handle->kinfo.dcb_ops) 103 + return; 104 + 105 + dev->dcbnl_ops = &hns3_dcbnl_ops; 106 + }
+89 -15
drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
··· 196 196 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; 197 197 } 198 198 199 + static int hns3_nic_set_real_num_queue(struct net_device *netdev) 200 + { 201 + struct hns3_nic_priv *priv = netdev_priv(netdev); 202 + struct hnae3_handle *h = priv->ae_handle; 203 + struct hnae3_knic_private_info *kinfo = &h->kinfo; 204 + unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; 205 + int ret; 206 + 207 + ret = netif_set_real_num_tx_queues(netdev, queue_size); 208 + if (ret) { 209 + netdev_err(netdev, 210 + "netif_set_real_num_tx_queues fail, ret=%d!\n", 211 + ret); 212 + return ret; 213 + } 214 + 215 + ret = netif_set_real_num_rx_queues(netdev, queue_size); 216 + if (ret) { 217 + netdev_err(netdev, 218 + "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 219 + return ret; 220 + } 221 + 222 + return 0; 223 + } 224 + 199 225 static int hns3_nic_net_up(struct net_device *netdev) 200 226 { 201 227 struct hns3_nic_priv *priv = netdev_priv(netdev); ··· 258 232 259 233 static int hns3_nic_net_open(struct net_device *netdev) 260 234 { 261 - struct hns3_nic_priv *priv = netdev_priv(netdev); 262 - struct hnae3_handle *h = priv->ae_handle; 263 235 int ret; 264 236 265 237 netif_carrier_off(netdev); 266 238 267 - ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps); 268 - if (ret) { 269 - netdev_err(netdev, 270 - "netif_set_real_num_tx_queues fail, ret=%d!\n", 271 - ret); 239 + ret = hns3_nic_set_real_num_queue(netdev); 240 + if (ret) 272 241 return ret; 273 - } 274 - 275 - ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps); 276 - if (ret) { 277 - netdev_err(netdev, 278 - "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); 279 - return ret; 280 - } 281 242 282 243 ret = hns3_nic_net_up(netdev); 283 244 if (ret) { ··· 2803 2790 goto out_reg_netdev_fail; 2804 2791 } 2805 2792 2793 + hns3_dcbnl_setup(handle); 2794 + 2806 2795 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ 2807 2796 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + 
VLAN_HLEN); 2808 2797 ··· 2861 2846 } 2862 2847 } 2863 2848 2849 + static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) 2850 + { 2851 + struct hnae3_knic_private_info *kinfo = &handle->kinfo; 2852 + struct net_device *ndev = kinfo->netdev; 2853 + bool if_running = netif_running(ndev); 2854 + int ret; 2855 + u8 i; 2856 + 2857 + if (tc > HNAE3_MAX_TC) 2858 + return -EINVAL; 2859 + 2860 + if (!ndev) 2861 + return -ENODEV; 2862 + 2863 + ret = netdev_set_num_tc(ndev, tc); 2864 + if (ret) 2865 + return ret; 2866 + 2867 + if (if_running) { 2868 + (void)hns3_nic_net_stop(ndev); 2869 + msleep(100); 2870 + } 2871 + 2872 + ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? 2873 + kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; 2874 + if (ret) 2875 + goto err_out; 2876 + 2877 + if (tc <= 1) { 2878 + netdev_reset_tc(ndev); 2879 + goto out; 2880 + } 2881 + 2882 + for (i = 0; i < HNAE3_MAX_TC; i++) { 2883 + struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; 2884 + 2885 + if (tc_info->enable) 2886 + netdev_set_tc_queue(ndev, 2887 + tc_info->tc, 2888 + tc_info->tqp_count, 2889 + tc_info->tqp_offset); 2890 + } 2891 + 2892 + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { 2893 + netdev_set_prio_tc_map(ndev, i, 2894 + kinfo->prio_tc[i]); 2895 + } 2896 + 2897 + out: 2898 + ret = hns3_nic_set_real_num_queue(ndev); 2899 + 2900 + err_out: 2901 + if (if_running) 2902 + (void)hns3_nic_net_open(ndev); 2903 + 2904 + return ret; 2905 + } 2906 + 2864 2907 const struct hnae3_client_ops client_ops = { 2865 2908 .init_instance = hns3_client_init, 2866 2909 .uninit_instance = hns3_client_uninit, 2867 2910 .link_status_change = hns3_link_status_change, 2911 + .setup_tc = hns3_client_setup_tc, 2868 2912 }; 2869 2913 2870 2914 /* hns3_init_module - Driver registration routine
+7
drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
··· 590 590 void hns3_ethtool_set_ops(struct net_device *netdev); 591 591 592 592 int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); 593 + 594 + #ifdef CONFIG_HNS3_DCB 595 + void hns3_dcbnl_setup(struct hnae3_handle *handle); 596 + #else 597 + static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {} 598 + #endif 599 + 593 600 #endif