Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-hns3-updates-for-next'

Huazhong Tan says:

====================
net: hns3: updates for -next

There are several updates relating to the interrupt coalesce for
the HNS3 ethernet driver.

#1 adds support for QL (quantity limiting, interrupt coalesce
based on the frame quantity).
#2 queries the maximum value of GL from the firmware, instead of
a fixed value in code.
#3 adds support for 1us unit GL (gap limiting, interrupt coalesce
based on the gap time).
#4 renames gl_adapt_enable in struct hns3_enet_coalesce to
adapt_enable, since both GL and QL are self-adaptive now, to match
its new usage.

change log:
V4 - remove #5~#10 from this series, which needs more discussion.
V3 - fix a typo in #1 reported by Jakub Kicinski.
rewrite #9 commit log.
remove #11 from this series.
V2 - reorder #2 & #3 to fix compiler error.
fix some checkpatch warnings in #10 & #11.

previous version:
V3: https://patchwork.ozlabs.org/project/netdev/cover/1605151998-12633-1-git-send-email-tanhuazhong@huawei.com/
V2: https://patchwork.ozlabs.org/project/netdev/cover/1604892159-19990-1-git-send-email-tanhuazhong@huawei.com/
V1: https://patchwork.ozlabs.org/project/netdev/cover/1604730681-32559-1-git-send-email-tanhuazhong@huawei.com/
====================

Link: https://lore.kernel.org/r/1605514854-11205-1-git-send-email-tanhuazhong@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+182 -37
+1
drivers/net/ethernet/hisilicon/hns3/hnae3.h
··· 278 278 u16 rss_ind_tbl_size; 279 279 u16 rss_key_size; 280 280 u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */ 281 + u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */ 281 282 u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */ 282 283 }; 283 284
+1
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
··· 349 349 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); 350 350 dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); 351 351 dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max); 352 + dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl); 352 353 } 353 354 354 355 static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
+75 -24
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 211 211 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing 212 212 */ 213 213 214 - if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable && 215 - !tqp_vector->rx_group.coal.gl_adapt_enable) 214 + if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && 215 + !tqp_vector->rx_group.coal.adapt_enable) 216 216 /* According to the hardware, the range of rl_reg is 217 217 * 0-59 and the unit is 4. 218 218 */ ··· 224 224 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, 225 225 u32 gl_value) 226 226 { 227 - u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value); 227 + u32 new_val; 228 228 229 - writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); 229 + if (tqp_vector->rx_group.coal.unit_1us) 230 + new_val = gl_value | HNS3_INT_GL_1US; 231 + else 232 + new_val = hns3_gl_usec_to_reg(gl_value); 233 + 234 + writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); 230 235 } 231 236 232 237 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, 233 238 u32 gl_value) 234 239 { 235 - u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value); 240 + u32 new_val; 236 241 237 - writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); 242 + if (tqp_vector->tx_group.coal.unit_1us) 243 + new_val = gl_value | HNS3_INT_GL_1US; 244 + else 245 + new_val = hns3_gl_usec_to_reg(gl_value); 246 + 247 + writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); 238 248 } 239 249 240 - static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, 241 - struct hns3_nic_priv *priv) 250 + void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, 251 + u32 ql_value) 242 252 { 253 + writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET); 254 + } 255 + 256 + void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector, 257 + u32 ql_value) 258 + { 259 + writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET); 260 + } 261 + 262 + static 
void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector, 263 + struct hns3_nic_priv *priv) 264 + { 265 + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); 266 + struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; 267 + struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; 268 + 243 269 /* initialize the configuration for interrupt coalescing. 244 270 * 1. GL (Interrupt Gap Limiter) 245 271 * 2. RL (Interrupt Rate Limiter) 272 + * 3. QL (Interrupt Quantity Limiter) 246 273 * 247 274 * Default: enable interrupt coalescing self-adaptive and GL 248 275 */ 249 - tqp_vector->tx_group.coal.gl_adapt_enable = 1; 250 - tqp_vector->rx_group.coal.gl_adapt_enable = 1; 276 + tx_coal->adapt_enable = 1; 277 + rx_coal->adapt_enable = 1; 251 278 252 - tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; 253 - tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; 279 + tx_coal->int_gl = HNS3_INT_GL_50K; 280 + rx_coal->int_gl = HNS3_INT_GL_50K; 254 281 255 - tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; 256 - tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; 282 + rx_coal->flow_level = HNS3_FLOW_LOW; 283 + tx_coal->flow_level = HNS3_FLOW_LOW; 284 + 285 + /* device version above V3(include V3), GL can configure 1us 286 + * unit, so uses 1us unit. 
287 + */ 288 + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { 289 + tx_coal->unit_1us = 1; 290 + rx_coal->unit_1us = 1; 291 + } 292 + 293 + if (ae_dev->dev_specs.int_ql_max) { 294 + tx_coal->ql_enable = 1; 295 + rx_coal->ql_enable = 1; 296 + tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; 297 + rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; 298 + tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; 299 + rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; 300 + } 257 301 } 258 302 259 - static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 260 - struct hns3_nic_priv *priv) 303 + static void 304 + hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector, 305 + struct hns3_nic_priv *priv) 261 306 { 307 + struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; 308 + struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; 262 309 struct hnae3_handle *h = priv->ae_handle; 263 310 264 - hns3_set_vector_coalesce_tx_gl(tqp_vector, 265 - tqp_vector->tx_group.coal.int_gl); 266 - hns3_set_vector_coalesce_rx_gl(tqp_vector, 267 - tqp_vector->rx_group.coal.int_gl); 311 + hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); 312 + hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); 268 313 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); 314 + 315 + if (tx_coal->ql_enable) 316 + hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); 317 + 318 + if (rx_coal->ql_enable) 319 + hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); 269 320 } 270 321 271 322 static int hns3_nic_set_real_num_queue(struct net_device *netdev) ··· 3384 3333 tqp_vector->last_jiffies + msecs_to_jiffies(1000))) 3385 3334 return; 3386 3335 3387 - if (rx_group->coal.gl_adapt_enable) { 3336 + if (rx_group->coal.adapt_enable) { 3388 3337 rx_update = hns3_get_new_int_gl(rx_group); 3389 3338 if (rx_update) 3390 3339 hns3_set_vector_coalesce_rx_gl(tqp_vector, 3391 3340 rx_group->coal.int_gl); 3392 3341 } 3393 3342 3394 
- if (tx_group->coal.gl_adapt_enable) { 3343 + if (tx_group->coal.adapt_enable) { 3395 3344 tx_update = hns3_get_new_int_gl(tx_group); 3396 3345 if (tx_update) 3397 3346 hns3_set_vector_coalesce_tx_gl(tqp_vector, ··· 3587 3536 3588 3537 for (i = 0; i < priv->vector_num; i++) { 3589 3538 tqp_vector = &priv->tqp_vector[i]; 3590 - hns3_vector_gl_rl_init_hw(tqp_vector, priv); 3539 + hns3_vector_coalesce_init_hw(tqp_vector, priv); 3591 3540 tqp_vector->num_tqps = 0; 3592 3541 } 3593 3542 ··· 3683 3632 tqp_vector->idx = i; 3684 3633 tqp_vector->mask_addr = vector[i].io_addr; 3685 3634 tqp_vector->vector_irq = vector[i].vector; 3686 - hns3_vector_gl_rl_init(tqp_vector, priv); 3635 + hns3_vector_coalesce_init(tqp_vector, priv); 3687 3636 } 3688 3637 3689 3638 out:
+15 -2
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
··· 181 181 #define HNS3_VECTOR_GL2_OFFSET 0x300 182 182 #define HNS3_VECTOR_RL_OFFSET 0x900 183 183 #define HNS3_VECTOR_RL_EN_B 6 184 + #define HNS3_VECTOR_TX_QL_OFFSET 0xe00 185 + #define HNS3_VECTOR_RX_QL_OFFSET 0xf00 184 186 185 187 #define HNS3_RING_EN_B 0 186 188 ··· 420 418 HNS3_FLOW_ULTRA = 3, 421 419 }; 422 420 423 - #define HNS3_INT_GL_MAX 0x1FE0 424 421 #define HNS3_INT_GL_50K 0x0014 425 422 #define HNS3_INT_GL_20K 0x0032 426 423 #define HNS3_INT_GL_18K 0x0036 427 424 #define HNS3_INT_GL_8K 0x007C 428 425 426 + #define HNS3_INT_GL_1US BIT(31) 427 + 429 428 #define HNS3_INT_RL_MAX 0x00EC 430 429 #define HNS3_INT_RL_ENABLE_MASK 0x40 431 430 431 + #define HNS3_INT_QL_DEFAULT_CFG 0x20 432 + 432 433 struct hns3_enet_coalesce { 433 434 u16 int_gl; 434 - u8 gl_adapt_enable; 435 + u16 int_ql; 436 + u16 int_ql_max; 437 + u8 adapt_enable:1; 438 + u8 ql_enable:1; 439 + u8 unit_1us:1; 435 440 enum hns3_flow_level_range flow_level; 436 441 }; 437 442 ··· 604 595 u32 gl_value); 605 596 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, 606 597 u32 rl_value); 598 + void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector, 599 + u32 ql_value); 600 + void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, 601 + u32 ql_value); 607 602 608 603 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable); 609 604 void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
+60 -11
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
··· 1105 1105 rx_vector = priv->ring[queue_num + queue].tqp_vector; 1106 1106 1107 1107 cmd->use_adaptive_tx_coalesce = 1108 - tx_vector->tx_group.coal.gl_adapt_enable; 1108 + tx_vector->tx_group.coal.adapt_enable; 1109 1109 cmd->use_adaptive_rx_coalesce = 1110 - rx_vector->rx_group.coal.gl_adapt_enable; 1110 + rx_vector->rx_group.coal.adapt_enable; 1111 1111 1112 1112 cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl; 1113 1113 cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl; 1114 1114 1115 1115 cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting; 1116 1116 cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting; 1117 + 1118 + cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql; 1119 + cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql; 1117 1120 1118 1121 return 0; 1119 1122 } ··· 1130 1127 static int hns3_check_gl_coalesce_para(struct net_device *netdev, 1131 1128 struct ethtool_coalesce *cmd) 1132 1129 { 1130 + struct hnae3_handle *handle = hns3_get_handle(netdev); 1131 + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 1133 1132 u32 rx_gl, tx_gl; 1134 1133 1135 - if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) { 1134 + if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) { 1136 1135 netdev_err(netdev, 1137 - "Invalid rx-usecs value, rx-usecs range is 0-%d\n", 1138 - HNS3_INT_GL_MAX); 1136 + "invalid rx-usecs value, rx-usecs range is 0-%u\n", 1137 + ae_dev->dev_specs.max_int_gl); 1139 1138 return -EINVAL; 1140 1139 } 1141 1140 1142 - if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) { 1141 + if (cmd->tx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) { 1143 1142 netdev_err(netdev, 1144 - "Invalid tx-usecs value, tx-usecs range is 0-%d\n", 1145 - HNS3_INT_GL_MAX); 1143 + "invalid tx-usecs value, tx-usecs range is 0-%u\n", 1144 + ae_dev->dev_specs.max_int_gl); 1146 1145 return -EINVAL; 1147 1146 } 1147 + 1148 + /* device version above V3(include V3), GL uses 1us unit, 1149 + * so the round down is not needed. 
1150 + */ 1151 + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 1152 + return 0; 1148 1153 1149 1154 rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs); 1150 1155 if (rx_gl != cmd->rx_coalesce_usecs) { ··· 1199 1188 return 0; 1200 1189 } 1201 1190 1191 + static int hns3_check_ql_coalesce_param(struct net_device *netdev, 1192 + struct ethtool_coalesce *cmd) 1193 + { 1194 + struct hnae3_handle *handle = hns3_get_handle(netdev); 1195 + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 1196 + 1197 + if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) && 1198 + !ae_dev->dev_specs.int_ql_max) { 1199 + netdev_err(netdev, "coalesced frames is not supported\n"); 1200 + return -EOPNOTSUPP; 1201 + } 1202 + 1203 + if (cmd->tx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max || 1204 + cmd->rx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max) { 1205 + netdev_err(netdev, 1206 + "invalid coalesced_frames value, range is 0-%u\n", 1207 + ae_dev->dev_specs.int_ql_max); 1208 + return -ERANGE; 1209 + } 1210 + 1211 + return 0; 1212 + } 1213 + 1202 1214 static int hns3_check_coalesce_para(struct net_device *netdev, 1203 1215 struct ethtool_coalesce *cmd) 1204 1216 { ··· 1240 1206 "Check rl coalesce param fail. 
ret = %d\n", ret); 1241 1207 return ret; 1242 1208 } 1209 + 1210 + ret = hns3_check_ql_coalesce_param(netdev, cmd); 1211 + if (ret) 1212 + return ret; 1243 1213 1244 1214 if (cmd->use_adaptive_tx_coalesce == 1 || 1245 1215 cmd->use_adaptive_rx_coalesce == 1) { ··· 1268 1230 tx_vector = priv->ring[queue].tqp_vector; 1269 1231 rx_vector = priv->ring[queue_num + queue].tqp_vector; 1270 1232 1271 - tx_vector->tx_group.coal.gl_adapt_enable = 1233 + tx_vector->tx_group.coal.adapt_enable = 1272 1234 cmd->use_adaptive_tx_coalesce; 1273 - rx_vector->rx_group.coal.gl_adapt_enable = 1235 + rx_vector->rx_group.coal.adapt_enable = 1274 1236 cmd->use_adaptive_rx_coalesce; 1275 1237 1276 1238 tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs; 1277 1239 rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs; 1240 + 1241 + tx_vector->tx_group.coal.int_ql = cmd->tx_max_coalesced_frames; 1242 + rx_vector->rx_group.coal.int_ql = cmd->rx_max_coalesced_frames; 1278 1243 1279 1244 hns3_set_vector_coalesce_tx_gl(tx_vector, 1280 1245 tx_vector->tx_group.coal.int_gl); ··· 1286 1245 1287 1246 hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting); 1288 1247 hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting); 1248 + 1249 + if (tx_vector->tx_group.coal.ql_enable) 1250 + hns3_set_vector_coalesce_tx_ql(tx_vector, 1251 + tx_vector->tx_group.coal.int_ql); 1252 + if (rx_vector->rx_group.coal.ql_enable) 1253 + hns3_set_vector_coalesce_rx_ql(rx_vector, 1254 + rx_vector->rx_group.coal.int_ql); 1289 1255 } 1290 1256 1291 1257 static int hns3_set_coalesce(struct net_device *netdev, ··· 1519 1471 #define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \ 1520 1472 ETHTOOL_COALESCE_USE_ADAPTIVE | \ 1521 1473 ETHTOOL_COALESCE_RX_USECS_HIGH | \ 1522 - ETHTOOL_COALESCE_TX_USECS_HIGH) 1474 + ETHTOOL_COALESCE_TX_USECS_HIGH | \ 1475 + ETHTOOL_COALESCE_MAX_FRAMES) 1523 1476 1524 1477 static const struct ethtool_ops hns3vf_ethtool_ops = { 1525 1478 .supported_coalesce_params = 
HNS3_ETHTOOL_COALESCE,
+8
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
··· 1103 1103 __le32 max_tm_rate; 1104 1104 }; 1105 1105 1106 + #define HCLGE_DEF_MAX_INT_GL 0x1FE0U 1107 + 1108 + struct hclge_dev_specs_1_cmd { 1109 + __le32 rsv0; 1110 + __le16 max_int_gl; 1111 + u8 rsv1[18]; 1112 + }; 1113 + 1106 1114 int hclge_cmd_init(struct hclge_dev *hdev); 1107 1115 static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) 1108 1116 {
+7
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 1366 1366 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; 1367 1367 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE; 1368 1368 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE; 1369 + ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; 1369 1370 } 1370 1371 1371 1372 static void hclge_parse_dev_specs(struct hclge_dev *hdev, ··· 1374 1373 { 1375 1374 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1376 1375 struct hclge_dev_specs_0_cmd *req0; 1376 + struct hclge_dev_specs_1_cmd *req1; 1377 1377 1378 1378 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data; 1379 + req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data; 1379 1380 1380 1381 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 1381 1382 ae_dev->dev_specs.rss_ind_tbl_size = 1382 1383 le16_to_cpu(req0->rss_ind_tbl_size); 1384 + ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 1383 1385 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 1384 1386 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate); 1387 + ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 1385 1388 } 1386 1389 1387 1390 static void hclge_check_dev_specs(struct hclge_dev *hdev) ··· 1400 1395 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE; 1401 1396 if (!dev_specs->max_tm_rate) 1402 1397 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; 1398 + if (!dev_specs->max_int_gl) 1399 + dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; 1403 1400 } 1404 1401 1405 1402 static int hclge_query_dev_specs(struct hclge_dev *hdev)
+8
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
··· 285 285 u8 rsv1[5]; 286 286 }; 287 287 288 + #define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U 289 + 290 + struct hclgevf_dev_specs_1_cmd { 291 + __le32 rsv0; 292 + __le16 max_int_gl; 293 + u8 rsv1[18]; 294 + }; 295 + 288 296 static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) 289 297 { 290 298 writel(value, base + reg);
+7
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 2991 2991 HCLGEVF_MAX_NON_TSO_BD_NUM; 2992 2992 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 2993 2993 ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; 2994 + ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 2994 2995 } 2995 2996 2996 2997 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, ··· 2999 2998 { 3000 2999 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3001 3000 struct hclgevf_dev_specs_0_cmd *req0; 3001 + struct hclgevf_dev_specs_1_cmd *req1; 3002 3002 3003 3003 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 3004 + req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 3004 3005 3005 3006 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 3006 3007 ae_dev->dev_specs.rss_ind_tbl_size = 3007 3008 le16_to_cpu(req0->rss_ind_tbl_size); 3009 + ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 3008 3010 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 3011 + ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 3009 3012 } 3010 3013 3011 3014 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) ··· 3022 3017 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 3023 3018 if (!dev_specs->rss_key_size) 3024 3019 dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; 3020 + if (!dev_specs->max_int_gl) 3021 + dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 3025 3022 } 3026 3023 3027 3024 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)