Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-08-17 (ice)

This series contains updates to ice driver only.

Jan removes unused functions and refactors code to make functions static where
possible (a short sketch of this pattern follows the commit message).

Jake rearranges some functions to be logically grouped.

Marcin removes an unnecessary call to disable VLAN stripping.

Yang Yingliang utilizes the list_for_each_entry() helper for a couple of list
traversals.

Przemek removes parameters from ice_aq_alloc_free_res() that were always
passed the same values, and reworks ice_aq_wait_for_event() so callers
register for the completion before sending the command, reducing the chance
of a race with the firmware response.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
ice: split ice_aq_wait_for_event() func into two
ice: embed &ice_rq_event_info event into struct ice_aq_task
ice: ice_aq_check_events: fix off-by-one check when filling buffer
ice: drop two params from ice_aq_alloc_free_res()
ice: use list_for_each_entry() helper
ice: Remove redundant VSI configuration in eswitch setup
ice: move E810T functions to before device agnostic ones
ice: refactor ice_vsi_is_vlan_pruning_ena
ice: refactor ice_ptp_hw to make functions static
ice: refactor ice_sched to make functions static
ice: Utilize assign_bit() helper
ice: refactor ice_vf_lib to make functions static
ice: refactor ice_lib to make functions static
ice: refactor ice_ddp to make functions static
ice: remove unused methods
====================

Link: https://lore.kernel.org/r/20230817212239.2601543-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
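
Several items in the shortlog above ("refactor ... to make functions static")
apply the same mechanical pattern: a helper used only inside one .c file loses
its declaration in the header, gains internal linkage with static, and is
defined ahead of its first caller so no forward declaration is needed. A
generic sketch of the pattern; the "foo" names below are placeholders, not
driver symbols:

/* foo.h (before): the helper was visible driver-wide
 *	int foo_helper(struct foo *f);
 * After the refactor the declaration is simply deleted.
 */

/* foo.c (after): internal linkage, defined before its first use */
struct foo { int ready; };

static int foo_helper(struct foo *f)
{
	return f->ready ? 0 : -EINVAL;
}

int foo_do_work(struct foo *f)
{
	/* foo_helper() is defined above, so no forward declaration needed */
	return foo_helper(f);
}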

+637 -748
+19 -2
drivers/net/ethernet/intel/ice/ice.h
···
 void ice_fdir_replay_flows(struct ice_hw *hw);
 void ice_fdir_replay_fltrs(struct ice_pf *pf);
 int ice_fdir_create_dflt_rules(struct ice_pf *pf);
-int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
-			  struct ice_rq_event_info *event);
+
+enum ice_aq_task_state {
+	ICE_AQ_TASK_NOT_PREPARED,
+	ICE_AQ_TASK_WAITING,
+	ICE_AQ_TASK_COMPLETE,
+	ICE_AQ_TASK_CANCELED,
+};
+
+struct ice_aq_task {
+	struct hlist_node entry;
+	struct ice_rq_event_info event;
+	enum ice_aq_task_state state;
+	u16 opcode;
+};
+
+void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+			   u16 opcode);
+int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+			  unsigned long timeout);
 int ice_open(struct net_device *netdev);
 int ice_open_internal(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);
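
With the declarations above in place, waiting for an AdminQ completion becomes
a two-step sequence: register the task, send the command, then sleep. A
condensed sketch of the flow, modeled on the ice_fw_update.c hunk later in
this diff (the real ice_write_one_nvm_block() takes more parameters and does
fuller error reporting; treat this as illustrative, not the exact driver
function):

static int write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
			       u16 block_size, u8 *block, bool last_cmd)
{
	struct ice_aq_task task = {};
	struct ice_hw *hw = &pf->hw;
	int err;

	/* Register for the completion before sending the command, so a fast
	 * firmware response cannot slip past the waiter.
	 */
	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write);

	err = ice_aq_update_nvm(hw, module, offset, block_size, block,
				last_cmd, 0, NULL);
	if (err)
		return err;

	/* Sleep until ice_aq_check_events() marks the task complete */
	err = ice_aq_wait_for_event(pf, &task, 15 * HZ);
	if (err)
		return err;

	/* The completion descriptor is now available in task.event.desc */
	return le16_to_cpu(task.event.desc.retval) ? -EIO : 0;
}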
+8 -16
drivers/net/ethernet/intel/ice/ice_common.c
···
 /**
  * ice_aq_alloc_free_res - command to allocate/free resources
  * @hw: pointer to the HW struct
- * @num_entries: number of resource entries in buffer
  * @buf: Indirect buffer to hold data parameters and response
  * @buf_size: size of buffer for indirect commands
  * @opc: pass in the command opcode
- * @cd: pointer to command details structure or NULL
  *
  * Helper function to allocate/free resources using the admin queue commands
  */
-int
-ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
-		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
-		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+int ice_aq_alloc_free_res(struct ice_hw *hw,
+			  struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+			  enum ice_adminq_opc opc)
 {
 	struct ice_aqc_alloc_free_res_cmd *cmd;
 	struct ice_aq_desc desc;
 
 	cmd = &desc.params.sw_res_ctrl;
 
-	if (!buf)
-		return -EINVAL;
-
-	if (buf_size < flex_array_size(buf, elem, num_entries))
+	if (!buf || buf_size < flex_array_size(buf, elem, 1))
 		return -EINVAL;
 
 	ice_fill_dflt_direct_cmd_desc(&desc, opc);
 
 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 
-	cmd->num_entries = cpu_to_le16(num_entries);
+	cmd->num_entries = cpu_to_le16(1);
 
-	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+	return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);
 }
 
 /**
···
 	if (btm)
 		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
 
-	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
-				       ice_aqc_opc_alloc_res, NULL);
+	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
 	if (status)
 		goto ice_alloc_res_exit;
 
···
 	buf->res_type = cpu_to_le16(type);
 	memcpy(buf->elem, res, sizeof(*buf->elem) * num);
 
-	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
-				       ice_aqc_opc_free_res, NULL);
+	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
 	if (status)
 		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
 
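
Every remaining caller passed num_entries == 1 and cd == NULL, which is why
those two arguments could be folded into the helper. An illustrative
post-series call site, modeled on ice_alloc_recipe() in the ice_switch.c hunk
further down (the wrapper name here is made up for the sketch):

static int alloc_one_shared_recipe(struct ice_hw *hw, u16 *rid)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	u16 buf_len;
	int status;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = kzalloc(buf_len, GFP_KERNEL);
	if (!sw_buf)
		return -ENOMEM;

	sw_buf->num_elems = cpu_to_le16(1);
	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
				       ICE_AQC_RES_TYPE_FLAG_SHARED);

	/* was: ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
	 *			      ice_aqc_opc_alloc_res, NULL); */
	status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res);
	if (!status)
		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);

	kfree(sw_buf);
	return status;
}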
+3 -4
drivers/net/ethernet/intel/ice/ice_common.h
···
 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
 int
 ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-int
-ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
-		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
-		      enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+int ice_aq_alloc_free_res(struct ice_hw *hw,
+			  struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+			  enum ice_adminq_opc opc);
 bool ice_is_sbq_supported(struct ice_hw *hw);
 struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
 int
+73 -71
drivers/net/ethernet/intel/ice/ice_ddp.c
··· 30 30 * Verifies various attributes of the package file, including length, format 31 31 * version, and the requirement of at least one segment. 32 32 */ 33 - enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) 33 + static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) 34 34 { 35 35 u32 seg_count; 36 36 u32 i; ··· 118 118 * 119 119 * This helper function validates a buffer's header. 120 120 */ 121 - struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) 121 + static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) 122 122 { 123 123 struct ice_buf_hdr *hdr; 124 124 u16 section_count; ··· 1153 1153 } 1154 1154 1155 1155 /** 1156 + * ice_aq_download_pkg 1157 + * @hw: pointer to the hardware structure 1158 + * @pkg_buf: the package buffer to transfer 1159 + * @buf_size: the size of the package buffer 1160 + * @last_buf: last buffer indicator 1161 + * @error_offset: returns error offset 1162 + * @error_info: returns error information 1163 + * @cd: pointer to command details structure or NULL 1164 + * 1165 + * Download Package (0x0C40) 1166 + */ 1167 + static int 1168 + ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, 1169 + u16 buf_size, bool last_buf, u32 *error_offset, 1170 + u32 *error_info, struct ice_sq_cd *cd) 1171 + { 1172 + struct ice_aqc_download_pkg *cmd; 1173 + struct ice_aq_desc desc; 1174 + int status; 1175 + 1176 + if (error_offset) 1177 + *error_offset = 0; 1178 + if (error_info) 1179 + *error_info = 0; 1180 + 1181 + cmd = &desc.params.download_pkg; 1182 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); 1183 + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1184 + 1185 + if (last_buf) 1186 + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; 1187 + 1188 + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); 1189 + if (status == -EIO) { 1190 + /* Read error from buffer only when the FW returned an error */ 1191 + struct ice_aqc_download_pkg_resp *resp; 1192 + 1193 + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; 1194 + if (error_offset) 1195 + *error_offset = le32_to_cpu(resp->error_offset); 1196 + if (error_info) 1197 + *error_info = le32_to_cpu(resp->error_info); 1198 + } 1199 + 1200 + return status; 1201 + } 1202 + 1203 + /** 1156 1204 * ice_dwnld_cfg_bufs 1157 1205 * @hw: pointer to the hardware structure 1158 1206 * @bufs: pointer to an array of buffers ··· 1342 1294 } 1343 1295 1344 1296 /** 1345 - * ice_aq_download_pkg 1346 - * @hw: pointer to the hardware structure 1347 - * @pkg_buf: the package buffer to transfer 1348 - * @buf_size: the size of the package buffer 1349 - * @last_buf: last buffer indicator 1350 - * @error_offset: returns error offset 1351 - * @error_info: returns error information 1352 - * @cd: pointer to command details structure or NULL 1353 - * 1354 - * Download Package (0x0C40) 1355 - */ 1356 - int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, 1357 - u16 buf_size, bool last_buf, u32 *error_offset, 1358 - u32 *error_info, struct ice_sq_cd *cd) 1359 - { 1360 - struct ice_aqc_download_pkg *cmd; 1361 - struct ice_aq_desc desc; 1362 - int status; 1363 - 1364 - if (error_offset) 1365 - *error_offset = 0; 1366 - if (error_info) 1367 - *error_info = 0; 1368 - 1369 - cmd = &desc.params.download_pkg; 1370 - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); 1371 - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1372 - 1373 - if (last_buf) 1374 - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; 1375 - 1376 - status = ice_aq_send_cmd(hw, &desc, pkg_buf, 
buf_size, cd); 1377 - if (status == -EIO) { 1378 - /* Read error from buffer only when the FW returned an error */ 1379 - struct ice_aqc_download_pkg_resp *resp; 1380 - 1381 - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; 1382 - if (error_offset) 1383 - *error_offset = le32_to_cpu(resp->error_offset); 1384 - if (error_info) 1385 - *error_info = le32_to_cpu(resp->error_info); 1386 - } 1387 - 1388 - return status; 1389 - } 1390 - 1391 - /** 1392 - * ice_aq_upload_section 1393 - * @hw: pointer to the hardware structure 1394 - * @pkg_buf: the package buffer which will receive the section 1395 - * @buf_size: the size of the package buffer 1396 - * @cd: pointer to command details structure or NULL 1397 - * 1398 - * Upload Section (0x0C41) 1399 - */ 1400 - int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, 1401 - u16 buf_size, struct ice_sq_cd *cd) 1402 - { 1403 - struct ice_aq_desc desc; 1404 - 1405 - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); 1406 - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1407 - 1408 - return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); 1409 - } 1410 - 1411 - /** 1412 1297 * ice_aq_update_pkg 1413 1298 * @hw: pointer to the hardware structure 1414 1299 * @pkg_buf: the package cmd buffer ··· 1386 1405 } 1387 1406 1388 1407 return status; 1408 + } 1409 + 1410 + /** 1411 + * ice_aq_upload_section 1412 + * @hw: pointer to the hardware structure 1413 + * @pkg_buf: the package buffer which will receive the section 1414 + * @buf_size: the size of the package buffer 1415 + * @cd: pointer to command details structure or NULL 1416 + * 1417 + * Upload Section (0x0C41) 1418 + */ 1419 + int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, 1420 + u16 buf_size, struct ice_sq_cd *cd) 1421 + { 1422 + struct ice_aq_desc desc; 1423 + 1424 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); 1425 + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1426 + 1427 + return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); 1389 1428 } 1390 1429 1391 1430 /** ··· 1471 1470 * success it returns a pointer to the segment header, otherwise it will 1472 1471 * return NULL. 1473 1472 */ 1474 - struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, 1475 - struct ice_pkg_hdr *pkg_hdr) 1473 + static struct ice_generic_seg_hdr * 1474 + ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, 1475 + struct ice_pkg_hdr *pkg_hdr) 1476 1476 { 1477 1477 u32 i; 1478 1478
-10
drivers/net/ethernet/intel/ice/ice_ddp.h
···
 	void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
 };
 
-int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
-			u16 buf_size, bool last_buf, u32 *error_offset,
-			u32 *error_info, struct ice_sq_cd *cd);
 int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
 			  u16 buf_size, struct ice_sq_cd *cd);
 
 void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
 
-enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
-
 struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
-
-struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
-						struct ice_pkg_hdr *pkg_hdr);
 
 int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
 int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
···
 u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
 void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
 			   u32 sect_type);
-
-struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf);
 
 #endif
-4
drivers/net/ethernet/intel/ice/ice_eswitch.c
···
 	struct ice_vsi_vlan_ops *vlan_ops;
 	bool rule_added = false;
 
-	vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
-	if (vlan_ops->dis_stripping(ctrl_vsi))
-		return -ENODEV;
-
 	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
 
 	netif_addr_lock_bh(uplink_netdev);
+24 -21
drivers/net/ethernet/intel/ice/ice_fw_update.c
··· 293 293 { 294 294 u16 completion_module, completion_retval; 295 295 struct device *dev = ice_pf_to_dev(pf); 296 - struct ice_rq_event_info event; 296 + struct ice_aq_task task = {}; 297 297 struct ice_hw *hw = &pf->hw; 298 + struct ice_aq_desc *desc; 298 299 u32 completion_offset; 299 300 int err; 300 301 301 - memset(&event, 0, sizeof(event)); 302 - 303 302 dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n", 304 303 block_size, module, offset); 304 + 305 + ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write); 305 306 306 307 err = ice_aq_update_nvm(hw, module, offset, block_size, block, 307 308 last_cmd, 0, NULL); ··· 320 319 * is conservative and is intended to prevent failure to update when 321 320 * firmware is slow to respond. 322 321 */ 323 - err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event); 322 + err = ice_aq_wait_for_event(pf, &task, 15 * HZ); 324 323 if (err) { 325 324 dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n", 326 325 module, block_size, offset, err); ··· 328 327 return -EIO; 329 328 } 330 329 331 - completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid); 332 - completion_retval = le16_to_cpu(event.desc.retval); 330 + desc = &task.event.desc; 331 + completion_module = le16_to_cpu(desc->params.nvm.module_typeid); 332 + completion_retval = le16_to_cpu(desc->retval); 333 333 334 - completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low); 335 - completion_offset |= event.desc.params.nvm.offset_high << 16; 334 + completion_offset = le16_to_cpu(desc->params.nvm.offset_low); 335 + completion_offset |= desc->params.nvm.offset_high << 16; 336 336 337 337 if (completion_module != module) { 338 338 dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n", ··· 365 363 */ 366 364 if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) { 367 365 if (hw->dev_caps.common_cap.pcie_reset_avoidance) { 368 - *reset_level = (event.desc.params.nvm.cmd_flags & 369 - ICE_AQC_NVM_RESET_LVL_M); 366 + *reset_level = desc->params.nvm.cmd_flags & 367 + ICE_AQC_NVM_RESET_LVL_M; 370 368 dev_dbg(dev, "Firmware reported required reset level as %u\n", 371 369 *reset_level); 372 370 } else { ··· 481 479 { 482 480 u16 completion_module, completion_retval; 483 481 struct device *dev = ice_pf_to_dev(pf); 484 - struct ice_rq_event_info event; 482 + struct ice_aq_task task = {}; 485 483 struct ice_hw *hw = &pf->hw; 484 + struct ice_aq_desc *desc; 486 485 struct devlink *devlink; 487 486 int err; 488 487 489 488 dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module); 490 489 491 - memset(&event, 0, sizeof(event)); 492 - 493 490 devlink = priv_to_devlink(pf); 494 491 495 492 devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT); 493 + 494 + ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_erase); 496 495 497 496 err = ice_aq_erase_nvm(hw, module, NULL); 498 497 if (err) { ··· 505 502 goto out_notify_devlink; 506 503 } 507 504 508 - err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ, &event); 505 + err = ice_aq_wait_for_event(pf, &task, ICE_FW_ERASE_TIMEOUT * HZ); 509 506 if (err) { 510 507 dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n", 511 508 component, module, err); ··· 513 510 goto out_notify_devlink; 514 511 } 515 512 516 - completion_module = 
le16_to_cpu(event.desc.params.nvm.module_typeid); 517 - completion_retval = le16_to_cpu(event.desc.retval); 513 + desc = &task.event.desc; 514 + completion_module = le16_to_cpu(desc->params.nvm.module_typeid); 515 + completion_retval = le16_to_cpu(desc->retval); 518 516 519 517 if (completion_module != module) { 520 518 dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n", ··· 564 560 u8 *emp_reset_available, struct netlink_ext_ack *extack) 565 561 { 566 562 struct device *dev = ice_pf_to_dev(pf); 567 - struct ice_rq_event_info event; 563 + struct ice_aq_task task = {}; 568 564 struct ice_hw *hw = &pf->hw; 569 565 u16 completion_retval; 570 566 u8 response_flags; 571 567 int err; 572 568 573 - memset(&event, 0, sizeof(event)); 569 + ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate); 574 570 575 571 err = ice_nvm_write_activate(hw, activate_flags, &response_flags); 576 572 if (err) { ··· 596 592 } 597 593 } 598 594 599 - err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ, 600 - &event); 595 + err = ice_aq_wait_for_event(pf, &task, 30 * HZ); 601 596 if (err) { 602 597 dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n", 603 598 err); ··· 604 601 return err; 605 602 } 606 603 607 - completion_retval = le16_to_cpu(event.desc.retval); 604 + completion_retval = le16_to_cpu(task.event.desc.retval); 608 605 if (completion_retval) { 609 606 dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n", 610 607 ice_aq_str((enum ice_aq_err)completion_retval));
+6 -11
drivers/net/ethernet/intel/ice/ice_lag.c
···
 	struct ice_lag_netdev_list *entry;
 	struct net_device *tmp_netdev;
 	struct ice_netdev_priv *np;
-	struct list_head *tmp;
 	struct ice_hw *hw;
 
-	list_for_each(tmp, lag->netdev_head) {
-		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+	list_for_each_entry(entry, lag->netdev_head, node) {
 		tmp_netdev = entry->netdev;
 		if (!tmp_netdev || !netif_is_ice(tmp_netdev))
 			continue;
···
 	/* if unlinnking need to free the shared resource */
 	if (!link && local_lag->bond_swid) {
 		buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid);
-		status = ice_aq_alloc_free_res(&local_lag->pf->hw, 1, buf,
-					       buf_len, ice_aqc_opc_free_res,
-					       NULL);
+		status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf,
+					       buf_len, ice_aqc_opc_free_res);
 		if (status)
 			dev_err(ice_pf_to_dev(local_lag->pf), "Error freeing SWID during LAG unlink\n");
 		local_lag->bond_swid = 0;
···
 			cpu_to_le16(local_lag->pf->hw.port_info->sw_id);
 	}
 
-	status = ice_aq_alloc_free_res(&local_lag->pf->hw, 1, buf, buf_len,
-				       ice_aqc_opc_alloc_res, NULL);
+	status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf, buf_len,
+				       ice_aqc_opc_alloc_res);
 	if (status)
 		dev_err(ice_pf_to_dev(local_lag->pf), "Error subscribing to SWID 0x%04X\n",
 			local_lag->bond_swid);
···
 	struct ice_lag_netdev_list *entry;
 	struct ice_netdev_priv *np;
 	struct net_device *netdev;
-	struct list_head *tmp;
 	struct ice_pf *pf;
 
-	list_for_each(tmp, lag->netdev_head) {
-		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+	list_for_each_entry(entry, lag->netdev_head, node) {
 		netdev = entry->netdev;
 		np = netdev_priv(netdev);
 		pf = np->vsi->back;
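
The list conversion in this file is purely mechanical: list_for_each() yields
raw list_head nodes and needs an explicit list_entry() to recover the
containing structure, while list_for_each_entry() does both in one step and
drops the temporary cursor variable. In outline, mirroring the hunk above
(the loop body is elided):

	struct ice_lag_netdev_list *entry;
	struct list_head *tmp;

	/* before */
	list_for_each(tmp, lag->netdev_head) {
		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
		if (!entry->netdev || !netif_is_ice(entry->netdev))
			continue;
		/* ... */
	}

	/* after: no struct list_head *tmp needed */
	list_for_each_entry(entry, lag->netdev_head, node) {
		if (!entry->netdev || !netif_is_ice(entry->netdev))
			continue;
		/* ... */
	}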
+32 -50
drivers/net/ethernet/intel/ice/ice_lib.c
··· 1228 1228 } 1229 1229 1230 1230 /** 1231 + * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not 1232 + * @vsi: VSI to check whether or not VLAN pruning is enabled. 1233 + * 1234 + * returns true if Rx VLAN pruning is enabled and false otherwise. 1235 + */ 1236 + static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) 1237 + { 1238 + return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 1239 + } 1240 + 1241 + /** 1231 1242 * ice_vsi_init - Create and initialize a VSI 1232 1243 * @vsi: the VSI being configured 1233 1244 * @vsi_flags: VSI configuration flags ··· 1696 1685 } 1697 1686 1698 1687 /** 1688 + * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length 1689 + * @vsi: VSI 1690 + */ 1691 + static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) 1692 + { 1693 + if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { 1694 + vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; 1695 + vsi->rx_buf_len = ICE_RXBUF_1664; 1696 + #if (PAGE_SIZE < 8192) 1697 + } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && 1698 + (vsi->netdev->mtu <= ETH_DATA_LEN)) { 1699 + vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; 1700 + vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; 1701 + #endif 1702 + } else { 1703 + vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; 1704 + vsi->rx_buf_len = ICE_RXBUF_3072; 1705 + } 1706 + } 1707 + 1708 + /** 1699 1709 * ice_pf_state_is_nominal - checks the PF for nominal state 1700 1710 * @pf: pointer to PF to check 1701 1711 * ··· 1788 1756 &prev_es->tx_errors, &cur_es->tx_errors); 1789 1757 1790 1758 vsi->stat_offsets_loaded = true; 1791 - } 1792 - 1793 - /** 1794 - * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length 1795 - * @vsi: VSI 1796 - */ 1797 - void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) 1798 - { 1799 - if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { 1800 - vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; 1801 - vsi->rx_buf_len = ICE_RXBUF_1664; 1802 - #if (PAGE_SIZE < 8192) 1803 - } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && 1804 - (vsi->netdev->mtu <= ETH_DATA_LEN)) { 1805 - vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; 1806 - vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; 1807 - #endif 1808 - } else { 1809 - vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; 1810 - vsi->rx_buf_len = ICE_RXBUF_3072; 1811 - } 1812 1759 } 1813 1760 1814 1761 /** ··· 2194 2183 } 2195 2184 2196 2185 return false; 2197 - } 2198 - 2199 - /** 2200 - * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not 2201 - * @vsi: VSI to check whether or not VLAN pruning is enabled. 2202 - * 2203 - * returns true if Rx VLAN pruning is enabled and false otherwise. 2204 - */ 2205 - bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) 2206 - { 2207 - if (!vsi) 2208 - return false; 2209 - 2210 - return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA); 2211 2186 } 2212 2187 2213 2188 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) ··· 2938 2941 2939 2942 ice_for_each_q_vector(vsi, i) 2940 2943 synchronize_irq(vsi->q_vectors[i]->irq.virq); 2941 - } 2942 - 2943 - /** 2944 - * ice_napi_del - Remove NAPI handler for the VSI 2945 - * @vsi: VSI for which NAPI handler is to be removed 2946 - */ 2947 - void ice_napi_del(struct ice_vsi *vsi) 2948 - { 2949 - int v_idx; 2950 - 2951 - if (!vsi->netdev) 2952 - return; 2953 - 2954 - ice_for_each_q_vector(vsi, v_idx) 2955 - netif_napi_del(&vsi->q_vectors[v_idx]->napi); 2956 2944 } 2957 2945 2958 2946 /**
-5
drivers/net/ethernet/intel/ice/ice_lib.h
···
 
 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
 
-bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
-
 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
 
 int ice_set_link(struct ice_vsi *vsi, bool ena);
···
 
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
-
-void ice_napi_del(struct ice_vsi *vsi);
 
 int ice_vsi_release(struct ice_vsi *vsi);
···
 
 void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
 
-void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
 void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
 void ice_write_itr(struct ice_ring_container *rc, u16 itr);
 void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
+51 -50
drivers/net/ethernet/intel/ice/ice_main.c
··· 1250 1250 return status; 1251 1251 } 1252 1252 1253 - enum ice_aq_task_state { 1254 - ICE_AQ_TASK_WAITING = 0, 1255 - ICE_AQ_TASK_COMPLETE, 1256 - ICE_AQ_TASK_CANCELED, 1257 - }; 1258 - 1259 - struct ice_aq_task { 1260 - struct hlist_node entry; 1261 - 1262 - u16 opcode; 1263 - struct ice_rq_event_info *event; 1264 - enum ice_aq_task_state state; 1265 - }; 1266 - 1267 1253 /** 1268 - * ice_aq_wait_for_event - Wait for an AdminQ event from firmware 1254 + * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware 1269 1255 * @pf: pointer to the PF private structure 1256 + * @task: intermediate helper storage and identifier for waiting 1270 1257 * @opcode: the opcode to wait for 1271 - * @timeout: how long to wait, in jiffies 1272 - * @event: storage for the event info 1273 1258 * 1274 - * Waits for a specific AdminQ completion event on the ARQ for a given PF. The 1275 - * current thread will be put to sleep until the specified event occurs or 1276 - * until the given timeout is reached. 1259 + * Prepares to wait for a specific AdminQ completion event on the ARQ for 1260 + * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event(). 1277 1261 * 1278 - * To obtain only the descriptor contents, pass an event without an allocated 1262 + * Calls are separated to allow caller registering for event before sending 1263 + * the command, which mitigates a race between registering and FW responding. 1264 + * 1265 + * To obtain only the descriptor contents, pass an task->event with null 1279 1266 * msg_buf. If the complete data buffer is desired, allocate the 1280 - * event->msg_buf with enough space ahead of time. 1281 - * 1282 - * Returns: zero on success, or a negative error code on failure. 1267 + * task->event.msg_buf with enough space ahead of time. 1283 1268 */ 1284 - int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, 1285 - struct ice_rq_event_info *event) 1269 + void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task, 1270 + u16 opcode) 1286 1271 { 1287 - struct device *dev = ice_pf_to_dev(pf); 1288 - struct ice_aq_task *task; 1289 - unsigned long start; 1290 - long ret; 1291 - int err; 1292 - 1293 - task = kzalloc(sizeof(*task), GFP_KERNEL); 1294 - if (!task) 1295 - return -ENOMEM; 1296 - 1297 1272 INIT_HLIST_NODE(&task->entry); 1298 1273 task->opcode = opcode; 1299 - task->event = event; 1300 1274 task->state = ICE_AQ_TASK_WAITING; 1301 1275 1302 1276 spin_lock_bh(&pf->aq_wait_lock); 1303 1277 hlist_add_head(&task->entry, &pf->aq_wait_list); 1304 1278 spin_unlock_bh(&pf->aq_wait_lock); 1279 + } 1305 1280 1306 - start = jiffies; 1281 + /** 1282 + * ice_aq_wait_for_event - Wait for an AdminQ event from firmware 1283 + * @pf: pointer to the PF private structure 1284 + * @task: ptr prepared by ice_aq_prep_for_event() 1285 + * @timeout: how long to wait, in jiffies 1286 + * 1287 + * Waits for a specific AdminQ completion event on the ARQ for a given PF. The 1288 + * current thread will be put to sleep until the specified event occurs or 1289 + * until the given timeout is reached. 1290 + * 1291 + * Returns: zero on success, or a negative error code on failure. 
1292 + */ 1293 + int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task, 1294 + unsigned long timeout) 1295 + { 1296 + enum ice_aq_task_state *state = &task->state; 1297 + struct device *dev = ice_pf_to_dev(pf); 1298 + unsigned long start = jiffies; 1299 + long ret; 1300 + int err; 1307 1301 1308 - ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state, 1302 + ret = wait_event_interruptible_timeout(pf->aq_wait_queue, 1303 + *state != ICE_AQ_TASK_WAITING, 1309 1304 timeout); 1310 - switch (task->state) { 1305 + switch (*state) { 1306 + case ICE_AQ_TASK_NOT_PREPARED: 1307 + WARN(1, "call to %s without ice_aq_prep_for_event()", __func__); 1308 + err = -EINVAL; 1309 + break; 1311 1310 case ICE_AQ_TASK_WAITING: 1312 1311 err = ret < 0 ? ret : -ETIMEDOUT; 1313 1312 break; ··· 1317 1318 err = ret < 0 ? ret : 0; 1318 1319 break; 1319 1320 default: 1320 - WARN(1, "Unexpected AdminQ wait task state %u", task->state); 1321 + WARN(1, "Unexpected AdminQ wait task state %u", *state); 1321 1322 err = -EINVAL; 1322 1323 break; 1323 1324 } ··· 1325 1326 dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n", 1326 1327 jiffies_to_msecs(jiffies - start), 1327 1328 jiffies_to_msecs(timeout), 1328 - opcode); 1329 + task->opcode); 1329 1330 1330 1331 spin_lock_bh(&pf->aq_wait_lock); 1331 1332 hlist_del(&task->entry); 1332 1333 spin_unlock_bh(&pf->aq_wait_lock); 1333 - kfree(task); 1334 1334 1335 1335 return err; 1336 1336 } ··· 1355 1357 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, 1356 1358 struct ice_rq_event_info *event) 1357 1359 { 1360 + struct ice_rq_event_info *task_ev; 1358 1361 struct ice_aq_task *task; 1359 1362 bool found = false; 1360 1363 1361 1364 spin_lock_bh(&pf->aq_wait_lock); 1362 1365 hlist_for_each_entry(task, &pf->aq_wait_list, entry) { 1363 - if (task->state || task->opcode != opcode) 1366 + if (task->state != ICE_AQ_TASK_WAITING) 1367 + continue; 1368 + if (task->opcode != opcode) 1364 1369 continue; 1365 1370 1366 - memcpy(&task->event->desc, &event->desc, sizeof(event->desc)); 1367 - task->event->msg_len = event->msg_len; 1371 + task_ev = &task->event; 1372 + memcpy(&task_ev->desc, &event->desc, sizeof(event->desc)); 1373 + task_ev->msg_len = event->msg_len; 1368 1374 1369 1375 /* Only copy the data buffer if a destination was set */ 1370 - if (task->event->msg_buf && 1371 - task->event->buf_len > event->buf_len) { 1372 - memcpy(task->event->msg_buf, event->msg_buf, 1376 + if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) { 1377 + memcpy(task_ev->msg_buf, event->msg_buf, 1373 1378 event->buf_len); 1374 - task->event->buf_len = event->buf_len; 1379 + task_ev->buf_len = event->buf_len; 1375 1380 } 1376 1381 1377 1382 task->state = ICE_AQ_TASK_COMPLETE;
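
The same file also carries the off-by-one fix named in the shortlog: when
completing a waiter, ice_aq_check_events() copied the received message only if
the caller's buffer was strictly larger than the message, so a buffer of
exactly the right size was never filled. The guard before and after, in
outline (field names as in the hunk above):

	/* before: an exactly-sized destination buffer was skipped */
	if (task->event->msg_buf && task->event->buf_len > event->buf_len)
		memcpy(task->event->msg_buf, event->msg_buf, event->buf_len);

	/* after: copying event->buf_len bytes is safe whenever the
	 * destination holds at least that much
	 */
	if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len)
		memcpy(task_ev->msg_buf, event->msg_buf, event->buf_len);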
+182 -201
drivers/net/ethernet/intel/ice/ice_ptp_hw.c
··· 293 293 * 294 294 * Read a PHY register for the given port over the device sideband queue. 295 295 */ 296 - int 296 + static int 297 297 ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) 298 298 { 299 299 struct ice_sbq_msg_input msg = {0}; ··· 370 370 * 371 371 * Write a PHY register for the given port over the device sideband queue. 372 372 */ 373 - int 373 + static int 374 374 ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) 375 375 { 376 376 struct ice_sbq_msg_input msg = {0}; ··· 1079 1079 * 1080 1080 * Negative adjustments are supported using 2s complement arithmetic. 1081 1081 */ 1082 - int 1082 + static int 1083 1083 ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time) 1084 1084 { 1085 1085 u32 l_time, u_time; ··· 2869 2869 return 0; 2870 2870 } 2871 2871 2872 + /** 2873 + * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register 2874 + * @hw: pointer to the HW struct 2875 + * @port: the PHY port to read 2876 + * @tstamp_ready: contents of the Tx memory status register 2877 + * 2878 + * E810 devices do not use a Tx memory status register. Instead simply 2879 + * indicate that all timestamps are currently ready. 2880 + */ 2881 + static int 2882 + ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) 2883 + { 2884 + *tstamp_ready = 0xFFFFFFFFFFFFFFFF; 2885 + return 0; 2886 + } 2887 + 2888 + /* E810T SMA functions 2889 + * 2890 + * The following functions operate specifically on E810T hardware and are used 2891 + * to access the extended GPIOs available. 2892 + */ 2893 + 2894 + /** 2895 + * ice_get_pca9575_handle 2896 + * @hw: pointer to the hw struct 2897 + * @pca9575_handle: GPIO controller's handle 2898 + * 2899 + * Find and return the GPIO controller's handle in the netlist. 2900 + * When found - the value will be cached in the hw structure and following calls 2901 + * will return cached value 2902 + */ 2903 + static int 2904 + ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) 2905 + { 2906 + struct ice_aqc_get_link_topo *cmd; 2907 + struct ice_aq_desc desc; 2908 + int status; 2909 + u8 idx; 2910 + 2911 + /* If handle was read previously return cached value */ 2912 + if (hw->io_expander_handle) { 2913 + *pca9575_handle = hw->io_expander_handle; 2914 + return 0; 2915 + } 2916 + 2917 + /* If handle was not detected read it from the netlist */ 2918 + cmd = &desc.params.get_link_topo; 2919 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 2920 + 2921 + /* Set node type to GPIO controller */ 2922 + cmd->addr.topo_params.node_type_ctx = 2923 + (ICE_AQC_LINK_TOPO_NODE_TYPE_M & 2924 + ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); 2925 + 2926 + #define SW_PCA9575_SFP_TOPO_IDX 2 2927 + #define SW_PCA9575_QSFP_TOPO_IDX 1 2928 + 2929 + /* Check if the SW IO expander controlling SMA exists in the netlist. 
*/ 2930 + if (hw->device_id == ICE_DEV_ID_E810C_SFP) 2931 + idx = SW_PCA9575_SFP_TOPO_IDX; 2932 + else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) 2933 + idx = SW_PCA9575_QSFP_TOPO_IDX; 2934 + else 2935 + return -EOPNOTSUPP; 2936 + 2937 + cmd->addr.topo_params.index = idx; 2938 + 2939 + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2940 + if (status) 2941 + return -EOPNOTSUPP; 2942 + 2943 + /* Verify if we found the right IO expander type */ 2944 + if (desc.params.get_link_topo.node_part_num != 2945 + ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) 2946 + return -EOPNOTSUPP; 2947 + 2948 + /* If present save the handle and return it */ 2949 + hw->io_expander_handle = 2950 + le16_to_cpu(desc.params.get_link_topo.addr.handle); 2951 + *pca9575_handle = hw->io_expander_handle; 2952 + 2953 + return 0; 2954 + } 2955 + 2956 + /** 2957 + * ice_read_sma_ctrl_e810t 2958 + * @hw: pointer to the hw struct 2959 + * @data: pointer to data to be read from the GPIO controller 2960 + * 2961 + * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the 2962 + * PCA9575 expander, so only bits 3-7 in data are valid. 2963 + */ 2964 + int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) 2965 + { 2966 + int status; 2967 + u16 handle; 2968 + u8 i; 2969 + 2970 + status = ice_get_pca9575_handle(hw, &handle); 2971 + if (status) 2972 + return status; 2973 + 2974 + *data = 0; 2975 + 2976 + for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { 2977 + bool pin; 2978 + 2979 + status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, 2980 + &pin, NULL); 2981 + if (status) 2982 + break; 2983 + *data |= (u8)(!pin) << i; 2984 + } 2985 + 2986 + return status; 2987 + } 2988 + 2989 + /** 2990 + * ice_write_sma_ctrl_e810t 2991 + * @hw: pointer to the hw struct 2992 + * @data: data to be written to the GPIO controller 2993 + * 2994 + * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1 2995 + * of the PCA9575 expander, so only bits 3-7 in data are valid. 
2996 + */ 2997 + int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) 2998 + { 2999 + int status; 3000 + u16 handle; 3001 + u8 i; 3002 + 3003 + status = ice_get_pca9575_handle(hw, &handle); 3004 + if (status) 3005 + return status; 3006 + 3007 + for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { 3008 + bool pin; 3009 + 3010 + pin = !(data & (1 << i)); 3011 + status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, 3012 + pin, NULL); 3013 + if (status) 3014 + break; 3015 + } 3016 + 3017 + return status; 3018 + } 3019 + 3020 + /** 3021 + * ice_read_pca9575_reg_e810t 3022 + * @hw: pointer to the hw struct 3023 + * @offset: GPIO controller register offset 3024 + * @data: pointer to data to be read from the GPIO controller 3025 + * 3026 + * Read the register from the GPIO controller 3027 + */ 3028 + int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) 3029 + { 3030 + struct ice_aqc_link_topo_addr link_topo; 3031 + __le16 addr; 3032 + u16 handle; 3033 + int err; 3034 + 3035 + memset(&link_topo, 0, sizeof(link_topo)); 3036 + 3037 + err = ice_get_pca9575_handle(hw, &handle); 3038 + if (err) 3039 + return err; 3040 + 3041 + link_topo.handle = cpu_to_le16(handle); 3042 + link_topo.topo_params.node_type_ctx = 3043 + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, 3044 + ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); 3045 + 3046 + addr = cpu_to_le16((u16)offset); 3047 + 3048 + return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); 3049 + } 3050 + 2872 3051 /* Device agnostic functions 2873 3052 * 2874 3053 * The following functions implement shared behavior common to both E822 and ··· 3306 3127 return ice_clear_phy_tstamp_e810(hw, block, idx); 3307 3128 else 3308 3129 return ice_clear_phy_tstamp_e822(hw, block, idx); 3309 - } 3310 - 3311 - /** 3312 - * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register 3313 - * @hw: pointer to the HW struct 3314 - * @port: the PHY port to read 3315 - * @tstamp_ready: contents of the Tx memory status register 3316 - * 3317 - * E810 devices do not use a Tx memory status register. Instead simply 3318 - * indicate that all timestamps are currently ready. 3319 - */ 3320 - static int 3321 - ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) 3322 - { 3323 - *tstamp_ready = 0xFFFFFFFFFFFFFFFF; 3324 - return 0; 3325 - } 3326 - 3327 - /* E810T SMA functions 3328 - * 3329 - * The following functions operate specifically on E810T hardware and are used 3330 - * to access the extended GPIOs available. 3331 - */ 3332 - 3333 - /** 3334 - * ice_get_pca9575_handle 3335 - * @hw: pointer to the hw struct 3336 - * @pca9575_handle: GPIO controller's handle 3337 - * 3338 - * Find and return the GPIO controller's handle in the netlist. 
3339 - * When found - the value will be cached in the hw structure and following calls 3340 - * will return cached value 3341 - */ 3342 - static int 3343 - ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) 3344 - { 3345 - struct ice_aqc_get_link_topo *cmd; 3346 - struct ice_aq_desc desc; 3347 - int status; 3348 - u8 idx; 3349 - 3350 - /* If handle was read previously return cached value */ 3351 - if (hw->io_expander_handle) { 3352 - *pca9575_handle = hw->io_expander_handle; 3353 - return 0; 3354 - } 3355 - 3356 - /* If handle was not detected read it from the netlist */ 3357 - cmd = &desc.params.get_link_topo; 3358 - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 3359 - 3360 - /* Set node type to GPIO controller */ 3361 - cmd->addr.topo_params.node_type_ctx = 3362 - (ICE_AQC_LINK_TOPO_NODE_TYPE_M & 3363 - ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); 3364 - 3365 - #define SW_PCA9575_SFP_TOPO_IDX 2 3366 - #define SW_PCA9575_QSFP_TOPO_IDX 1 3367 - 3368 - /* Check if the SW IO expander controlling SMA exists in the netlist. */ 3369 - if (hw->device_id == ICE_DEV_ID_E810C_SFP) 3370 - idx = SW_PCA9575_SFP_TOPO_IDX; 3371 - else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) 3372 - idx = SW_PCA9575_QSFP_TOPO_IDX; 3373 - else 3374 - return -EOPNOTSUPP; 3375 - 3376 - cmd->addr.topo_params.index = idx; 3377 - 3378 - status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3379 - if (status) 3380 - return -EOPNOTSUPP; 3381 - 3382 - /* Verify if we found the right IO expander type */ 3383 - if (desc.params.get_link_topo.node_part_num != 3384 - ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) 3385 - return -EOPNOTSUPP; 3386 - 3387 - /* If present save the handle and return it */ 3388 - hw->io_expander_handle = 3389 - le16_to_cpu(desc.params.get_link_topo.addr.handle); 3390 - *pca9575_handle = hw->io_expander_handle; 3391 - 3392 - return 0; 3393 - } 3394 - 3395 - /** 3396 - * ice_read_sma_ctrl_e810t 3397 - * @hw: pointer to the hw struct 3398 - * @data: pointer to data to be read from the GPIO controller 3399 - * 3400 - * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the 3401 - * PCA9575 expander, so only bits 3-7 in data are valid. 3402 - */ 3403 - int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) 3404 - { 3405 - int status; 3406 - u16 handle; 3407 - u8 i; 3408 - 3409 - status = ice_get_pca9575_handle(hw, &handle); 3410 - if (status) 3411 - return status; 3412 - 3413 - *data = 0; 3414 - 3415 - for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { 3416 - bool pin; 3417 - 3418 - status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, 3419 - &pin, NULL); 3420 - if (status) 3421 - break; 3422 - *data |= (u8)(!pin) << i; 3423 - } 3424 - 3425 - return status; 3426 - } 3427 - 3428 - /** 3429 - * ice_write_sma_ctrl_e810t 3430 - * @hw: pointer to the hw struct 3431 - * @data: data to be written to the GPIO controller 3432 - * 3433 - * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1 3434 - * of the PCA9575 expander, so only bits 3-7 in data are valid. 
3435 - */ 3436 - int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) 3437 - { 3438 - int status; 3439 - u16 handle; 3440 - u8 i; 3441 - 3442 - status = ice_get_pca9575_handle(hw, &handle); 3443 - if (status) 3444 - return status; 3445 - 3446 - for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { 3447 - bool pin; 3448 - 3449 - pin = !(data & (1 << i)); 3450 - status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, 3451 - pin, NULL); 3452 - if (status) 3453 - break; 3454 - } 3455 - 3456 - return status; 3457 - } 3458 - 3459 - /** 3460 - * ice_read_pca9575_reg_e810t 3461 - * @hw: pointer to the hw struct 3462 - * @offset: GPIO controller register offset 3463 - * @data: pointer to data to be read from the GPIO controller 3464 - * 3465 - * Read the register from the GPIO controller 3466 - */ 3467 - int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) 3468 - { 3469 - struct ice_aqc_link_topo_addr link_topo; 3470 - __le16 addr; 3471 - u16 handle; 3472 - int err; 3473 - 3474 - memset(&link_topo, 0, sizeof(link_topo)); 3475 - 3476 - err = ice_get_pca9575_handle(hw, &handle); 3477 - if (err) 3478 - return err; 3479 - 3480 - link_topo.handle = cpu_to_le16(handle); 3481 - link_topo.topo_params.node_type_ctx = 3482 - FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, 3483 - ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); 3484 - 3485 - addr = cpu_to_le16((u16)offset); 3486 - 3487 - return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); 3488 - } 3489 - 3490 - /** 3491 - * ice_is_pca9575_present 3492 - * @hw: pointer to the hw struct 3493 - * 3494 - * Check if the SW IO expander is present in the netlist 3495 - */ 3496 - bool ice_is_pca9575_present(struct ice_hw *hw) 3497 - { 3498 - u16 handle = 0; 3499 - int status; 3500 - 3501 - if (!ice_is_e810t(hw)) 3502 - return false; 3503 - 3504 - status = ice_get_pca9575_handle(hw, &handle); 3505 - 3506 - return !status && handle; 3507 3130 } 3508 3131 3509 3132 /**
-4
drivers/net/ethernet/intel/ice/ice_ptp_hw.h
···
 int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
 
 /* E822 family functions */
-int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
-int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val);
 int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
 int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
-int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time);
 void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad);
 
 /**
···
 int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
 int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
 int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
-bool ice_is_pca9575_present(struct ice_hw *hw);
 
 #define PFTSYN_SEM_BYTES 4
 
+1 -1
drivers/net/ethernet/intel/ice/ice_sched.c
···
  * This function sets BW limit of VSI or Aggregator scheduling node
  * based on TC information from passed in argument BW.
  */
-int
+static int
 ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
 				 enum ice_agg_type agg_type, u8 tc,
 				 enum ice_rl_type rl_type, u32 bw)
-4
drivers/net/ethernet/intel/ice/ice_sched.h
···
 int
 ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 			       enum ice_rl_type rl_type);
-int
-ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
-				 enum ice_agg_type agg_type, u8 tc,
-				 enum ice_rl_type rl_type, u32 bw);
 int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
 int
 ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
+7 -57
drivers/net/ethernet/intel/ice/ice_switch.c
··· 1847 1847 if (opc == ice_aqc_opc_free_res) 1848 1848 sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id); 1849 1849 1850 - status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL); 1850 + status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc); 1851 1851 if (status) 1852 1852 goto ice_aq_alloc_free_vsi_list_exit; 1853 1853 ··· 2101 2101 sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << 2102 2102 ICE_AQC_RES_TYPE_S) | 2103 2103 ICE_AQC_RES_TYPE_FLAG_SHARED); 2104 - status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, 2105 - ice_aqc_opc_alloc_res, NULL); 2104 + status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, 2105 + ice_aqc_opc_alloc_res); 2106 2106 if (!status) 2107 2107 *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp); 2108 2108 kfree(sw_buf); ··· 3409 3409 } 3410 3410 3411 3411 /** 3412 - * ice_mac_fltr_exist - does this MAC filter exist for given VSI 3413 - * @hw: pointer to the hardware structure 3414 - * @mac: MAC address to be checked (for MAC filter) 3415 - * @vsi_handle: check MAC filter for this VSI 3416 - */ 3417 - bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle) 3418 - { 3419 - struct ice_fltr_mgmt_list_entry *entry; 3420 - struct list_head *rule_head; 3421 - struct ice_switch_info *sw; 3422 - struct mutex *rule_lock; /* Lock to protect filter rule list */ 3423 - u16 hw_vsi_id; 3424 - 3425 - if (!ice_is_vsi_valid(hw, vsi_handle)) 3426 - return false; 3427 - 3428 - hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3429 - sw = hw->switch_info; 3430 - rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 3431 - if (!rule_head) 3432 - return false; 3433 - 3434 - rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 3435 - mutex_lock(rule_lock); 3436 - list_for_each_entry(entry, rule_head, list_entry) { 3437 - struct ice_fltr_info *f_info = &entry->fltr_info; 3438 - u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; 3439 - 3440 - if (is_zero_ether_addr(mac_addr)) 3441 - continue; 3442 - 3443 - if (f_info->flag != ICE_FLTR_TX || 3444 - f_info->src_id != ICE_SRC_ID_VSI || 3445 - f_info->lkup_type != ICE_SW_LKUP_MAC || 3446 - f_info->fltr_act != ICE_FWD_TO_VSI || 3447 - hw_vsi_id != f_info->fwd_id.hw_vsi_id) 3448 - continue; 3449 - 3450 - if (ether_addr_equal(mac, mac_addr)) { 3451 - mutex_unlock(rule_lock); 3452 - return true; 3453 - } 3454 - } 3455 - mutex_unlock(rule_lock); 3456 - return false; 3457 - } 3458 - 3459 - /** 3460 3412 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI 3461 3413 * @hw: pointer to the hardware structure 3462 3414 * @vlan_id: VLAN ID ··· 4448 4496 buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & 4449 4497 ICE_AQC_RES_TYPE_M) | alloc_shared); 4450 4498 4451 - status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 4452 - ice_aqc_opc_alloc_res, NULL); 4499 + status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 4453 4500 if (status) 4454 4501 goto exit; 4455 4502 ··· 4486 4535 ICE_AQC_RES_TYPE_M) | alloc_shared); 4487 4536 buf->elem[0].e.sw_resp = cpu_to_le16(counter_id); 4488 4537 4489 - status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 4490 - ice_aqc_opc_free_res, NULL); 4538 + status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 4491 4539 if (status) 4492 4540 ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n"); 4493 4541 ··· 4528 4578 ~ICE_AQC_RES_TYPE_FLAG_SHARED); 4529 4579 4530 4580 buf->elem[0].e.sw_resp = cpu_to_le16(res_id); 4531 - status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 4532 - ice_aqc_opc_share_res, NULL); 4581 + 
status = ice_aq_alloc_free_res(hw, buf, buf_len, 4582 + ice_aqc_opc_share_res); 4533 4583 if (status) 4534 4584 ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n", 4535 4585 type, res_id, shared ? "SHARED" : "DEDICATED");
-1
drivers/net/ethernet/intel/ice/ice_switch.h
···
 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
 int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+231 -234
drivers/net/ethernet/intel/ice/ice_vf_lib.c
··· 323 323 } 324 324 325 325 /** 326 + * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN 327 + * @vf: VF to add MAC filters for 328 + * @vsi: Pointer to VSI 329 + * 330 + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 331 + * always re-adds either a VLAN 0 or port VLAN based filter after reset. 332 + */ 333 + static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) 334 + { 335 + struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 336 + struct device *dev = ice_pf_to_dev(vf->pf); 337 + int err; 338 + 339 + if (ice_vf_is_port_vlan_ena(vf)) { 340 + err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); 341 + if (err) { 342 + dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", 343 + vf->vf_id, err); 344 + return err; 345 + } 346 + 347 + err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); 348 + } else { 349 + err = ice_vsi_add_vlan_zero(vsi); 350 + } 351 + 352 + if (err) { 353 + dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n", 354 + ice_vf_is_port_vlan_ena(vf) ? 355 + ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); 356 + return err; 357 + } 358 + 359 + err = vlan_ops->ena_rx_filtering(vsi); 360 + if (err) 361 + dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n", 362 + vf->vf_id, vsi->idx, err); 363 + 364 + return 0; 365 + } 366 + 367 + /** 368 + * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration 369 + * @vf: VF to re-apply the configuration for 370 + * 371 + * Called after a VF VSI has been re-added/rebuild during reset. The PF driver 372 + * needs to re-apply the host configured Tx rate limiting configuration. 373 + */ 374 + static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) 375 + { 376 + struct device *dev = ice_pf_to_dev(vf->pf); 377 + struct ice_vsi *vsi = ice_get_vf_vsi(vf); 378 + int err; 379 + 380 + if (WARN_ON(!vsi)) 381 + return -EINVAL; 382 + 383 + if (vf->min_tx_rate) { 384 + err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); 385 + if (err) { 386 + dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", 387 + vf->min_tx_rate, vf->vf_id, err); 388 + return err; 389 + } 390 + } 391 + 392 + if (vf->max_tx_rate) { 393 + err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); 394 + if (err) { 395 + dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", 396 + vf->max_tx_rate, vf->vf_id, err); 397 + return err; 398 + } 399 + } 400 + 401 + return 0; 402 + } 403 + 404 + /** 405 + * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value 406 + * @vf: VF to configure trust setting for 407 + */ 408 + static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) 409 + { 410 + assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted); 411 + } 412 + 413 + /** 414 + * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA 415 + * @vf: VF to add MAC filters for 416 + * 417 + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 418 + * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. 
419 + */ 420 + static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) 421 + { 422 + struct device *dev = ice_pf_to_dev(vf->pf); 423 + struct ice_vsi *vsi = ice_get_vf_vsi(vf); 424 + u8 broadcast[ETH_ALEN]; 425 + int status; 426 + 427 + if (WARN_ON(!vsi)) 428 + return -EINVAL; 429 + 430 + if (ice_is_eswitch_mode_switchdev(vf->pf)) 431 + return 0; 432 + 433 + eth_broadcast_addr(broadcast); 434 + status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); 435 + if (status) { 436 + dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", 437 + vf->vf_id, status); 438 + return status; 439 + } 440 + 441 + vf->num_mac++; 442 + 443 + if (is_valid_ether_addr(vf->hw_lan_addr)) { 444 + status = ice_fltr_add_mac(vsi, vf->hw_lan_addr, 445 + ICE_FWD_TO_VSI); 446 + if (status) { 447 + dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", 448 + &vf->hw_lan_addr[0], vf->vf_id, 449 + status); 450 + return status; 451 + } 452 + vf->num_mac++; 453 + 454 + ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); 455 + } 456 + 457 + return 0; 458 + } 459 + 460 + /** 461 + * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config 462 + * @vsi: Pointer to VSI 463 + * 464 + * This function moves VSI into corresponding scheduler aggregator node 465 + * based on cached value of "aggregator node info" per VSI 466 + */ 467 + static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) 468 + { 469 + struct ice_pf *pf = vsi->back; 470 + struct device *dev; 471 + int status; 472 + 473 + if (!vsi->agg_node) 474 + return; 475 + 476 + dev = ice_pf_to_dev(pf); 477 + if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 478 + dev_dbg(dev, 479 + "agg_id %u already has reached max_num_vsis %u\n", 480 + vsi->agg_node->agg_id, vsi->agg_node->num_vsis); 481 + return; 482 + } 483 + 484 + status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, 485 + vsi->idx, vsi->tc_cfg.ena_tc); 486 + if (status) 487 + dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", 488 + vsi->idx, vsi->agg_node->agg_id); 489 + else 490 + vsi->agg_node->num_vsis++; 491 + } 492 + 493 + /** 494 + * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset 495 + * @vf: VF to rebuild host configuration on 496 + */ 497 + static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) 498 + { 499 + struct device *dev = ice_pf_to_dev(vf->pf); 500 + struct ice_vsi *vsi = ice_get_vf_vsi(vf); 501 + 502 + if (WARN_ON(!vsi)) 503 + return; 504 + 505 + ice_vf_set_host_trust_cfg(vf); 506 + 507 + if (ice_vf_rebuild_host_mac_cfg(vf)) 508 + dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", 509 + vf->vf_id); 510 + 511 + if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) 512 + dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", 513 + vf->vf_id); 514 + 515 + if (ice_vf_rebuild_host_tx_rate_cfg(vf)) 516 + dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", 517 + vf->vf_id); 518 + 519 + if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) 520 + dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", 521 + vf->vf_id); 522 + 523 + /* rebuild aggregator node config for main VF VSI */ 524 + ice_vf_rebuild_aggregator_node_cfg(vsi); 525 + } 526 + 527 + /** 528 + * ice_set_vf_state_qs_dis - Set VF queues state to disabled 529 + * @vf: pointer to the VF structure 530 + */ 531 + static void ice_set_vf_state_qs_dis(struct ice_vf *vf) 532 + { 533 + /* Clear Rx/Tx enabled queues flag */ 534 + bitmap_zero(vf->txq_ena, 
ICE_MAX_RSS_QS_PER_VF); 535 + bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); 536 + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); 537 + } 538 + 539 + /** 540 + * ice_vf_set_initialized - VF is ready for VIRTCHNL communication 541 + * @vf: VF to set in initialized state 542 + * 543 + * After this function the VF will be ready to receive/handle the 544 + * VIRTCHNL_OP_GET_VF_RESOURCES message 545 + */ 546 + static void ice_vf_set_initialized(struct ice_vf *vf) 547 + { 548 + ice_set_vf_state_qs_dis(vf); 549 + clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 550 + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); 551 + clear_bit(ICE_VF_STATE_DIS, vf->vf_states); 552 + set_bit(ICE_VF_STATE_INIT, vf->vf_states); 553 + memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); 554 + } 555 + 556 + /** 326 557 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild 327 558 * @vf: the VF being reset 328 559 * ··· 957 726 } 958 727 959 728 /** 960 - * ice_set_vf_state_qs_dis - Set VF queues state to disabled 961 - * @vf: pointer to the VF structure 962 - */ 963 - static void ice_set_vf_state_qs_dis(struct ice_vf *vf) 964 - { 965 - /* Clear Rx/Tx enabled queues flag */ 966 - bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); 967 - bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); 968 - clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); 969 - } 970 - 971 - /** 972 729 * ice_set_vf_state_dis - Set VF state to disabled 973 730 * @vf: pointer to the VF structure 974 731 */ ··· 1197 978 } 1198 979 1199 980 /** 1200 - * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value 1201 - * @vf: VF to configure trust setting for 1202 - */ 1203 - static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) 1204 - { 1205 - if (vf->trusted) 1206 - set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1207 - else 1208 - clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 1209 - } 1210 - 1211 - /** 1212 - * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA 1213 - * @vf: VF to add MAC filters for 1214 - * 1215 - * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 1216 - * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. 
1217 - */ 1218 - static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) 1219 - { 1220 - struct device *dev = ice_pf_to_dev(vf->pf); 1221 - struct ice_vsi *vsi = ice_get_vf_vsi(vf); 1222 - u8 broadcast[ETH_ALEN]; 1223 - int status; 1224 - 1225 - if (WARN_ON(!vsi)) 1226 - return -EINVAL; 1227 - 1228 - if (ice_is_eswitch_mode_switchdev(vf->pf)) 1229 - return 0; 1230 - 1231 - eth_broadcast_addr(broadcast); 1232 - status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); 1233 - if (status) { 1234 - dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", 1235 - vf->vf_id, status); 1236 - return status; 1237 - } 1238 - 1239 - vf->num_mac++; 1240 - 1241 - if (is_valid_ether_addr(vf->hw_lan_addr)) { 1242 - status = ice_fltr_add_mac(vsi, vf->hw_lan_addr, 1243 - ICE_FWD_TO_VSI); 1244 - if (status) { 1245 - dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", 1246 - &vf->hw_lan_addr[0], vf->vf_id, 1247 - status); 1248 - return status; 1249 - } 1250 - vf->num_mac++; 1251 - 1252 - ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); 1253 - } 1254 - 1255 - return 0; 1256 - } 1257 - 1258 - /** 1259 - * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN 1260 - * @vf: VF to add MAC filters for 1261 - * @vsi: Pointer to VSI 1262 - * 1263 - * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver 1264 - * always re-adds either a VLAN 0 or port VLAN based filter after reset. 1265 - */ 1266 - static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) 1267 - { 1268 - struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1269 - struct device *dev = ice_pf_to_dev(vf->pf); 1270 - int err; 1271 - 1272 - if (ice_vf_is_port_vlan_ena(vf)) { 1273 - err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); 1274 - if (err) { 1275 - dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", 1276 - vf->vf_id, err); 1277 - return err; 1278 - } 1279 - 1280 - err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); 1281 - } else { 1282 - err = ice_vsi_add_vlan_zero(vsi); 1283 - } 1284 - 1285 - if (err) { 1286 - dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n", 1287 - ice_vf_is_port_vlan_ena(vf) ? 1288 - ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); 1289 - return err; 1290 - } 1291 - 1292 - err = vlan_ops->ena_rx_filtering(vsi); 1293 - if (err) 1294 - dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n", 1295 - vf->vf_id, vsi->idx, err); 1296 - 1297 - return 0; 1298 - } 1299 - 1300 - /** 1301 - * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration 1302 - * @vf: VF to re-apply the configuration for 1303 - * 1304 - * Called after a VF VSI has been re-added/rebuild during reset. The PF driver 1305 - * needs to re-apply the host configured Tx rate limiting configuration. 
1306 - */ 1307 - static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) 1308 - { 1309 - struct device *dev = ice_pf_to_dev(vf->pf); 1310 - struct ice_vsi *vsi = ice_get_vf_vsi(vf); 1311 - int err; 1312 - 1313 - if (WARN_ON(!vsi)) 1314 - return -EINVAL; 1315 - 1316 - if (vf->min_tx_rate) { 1317 - err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); 1318 - if (err) { 1319 - dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", 1320 - vf->min_tx_rate, vf->vf_id, err); 1321 - return err; 1322 - } 1323 - } 1324 - 1325 - if (vf->max_tx_rate) { 1326 - err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); 1327 - if (err) { 1328 - dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", 1329 - vf->max_tx_rate, vf->vf_id, err); 1330 - return err; 1331 - } 1332 - } 1333 - 1334 - return 0; 1335 - } 1336 - 1337 - /** 1338 - * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config 1339 - * @vsi: Pointer to VSI 1340 - * 1341 - * This function moves VSI into corresponding scheduler aggregator node 1342 - * based on cached value of "aggregator node info" per VSI 1343 - */ 1344 - static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) 1345 - { 1346 - struct ice_pf *pf = vsi->back; 1347 - struct device *dev; 1348 - int status; 1349 - 1350 - if (!vsi->agg_node) 1351 - return; 1352 - 1353 - dev = ice_pf_to_dev(pf); 1354 - if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { 1355 - dev_dbg(dev, 1356 - "agg_id %u already has reached max_num_vsis %u\n", 1357 - vsi->agg_node->agg_id, vsi->agg_node->num_vsis); 1358 - return; 1359 - } 1360 - 1361 - status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, 1362 - vsi->idx, vsi->tc_cfg.ena_tc); 1363 - if (status) 1364 - dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", 1365 - vsi->idx, vsi->agg_node->agg_id); 1366 - else 1367 - vsi->agg_node->num_vsis++; 1368 - } 1369 - 1370 - /** 1371 - * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset 1372 - * @vf: VF to rebuild host configuration on 1373 - */ 1374 - void ice_vf_rebuild_host_cfg(struct ice_vf *vf) 1375 - { 1376 - struct device *dev = ice_pf_to_dev(vf->pf); 1377 - struct ice_vsi *vsi = ice_get_vf_vsi(vf); 1378 - 1379 - if (WARN_ON(!vsi)) 1380 - return; 1381 - 1382 - ice_vf_set_host_trust_cfg(vf); 1383 - 1384 - if (ice_vf_rebuild_host_mac_cfg(vf)) 1385 - dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", 1386 - vf->vf_id); 1387 - 1388 - if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) 1389 - dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", 1390 - vf->vf_id); 1391 - 1392 - if (ice_vf_rebuild_host_tx_rate_cfg(vf)) 1393 - dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", 1394 - vf->vf_id); 1395 - 1396 - if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) 1397 - dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", 1398 - vf->vf_id); 1399 - 1400 - /* rebuild aggregator node config for main VF VSI */ 1401 - ice_vf_rebuild_aggregator_node_cfg(vsi); 1402 - } 1403 - 1404 - /** 1405 981 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access 1406 982 * @vf: VF that control VSI is being invalidated on 1407 983 */ ··· 1322 1308 1323 1309 ice_vsi_release(vsi); 1324 1310 ice_vf_invalidate_vsi(vf); 1325 - } 1326 - 1327 - /** 1328 - * ice_vf_set_initialized - VF is ready for VIRTCHNL communication 1329 - * @vf: VF to set in initialized state 1330 - * 1331 - * After this function the VF will be 
ready to receive/handle the 1332 - * VIRTCHNL_OP_GET_VF_RESOURCES message 1333 - */ 1334 - void ice_vf_set_initialized(struct ice_vf *vf) 1335 - { 1336 - ice_set_vf_state_qs_dis(vf); 1337 - clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); 1338 - clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); 1339 - clear_bit(ICE_VF_STATE_DIS, vf->vf_states); 1340 - set_bit(ICE_VF_STATE_INIT, vf->vf_states); 1341 - memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); 1342 1311 } 1343 1312 1344 1313 /**
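The relocated ice_vf_set_host_trust_cfg() above also collapses the removed if/set_bit()/else/clear_bit() pair into a single assign_bit() call. As a rough illustration of why the two forms end in the same state, here is a standalone userspace C sketch; set_bit_ul(), clear_bit_ul() and assign_bit_ul() are simplified, non-atomic stand-ins invented for this example, not the kernel helpers from linux/bitops.h, and CAP_PRIVILEGE_BIT merely stands in for ICE_VIRTCHNL_VF_CAP_PRIVILEGE.

#include <stdbool.h>
#include <stdio.h>

#define CAP_PRIVILEGE_BIT 0UL	/* stand-in for ICE_VIRTCHNL_VF_CAP_PRIVILEGE */

/* simplified, non-atomic stand-ins for the kernel's bit helpers */
static void set_bit_ul(unsigned long nr, unsigned long *addr)   { *addr |=  (1UL << nr); }
static void clear_bit_ul(unsigned long nr, unsigned long *addr) { *addr &= ~(1UL << nr); }

static void assign_bit_ul(unsigned long nr, unsigned long *addr, bool value)
{
	if (value)
		set_bit_ul(nr, addr);
	else
		clear_bit_ul(nr, addr);
}

int main(void)
{
	unsigned long vf_caps = 0;
	bool trusted = true;

	/* old pattern: branch at the call site */
	if (trusted)
		set_bit_ul(CAP_PRIVILEGE_BIT, &vf_caps);
	else
		clear_bit_ul(CAP_PRIVILEGE_BIT, &vf_caps);

	/* new pattern: one call, same end state */
	assign_bit_ul(CAP_PRIVILEGE_BIT, &vf_caps, trusted);

	printf("vf_caps = %#lx, privilege bit %s\n", vf_caps,
	       (vf_caps >> CAP_PRIVILEGE_BIT) & 1 ? "set" : "clear");
	return 0;
}

Either way the privilege bit ends up set exactly when the VF is trusted; the single-call form just states that directly at each call site.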
+0 -2
drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
··· 32 32 bool ice_is_vf_trusted(struct ice_vf *vf); 33 33 bool ice_vf_has_no_qs_ena(struct ice_vf *vf); 34 34 bool ice_is_vf_link_up(struct ice_vf *vf); 35 - void ice_vf_rebuild_host_cfg(struct ice_vf *vf); 36 35 void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf); 37 36 void ice_vf_ctrl_vsi_release(struct ice_vf *vf); 38 37 struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf); 39 38 int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi); 40 39 void ice_vf_invalidate_vsi(struct ice_vf *vf); 41 40 void ice_vf_vsi_release(struct ice_vf *vf); 42 - void ice_vf_set_initialized(struct ice_vf *vf); 43 41 44 42 #endif /* _ICE_VF_LIB_PRIVATE_H_ */
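The two prototypes dropped from ice_vf_lib_private.h belong to functions that are now static in ice_vf_lib.c, so they are no longer reachable (or needed) from any other translation unit. A minimal sketch of that pattern, using hypothetical names rather than the driver's own:

#include <stdio.h>

/* Hypothetical illustration only: once helper_cfg() has callers in a single
 * .c file, it can be marked static and its prototype removed from the shared
 * private header, shrinking the module's internal API surface. */
static void helper_cfg(int vf_id)
{
	printf("rebuilding configuration for VF %d\n", vf_id);
}

static void reset_one_vf(int vf_id)
{
	/* the only remaining caller lives in the same translation unit */
	helper_cfg(vf_id);
}

int main(void)
{
	reset_one_vf(0);
	return 0;
}

With internal linkage the compiler is free to inline or drop the helper, and any stray external reference now fails to link instead of quietly depending on a function that was meant to be private.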