Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to igb, i40e and i40evf.

I provide a code comment fix which David Miller noticed in the last
series of patches I submitted.

Shannon provides a patch to cleanup the NAPI structs when deleting the
netdev.

Anjali provides several patches for i40e; the first fixes a bug in the update
filter logic which was causing a kernel panic. Then provides a fix to
rename an error bit to correctly indicate the error. Adds a definition
for a new state variable to keep track of features automatically disabled
due to hardware resource limitations, as opposed to features disabled by the user.
Anjali provides a patch to add code to handle when there is a filter
programming error due to a full table, which also resolves a previous
compile warning about an unused "*pf" variable introduced in the last i40e
series patch submission.

Jesse provides three i40e patches to clean up strings, to make them more
consistent and to align with other Intel drivers.

Akeem cleans up a misleading function header comment for i40e.

Mitch provides a fix for i40e/i40evf to use the correctly reported number
of MSI-X vectors in the PF and VF. Then provides a patch to use
dma_set_mask_and_coherent() which was introduced in v3.13 and simplifies
the DMA mapping code a bit.

v2:
- dropped the 2 ixgbe patches from Emil based on feedback from David Miller,
where the 2 fixes should be handled in the net core to fix all drivers
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+269 -95
+9 -1
drivers/net/ethernet/intel/i40e/i40e.h
··· 152 152 }; 153 153 154 154 #define I40E_DEFAULT_ATR_SAMPLE_RATE 20 155 - #define I40E_FDIR_MAX_RAW_PACKET_SIZE 512 155 + #define I40E_FDIR_MAX_RAW_PACKET_SIZE 512 156 + #define I40E_FDIR_BUFFER_FULL_MARGIN 10 157 + #define I40E_FDIR_BUFFER_HEAD_ROOM 200 158 + 156 159 struct i40e_fdir_filter { 157 160 struct hlist_node fdir_node; 158 161 /* filter ipnut set */ ··· 265 262 #ifdef CONFIG_I40E_VXLAN 266 263 #define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) 267 264 #endif 265 + 266 + /* tracks features that get auto disabled by errors */ 267 + u64 auto_disable_flags; 268 268 269 269 bool stat_offsets_loaded; 270 270 struct i40e_hw_port_stats stats; ··· 556 550 struct i40e_pf *pf, bool add); 557 551 int i40e_add_del_fdir(struct i40e_vsi *vsi, 558 552 struct i40e_fdir_filter *input, bool add); 553 + void i40e_fdir_check_and_reenable(struct i40e_pf *pf); 554 + int i40e_get_current_fd_count(struct i40e_pf *pf); 559 555 void i40e_set_ethtool_ops(struct net_device *netdev); 560 556 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, 561 557 u8 *macaddr, s16 vlan,
+17 -8
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
··· 1011 1011 **/ 1012 1012 static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) 1013 1013 { 1014 - if (enable) 1014 + if (enable) { 1015 1015 pf->flags |= flag; 1016 - else 1016 + } else { 1017 1017 pf->flags &= ~flag; 1018 + pf->auto_disable_flags |= flag; 1019 + } 1018 1020 dev_info(&pf->pdev->dev, "requesting a pf reset\n"); 1019 1021 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); 1020 1022 } ··· 1469 1467 pf->msg_enable); 1470 1468 } 1471 1469 } else if (strncmp(cmd_buf, "pfr", 3) == 0) { 1472 - dev_info(&pf->pdev->dev, "forcing PFR\n"); 1470 + dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); 1473 1471 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); 1474 1472 1475 1473 } else if (strncmp(cmd_buf, "corer", 5) == 0) { 1476 - dev_info(&pf->pdev->dev, "forcing CoreR\n"); 1474 + dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); 1477 1475 i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); 1478 1476 1479 1477 } else if (strncmp(cmd_buf, "globr", 5) == 0) { 1480 - dev_info(&pf->pdev->dev, "forcing GlobR\n"); 1478 + dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); 1481 1479 i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); 1482 1480 1483 1481 } else if (strncmp(cmd_buf, "empr", 4) == 0) { 1484 - dev_info(&pf->pdev->dev, "forcing EMPR\n"); 1482 + dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); 1485 1483 i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); 1486 1484 1487 1485 } else if (strncmp(cmd_buf, "read", 4) == 0) { ··· 1672 1670 bool add = false; 1673 1671 int ret; 1674 1672 1673 + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 1674 + goto command_write_done; 1675 + 1676 + if (strncmp(cmd_buf, "add", 3) == 0) 1677 + add = true; 1678 + 1679 + if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) 1680 + goto command_write_done; 1681 + 1675 1682 asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, 1676 1683 GFP_KERNEL); 1677 1684 if (!asc_packet) ··· 1695 1684 goto 
command_write_done; 1696 1685 } 1697 1686 1698 - if (strncmp(cmd_buf, "add", 3) == 0) 1699 - add = true; 1700 1687 cnt = sscanf(&cmd_buf[13], 1701 1688 "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s", 1702 1689 &fd_data.q_index,
+31 -6
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
··· 1357 1357 } 1358 1358 1359 1359 /** 1360 + * i40e_match_fdir_input_set - Match a new filter against an existing one 1361 + * @rule: The filter already added 1362 + * @input: The new filter to comapre against 1363 + * 1364 + * Returns true if the two input set match 1365 + **/ 1366 + static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule, 1367 + struct i40e_fdir_filter *input) 1368 + { 1369 + if ((rule->dst_ip[0] != input->dst_ip[0]) || 1370 + (rule->src_ip[0] != input->src_ip[0]) || 1371 + (rule->dst_port != input->dst_port) || 1372 + (rule->src_port != input->src_port)) 1373 + return false; 1374 + return true; 1375 + } 1376 + 1377 + /** 1360 1378 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry 1361 1379 * @vsi: Pointer to the targeted VSI 1362 1380 * @input: The filter to update or NULL to indicate deletion ··· 1409 1391 1410 1392 /* if there is an old rule occupying our place remove it */ 1411 1393 if (rule && (rule->fd_id == sw_idx)) { 1412 - if (!input || (rule->fd_id != input->fd_id)) { 1413 - cmd->fs.flow_type = rule->flow_type; 1414 - err = i40e_add_del_fdir_ethtool(vsi, cmd, false); 1415 - } 1416 - 1394 + if (input && !i40e_match_fdir_input_set(rule, input)) 1395 + err = i40e_add_del_fdir(vsi, rule, false); 1396 + else if (!input) 1397 + err = i40e_add_del_fdir(vsi, rule, false); 1417 1398 hlist_del(&rule->fdir_node); 1418 1399 kfree(rule); 1419 1400 pf->fdir_pf_active_filters--; ··· 1460 1443 1461 1444 ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); 1462 1445 1446 + i40e_fdir_check_and_reenable(pf); 1463 1447 return ret; 1464 1448 } 1465 1449 ··· 1484 1466 if (!vsi) 1485 1467 return -EINVAL; 1486 1468 1487 - fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; 1488 1469 pf = vsi->back; 1470 + 1471 + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 1472 + return -EOPNOTSUPP; 1473 + 1474 + if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) 1475 + return -ENOSPC; 1476 + 1477 + fsp = (struct 
ethtool_rx_flow_spec *)&cmd->fs; 1489 1478 1490 1479 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + 1491 1480 pf->hw.func_caps.fd_filters_guaranteed)) {
+138 -52
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 1985 1985 * @netdev: network interface to be adjusted 1986 1986 * @vid: vlan id to be removed 1987 1987 * 1988 - * net_device_ops implementation for adding vlan ids 1988 + * net_device_ops implementation for removing vlan ids 1989 1989 **/ 1990 1990 static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 1991 1991 __always_unused __be16 proto, u16 vid) ··· 2436 2436 struct i40e_pf *pf = vsi->back; 2437 2437 struct hlist_node *node; 2438 2438 2439 + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 2440 + return; 2441 + 2439 2442 hlist_for_each_entry_safe(filter, node, 2440 2443 &pf->fdir_filter_list, fdir_node) { 2441 2444 i40e_add_del_fdir(vsi, filter, true); ··· 2456 2453 i40e_set_vsi_rx_mode(vsi); 2457 2454 i40e_restore_vlan(vsi); 2458 2455 i40e_vsi_config_dcb_rings(vsi); 2459 - if (vsi->type == I40E_VSI_FDIR) 2460 - i40e_fdir_filter_restore(vsi); 2461 2456 err = i40e_vsi_configure_tx(vsi); 2462 2457 if (!err) 2463 2458 err = i40e_vsi_configure_rx(vsi); ··· 2583 2582 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2584 2583 wr32(hw, I40E_PFINT_LNKLST0, 0); 2585 2584 2586 - /* Associate the queue pair to the vector and enable the q int */ 2585 + /* Associate the queue pair to the vector and enable the queue int */ 2587 2586 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2588 2587 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2589 2588 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); ··· 2892 2891 icr0_remaining); 2893 2892 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 2894 2893 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 2895 - (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) || 2896 - (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) { 2894 + (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { 2897 2895 dev_info(&pf->pdev->dev, "device will be reset\n"); 2898 2896 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2899 2897 i40e_service_event_schedule(pf); ··· 3755 3755 NULL); 3756 3756 if (aq_ret) { 3757 3757 
dev_info(&vsi->back->pdev->dev, 3758 - "%s: AQ command Config VSI BW allocation per TC failed = %d\n", 3759 - __func__, vsi->back->hw.aq.asq_last_status); 3758 + "AQ command Config VSI BW allocation per TC failed = %d\n", 3759 + vsi->back->hw.aq.asq_last_status); 3760 3760 return -EINVAL; 3761 3761 } 3762 3762 ··· 4085 4085 } else if (vsi->netdev) { 4086 4086 netdev_info(vsi->netdev, "NIC Link is Down\n"); 4087 4087 } 4088 + 4089 + /* replay FDIR SB filters */ 4090 + if (vsi->type == I40E_VSI_FDIR) 4091 + i40e_fdir_filter_restore(vsi); 4088 4092 i40e_service_event_schedule(pf); 4089 4093 4090 4094 return 0; ··· 4368 4364 * for the warning interrupt will deal with the shutdown 4369 4365 * and recovery of the switch setup. 4370 4366 */ 4371 - dev_info(&pf->pdev->dev, "GlobalR requested\n"); 4367 + dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); 4372 4368 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4373 4369 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 4374 4370 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); ··· 4379 4375 * 4380 4376 * Same as Global Reset, except does *not* include the MAC/PHY 4381 4377 */ 4382 - dev_info(&pf->pdev->dev, "CoreR requested\n"); 4378 + dev_dbg(&pf->pdev->dev, "CoreR requested\n"); 4383 4379 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4384 4380 val |= I40E_GLGEN_RTRIG_CORER_MASK; 4385 4381 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); ··· 4413 4409 * the switch, since we need to do all the recovery as 4414 4410 * for the Core Reset. 
4415 4411 */ 4416 - dev_info(&pf->pdev->dev, "PFR requested\n"); 4412 + dev_dbg(&pf->pdev->dev, "PFR requested\n"); 4417 4413 i40e_handle_reset_warning(pf); 4418 4414 4419 4415 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { ··· 4462 4458 &old_cfg->etscfg.prioritytable, 4463 4459 sizeof(new_cfg->etscfg.prioritytable))) { 4464 4460 need_reconfig = true; 4465 - dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n"); 4461 + dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); 4466 4462 } 4467 4463 4468 4464 if (memcmp(&new_cfg->etscfg.tcbwtable, 4469 4465 &old_cfg->etscfg.tcbwtable, 4470 4466 sizeof(new_cfg->etscfg.tcbwtable))) 4471 - dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 4467 + dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 4472 4468 4473 4469 if (memcmp(&new_cfg->etscfg.tsatable, 4474 4470 &old_cfg->etscfg.tsatable, 4475 4471 sizeof(new_cfg->etscfg.tsatable))) 4476 - dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n"); 4472 + dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); 4477 4473 } 4478 4474 4479 4475 /* Check if PFC configuration has changed */ ··· 4481 4477 &old_cfg->pfc, 4482 4478 sizeof(new_cfg->pfc))) { 4483 4479 need_reconfig = true; 4484 - dev_info(&pf->pdev->dev, "PFC config change detected.\n"); 4480 + dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); 4485 4481 } 4486 4482 4487 4483 /* Check if APP Table has changed */ ··· 4489 4485 &old_cfg->app, 4490 4486 sizeof(new_cfg->app))) { 4491 4487 need_reconfig = true; 4492 - dev_info(&pf->pdev->dev, "APP Table change detected.\n"); 4488 + dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); 4493 4489 } 4494 4490 4495 4491 return need_reconfig; ··· 4539 4535 4540 4536 /* No change detected in DCBX configs */ 4541 4537 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { 4542 - dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); 4538 + dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); 4543 4539 goto exit; 4544 4540 
} 4545 4541 ··· 4597 4593 struct i40e_vf *vf; 4598 4594 u16 vf_id; 4599 4595 4600 - dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n", 4601 - __func__, queue, qtx_ctl); 4596 + dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", 4597 + queue, qtx_ctl); 4602 4598 4603 4599 /* Queue belongs to VF, find the VF and issue VF reset */ 4604 4600 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) ··· 4628 4624 } 4629 4625 4630 4626 /** 4627 + * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW 4628 + * @pf: board private structure 4629 + **/ 4630 + int i40e_get_current_fd_count(struct i40e_pf *pf) 4631 + { 4632 + int val, fcnt_prog; 4633 + val = rd32(&pf->hw, I40E_PFQF_FDSTAT); 4634 + fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + 4635 + ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> 4636 + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); 4637 + return fcnt_prog; 4638 + } 4639 + 4640 + /** 4641 + * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled 4642 + * @pf: board private structure 4643 + **/ 4644 + void i40e_fdir_check_and_reenable(struct i40e_pf *pf) 4645 + { 4646 + u32 fcnt_prog, fcnt_avail; 4647 + 4648 + /* Check if, FD SB or ATR was auto disabled and if there is enough room 4649 + * to re-enable 4650 + */ 4651 + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 4652 + (pf->flags & I40E_FLAG_FD_SB_ENABLED)) 4653 + return; 4654 + fcnt_prog = i40e_get_current_fd_count(pf); 4655 + fcnt_avail = pf->hw.fdir_shared_filter_count + 4656 + pf->fdir_pf_filter_count; 4657 + if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) { 4658 + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && 4659 + (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { 4660 + pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 4661 + dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); 4662 + } 4663 + } 4664 + /* Wait for some more space to be available to turn on ATR */ 4665 + if 
(fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { 4666 + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 4667 + (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { 4668 + pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; 4669 + dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); 4670 + } 4671 + } 4672 + } 4673 + 4674 + /** 4631 4675 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 4632 4676 * @pf: board private structure 4633 4677 **/ ··· 4684 4632 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT)) 4685 4633 return; 4686 4634 4687 - pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; 4688 - 4689 4635 /* if interface is down do nothing */ 4690 4636 if (test_bit(__I40E_DOWN, &pf->state)) 4691 4637 return; 4638 + i40e_fdir_check_and_reenable(pf); 4639 + 4640 + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 4641 + (pf->flags & I40E_FLAG_FD_SB_ENABLED)) 4642 + pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; 4692 4643 } 4693 4644 4694 4645 /** ··· 5001 4946 event.msg_size); 5002 4947 break; 5003 4948 case i40e_aqc_opc_lldp_update_mib: 5004 - dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 4949 + dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 5005 4950 #ifdef CONFIG_I40E_DCB 5006 4951 rtnl_lock(); 5007 4952 ret = i40e_handle_lldp_event(pf, &event); ··· 5009 4954 #endif /* CONFIG_I40E_DCB */ 5010 4955 break; 5011 4956 case i40e_aqc_opc_event_lan_overflow: 5012 - dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 4957 + dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 5013 4958 i40e_handle_lan_overflow_event(pf, &event); 5014 4959 break; 5015 4960 case i40e_aqc_opc_send_msg_to_peer: ··· 5286 5231 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 5287 5232 return 0; 5288 5233 5289 - dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n"); 5234 + dev_dbg(&pf->pdev->dev, "Tearing down internal switch for 
reset\n"); 5290 5235 5291 5236 if (i40e_check_asq_alive(hw)) 5292 5237 i40e_vc_notify_reset(pf); ··· 5333 5278 5334 5279 if (test_bit(__I40E_DOWN, &pf->state)) 5335 5280 goto end_core_reset; 5336 - dev_info(&pf->pdev->dev, "Rebuilding internal switch\n"); 5281 + dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); 5337 5282 5338 5283 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ 5339 5284 ret = i40e_init_adminq(&pf->hw); ··· 5383 5328 * try to recover minimal use by getting the basic PF VSI working. 5384 5329 */ 5385 5330 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { 5386 - dev_info(&pf->pdev->dev, "attempting to rebuild switch\n"); 5331 + dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); 5387 5332 /* find the one VEB connected to the MAC, and find orphans */ 5388 5333 for (v = 0; v < I40E_MAX_VEB; v++) { 5389 5334 if (!pf->veb[v]) ··· 5448 5393 dv.subbuild_version = 0; 5449 5394 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 5450 5395 5451 - dev_info(&pf->pdev->dev, "PF reset done\n"); 5396 + dev_info(&pf->pdev->dev, "reset complete\n"); 5452 5397 5453 5398 end_core_reset: 5454 5399 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); ··· 5497 5442 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) 5498 5443 >> I40E_GL_MDET_TX_QUEUE_SHIFT; 5499 5444 dev_info(&pf->pdev->dev, 5500 - "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n", 5445 + "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n", 5501 5446 event, queue, func); 5502 5447 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 5503 5448 mdd_detected = true; ··· 5511 5456 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) 5512 5457 >> I40E_GL_MDET_RX_QUEUE_SHIFT; 5513 5458 dev_info(&pf->pdev->dev, 5514 - "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n", 5459 + "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", 5515 5460 event, queue, func); 5516 5461 wr32(hw, I40E_GL_MDET_RX, 
0xffffffff); 5517 5462 mdd_detected = true; ··· 6348 6293 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 6349 6294 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 6350 6295 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 6351 - dev_info(&pf->pdev->dev, 6352 - "Flow Director ATR mode Enabled\n"); 6353 6296 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { 6354 6297 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 6355 - dev_info(&pf->pdev->dev, 6356 - "Flow Director Side Band mode Enabled\n"); 6357 6298 } else { 6358 6299 dev_info(&pf->pdev->dev, 6359 6300 "Flow Director Side Band mode Disabled in MFP mode\n"); ··· 6373 6322 pf->num_req_vfs = min_t(int, 6374 6323 pf->hw.func_caps.num_vfs, 6375 6324 I40E_MAX_VF_COUNT); 6376 - dev_info(&pf->pdev->dev, 6377 - "Number of VFs being requested for PF[%d] = %d\n", 6378 - pf->hw.pf_id, pf->num_req_vfs); 6379 6325 } 6380 6326 #endif /* CONFIG_PCI_IOV */ 6381 6327 pf->eeprom_version = 0xDEAD; ··· 6854 6806 if (vsi->netdev) { 6855 6807 /* results in a call to i40e_close() */ 6856 6808 unregister_netdev(vsi->netdev); 6857 - free_netdev(vsi->netdev); 6858 - vsi->netdev = NULL; 6859 6809 } 6860 6810 } else { 6861 6811 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) ··· 6872 6826 6873 6827 i40e_vsi_delete(vsi); 6874 6828 i40e_vsi_free_q_vectors(vsi); 6829 + if (vsi->netdev) { 6830 + free_netdev(vsi->netdev); 6831 + vsi->netdev = NULL; 6832 + } 6875 6833 i40e_vsi_clear_rings(vsi); 6876 6834 i40e_vsi_clear(vsi); 6877 6835 ··· 6930 6880 } 6931 6881 6932 6882 if (vsi->base_vector) { 6933 - dev_info(&pf->pdev->dev, 6934 - "VSI %d has non-zero base vector %d\n", 6883 + dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", 6935 6884 vsi->seid, vsi->base_vector); 6936 6885 return -EEXIST; 6937 6886 } ··· 6949 6900 vsi->num_q_vectors, vsi->idx); 6950 6901 if (vsi->base_vector < 0) { 6951 6902 dev_info(&pf->pdev->dev, 6952 - "failed to get q tracking for VSI %d, err=%d\n", 6903 + "failed to get queue tracking for VSI %d, err=%d\n", 6953 6904 
vsi->seid, vsi->base_vector); 6954 6905 i40e_vsi_free_q_vectors(vsi); 6955 6906 ret = -ENOENT; ··· 7906 7857 return 0; 7907 7858 } 7908 7859 7860 + #define INFO_STRING_LEN 255 7861 + static void i40e_print_features(struct i40e_pf *pf) 7862 + { 7863 + struct i40e_hw *hw = &pf->hw; 7864 + char *buf, *string; 7865 + 7866 + string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); 7867 + if (!string) { 7868 + dev_err(&pf->pdev->dev, "Features string allocation failed\n"); 7869 + return; 7870 + } 7871 + 7872 + buf = string; 7873 + 7874 + buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); 7875 + #ifdef CONFIG_PCI_IOV 7876 + buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); 7877 + #endif 7878 + buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, 7879 + pf->vsi[pf->lan_vsi]->num_queue_pairs); 7880 + 7881 + if (pf->flags & I40E_FLAG_RSS_ENABLED) 7882 + buf += sprintf(buf, "RSS "); 7883 + buf += sprintf(buf, "FDir "); 7884 + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) 7885 + buf += sprintf(buf, "ATR "); 7886 + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) 7887 + buf += sprintf(buf, "NTUPLE "); 7888 + if (pf->flags & I40E_FLAG_DCB_ENABLED) 7889 + buf += sprintf(buf, "DCB "); 7890 + if (pf->flags & I40E_FLAG_PTP) 7891 + buf += sprintf(buf, "PTP "); 7892 + 7893 + BUG_ON(buf > (string + INFO_STRING_LEN)); 7894 + dev_info(&pf->pdev->dev, "%s\n", string); 7895 + kfree(string); 7896 + } 7897 + 7909 7898 /** 7910 7899 * i40e_probe - Device initialization routine 7911 7900 * @pdev: PCI device information struct ··· 7970 7883 return err; 7971 7884 7972 7885 /* set up for high or low dma */ 7973 - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 7974 - /* coherent mask for the same size will always succeed if 7975 - * dma_set_mask does 7976 - */ 7977 - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 7978 - } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 7979 - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 7980 - } else { 7981 - dev_err(&pdev->dev, "DMA 
configuration failed: %d\n", err); 7982 - err = -EIO; 7886 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7887 + if (err) 7888 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7889 + if (err) { 7890 + dev_err(&pdev->dev, 7891 + "DMA configuration failed: 0x%x\n", err); 7983 7892 goto err_dma; 7984 7893 } 7985 7894 ··· 8213 8130 8214 8131 i40e_set_pci_config_data(hw, link_status); 8215 8132 8216 - dev_info(&pdev->dev, "PCI Express: %s %s\n", 8133 + dev_info(&pdev->dev, "PCI-Express: %s %s\n", 8217 8134 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : 8218 8135 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : 8219 8136 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : ··· 8229 8146 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 8230 8147 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 8231 8148 } 8149 + 8150 + /* print a string summarizing features */ 8151 + i40e_print_features(pf); 8232 8152 8233 8153 return 0; 8234 8154
+51 -9
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 430 430 /** 431 431 * i40e_fd_handle_status - check the Programming Status for FD 432 432 * @rx_ring: the Rx ring for this descriptor 433 - * @qw: the descriptor data 433 + * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor. 434 434 * @prog_id: the id originally used for programming 435 435 * 436 436 * This is used to verify if the FD programming or invalidation 437 437 * requested by SW to the HW is successful or not and take actions accordingly. 438 438 **/ 439 - static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id) 439 + static void i40e_fd_handle_status(struct i40e_ring *rx_ring, 440 + union i40e_rx_desc *rx_desc, u8 prog_id) 440 441 { 441 - struct pci_dev *pdev = rx_ring->vsi->back->pdev; 442 + struct i40e_pf *pf = rx_ring->vsi->back; 443 + struct pci_dev *pdev = pf->pdev; 444 + u32 fcnt_prog, fcnt_avail; 442 445 u32 error; 446 + u64 qw; 443 447 448 + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 444 449 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> 445 450 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; 446 451 447 - /* for now just print the Status */ 448 - dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n", 449 - prog_id, error); 452 + if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { 453 + dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", 454 + rx_desc->wb.qword0.hi_dword.fd_id); 455 + 456 + /* filter programming failed most likely due to table full */ 457 + fcnt_prog = i40e_get_current_fd_count(pf); 458 + fcnt_avail = pf->hw.fdir_shared_filter_count + 459 + pf->fdir_pf_filter_count; 460 + 461 + /* If ATR is running fcnt_prog can quickly change, 462 + * if we are very close to full, it makes sense to disable 463 + * FD ATR/SB and then re-enable it when there is room. 
464 + */ 465 + if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { 466 + /* Turn off ATR first */ 467 + if (pf->flags | I40E_FLAG_FD_ATR_ENABLED) { 468 + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; 469 + dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n"); 470 + pf->auto_disable_flags |= 471 + I40E_FLAG_FD_ATR_ENABLED; 472 + pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; 473 + } else if (pf->flags | I40E_FLAG_FD_SB_ENABLED) { 474 + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 475 + dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); 476 + pf->auto_disable_flags |= 477 + I40E_FLAG_FD_SB_ENABLED; 478 + pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; 479 + } 480 + } else { 481 + dev_info(&pdev->dev, "FD filter programming error"); 482 + } 483 + } else if (error == 484 + (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { 485 + netdev_info(rx_ring->vsi->netdev, "ntuple filter loc = %d, could not be removed\n", 486 + rx_desc->wb.qword0.hi_dword.fd_id); 487 + } 450 488 } 451 489 452 490 /** ··· 881 843 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; 882 844 883 845 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) 884 - i40e_fd_handle_status(rx_ring, qw, id); 846 + i40e_fd_handle_status(rx_ring, rx_desc, id); 885 847 } 886 848 887 849 /** ··· 1574 1536 if (!tx_ring->atr_sample_rate) 1575 1537 return; 1576 1538 1577 - tx_ring->atr_count++; 1578 - 1579 1539 /* snag network header to get L4 type and address */ 1580 1540 hdr.network = skb_network_header(skb); 1581 1541 ··· 1594 1558 } 1595 1559 1596 1560 th = (struct tcphdr *)(hdr.network + hlen); 1561 + 1562 + /* Due to lack of space, no more new filters can be programmed */ 1563 + if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) 1564 + return; 1565 + 1566 + tx_ring->atr_count++; 1597 1567 1598 1568 /* sample on all syn/fin packets or once every atr sample rate */ 1599 1569 if (!th->fin && !th->syn && (tx_ring->atr_count < 
tx_ring->atr_sample_rate))
+5 -1
drivers/net/ethernet/intel/i40e/i40e_type.h
··· 458 458 union { 459 459 __le32 rss; /* RSS Hash */ 460 460 __le32 fcoe_param; /* FCoE DDP Context id */ 461 + /* Flow director filter id in case of 462 + * Programming status desc WB 463 + */ 464 + __le32 fd_id; 461 465 } hi_dword; 462 466 } qword0; 463 467 struct { ··· 702 698 enum i40e_rx_prog_status_desc_error_bits { 703 699 /* Note: These are predefined bit offsets */ 704 700 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, 705 - I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1, 701 + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, 706 702 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, 707 703 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 708 704 };
+5 -4
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
··· 69 69 { 70 70 struct i40e_pf *pf = vf->pf; 71 71 72 - return vector_id <= pf->hw.func_caps.num_msix_vectors_vf; 72 + return vector_id < pf->hw.func_caps.num_msix_vectors_vf; 73 73 } 74 74 75 75 /***********************vf resource mgmt routines*****************/ ··· 126 126 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 127 127 else 128 128 reg_idx = I40E_VPINT_LNKLSTN( 129 - (pf->hw.func_caps.num_msix_vectors_vf 130 - * vf->vf_id) + (vector_id - 1)); 129 + ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + 130 + (vector_id - 1)); 131 131 132 132 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 133 133 /* Special case - No queues mapped on this vector */ ··· 506 506 vf->lan_vsi_index = 0; 507 507 vf->lan_vsi_id = 0; 508 508 } 509 - msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1; 509 + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 510 + 510 511 /* disable interrupts so the VF starts in a known state */ 511 512 for (i = 0; i < msix_vf; i++) { 512 513 /* format is same for both registers */
+5 -1
drivers/net/ethernet/intel/i40evf/i40e_type.h
··· 464 464 union { 465 465 __le32 rss; /* RSS Hash */ 466 466 __le32 fcoe_param; /* FCoE DDP Context id */ 467 + /* Flow director filter id in case of 468 + * Programming status desc WB 469 + */ 470 + __le32 fd_id; 467 471 } hi_dword; 468 472 } qword0; 469 473 struct { ··· 708 704 enum i40e_rx_prog_status_desc_error_bits { 709 705 /* Note: These are predefined bit offsets */ 710 706 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, 711 - I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1, 707 + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, 712 708 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, 713 709 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 714 710 };
+7 -12
drivers/net/ethernet/intel/i40evf/i40evf_main.c
··· 1141 1141 * (roughly) twice the number of vectors as there are CPU's. 1142 1142 */ 1143 1143 v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; 1144 - v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1); 1144 + v_budget = min(v_budget, (int)adapter->vf_res->max_vectors); 1145 1145 1146 1146 /* A failure in MSI-X entry allocation isn't fatal, but it does 1147 1147 * mean we disable MSI-X capabilities of the adapter. ··· 2182 2182 if (err) 2183 2183 return err; 2184 2184 2185 - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 2186 - /* coherent mask for the same size will always succeed if 2187 - * dma_set_mask does 2188 - */ 2189 - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 2190 - } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 2191 - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2192 - } else { 2193 - dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n", 2194 - __func__, err); 2195 - err = -EIO; 2185 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2186 + if (err) 2187 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2188 + if (err) { 2189 + dev_err(&pdev->dev, 2190 + "DMA configuration failed: 0x%x\n", err); 2196 2191 goto err_dma; 2197 2192 } 2198 2193
+1 -1
drivers/net/ethernet/intel/igb/igb_main.c
··· 1978 1978 } 1979 1979 } 1980 1980 #endif 1981 - /*Re-establish EEE setting */ 1981 + /* Re-establish EEE setting */ 1982 1982 if (hw->phy.media_type == e1000_media_type_copper) { 1983 1983 switch (mac->type) { 1984 1984 case e1000_i350: