Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-01-20 (iavf)

This series contains updates to iavf driver only.

Michal Schmidt converts the single shared iavf workqueue to a per-adapter
workqueue to avoid deadlock issues.

Marcin moves the setting of VLAN-related netdev features to the watchdog
task to avoid an RTNL deadlock.

Stefan Assmann schedules immediate watchdog task execution on changing
primary MAC to avoid excessive delay.

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
iavf: schedule watchdog immediately when changing primary MAC
iavf: Move netdev_update_features() into watchdog task
iavf: fix temporary deadlock and failure to set MAC address
====================

Link: https://lore.kernel.org/r/20230120211036.430946-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+66 -69
+1 -1
drivers/net/ethernet/intel/iavf/iavf.h
··· 249 249 250 250 /* board specific private data structure */ 251 251 struct iavf_adapter { 252 + struct workqueue_struct *wq; 252 253 struct work_struct reset_task; 253 254 struct work_struct adminq_task; 254 255 struct delayed_work client_task; ··· 460 459 461 460 /* needed by iavf_ethtool.c */ 462 461 extern char iavf_driver_name[]; 463 - extern struct workqueue_struct *iavf_wq; 464 462 465 463 static inline const char *iavf_state_str(enum iavf_state_t state) 466 464 {
+5 -5
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
··· 532 532 if (changed_flags & IAVF_FLAG_LEGACY_RX) { 533 533 if (netif_running(netdev)) { 534 534 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 535 - queue_work(iavf_wq, &adapter->reset_task); 535 + queue_work(adapter->wq, &adapter->reset_task); 536 536 } 537 537 } 538 538 ··· 672 672 673 673 if (netif_running(netdev)) { 674 674 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 675 - queue_work(iavf_wq, &adapter->reset_task); 675 + queue_work(adapter->wq, &adapter->reset_task); 676 676 } 677 677 678 678 return 0; ··· 1433 1433 adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; 1434 1434 spin_unlock_bh(&adapter->fdir_fltr_lock); 1435 1435 1436 - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1436 + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); 1437 1437 1438 1438 ret: 1439 1439 if (err && fltr) ··· 1474 1474 spin_unlock_bh(&adapter->fdir_fltr_lock); 1475 1475 1476 1476 if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) 1477 - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1477 + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); 1478 1478 1479 1479 return err; 1480 1480 } ··· 1658 1658 spin_unlock_bh(&adapter->adv_rss_lock); 1659 1659 1660 1660 if (!err) 1661 - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1661 + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); 1662 1662 1663 1663 mutex_unlock(&adapter->crit_lock); 1664 1664
+51 -62
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 49 49 MODULE_LICENSE("GPL v2"); 50 50 51 51 static const struct net_device_ops iavf_netdev_ops; 52 - struct workqueue_struct *iavf_wq; 53 52 54 53 int iavf_status_to_errno(enum iavf_status status) 55 54 { ··· 276 277 if (!(adapter->flags & 277 278 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { 278 279 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 279 - queue_work(iavf_wq, &adapter->reset_task); 280 + queue_work(adapter->wq, &adapter->reset_task); 280 281 } 281 282 } 282 283 ··· 290 291 void iavf_schedule_request_stats(struct iavf_adapter *adapter) 291 292 { 292 293 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; 293 - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 294 + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); 294 295 } 295 296 296 297 /** ··· 410 411 411 412 if (adapter->state != __IAVF_REMOVE) 412 413 /* schedule work on the private workqueue */ 413 - queue_work(iavf_wq, &adapter->adminq_task); 414 + queue_work(adapter->wq, &adapter->adminq_task); 414 415 415 416 return IRQ_HANDLED; 416 417 } ··· 1033 1034 1034 1035 /* schedule the watchdog task to immediately process the request */ 1035 1036 if (f) { 1036 - queue_work(iavf_wq, &adapter->watchdog_task.work); 1037 + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); 1037 1038 return 0; 1038 1039 } 1039 1040 return -ENOMEM; ··· 1256 1257 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; 1257 1258 if (CLIENT_ENABLED(adapter)) 1258 1259 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; 1259 - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1260 + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); 1260 1261 } 1261 1262 1262 1263 /** ··· 1413 1414 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; 1414 1415 } 1415 1416 1416 - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 1417 + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); 1417 1418 } 1418 1419 1419 1420 /** ··· 2247 2248 2248 2249 if (aq_required) { 2249 2250 adapter->aq_required |= 
aq_required; 2250 - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); 2251 + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); 2251 2252 } 2252 2253 } 2253 2254 ··· 2692 2693 goto restart_watchdog; 2693 2694 } 2694 2695 2696 + if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && 2697 + adapter->netdev_registered && 2698 + !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) && 2699 + rtnl_trylock()) { 2700 + netdev_update_features(adapter->netdev); 2701 + rtnl_unlock(); 2702 + adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; 2703 + } 2704 + 2695 2705 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) 2696 2706 iavf_change_state(adapter, __IAVF_COMM_FAILED); 2697 2707 ··· 2708 2700 adapter->aq_required = 0; 2709 2701 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2710 2702 mutex_unlock(&adapter->crit_lock); 2711 - queue_work(iavf_wq, &adapter->reset_task); 2703 + queue_work(adapter->wq, &adapter->reset_task); 2712 2704 return; 2713 2705 } 2714 2706 ··· 2716 2708 case __IAVF_STARTUP: 2717 2709 iavf_startup(adapter); 2718 2710 mutex_unlock(&adapter->crit_lock); 2719 - queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2711 + queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2720 2712 msecs_to_jiffies(30)); 2721 2713 return; 2722 2714 case __IAVF_INIT_VERSION_CHECK: 2723 2715 iavf_init_version_check(adapter); 2724 2716 mutex_unlock(&adapter->crit_lock); 2725 - queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2717 + queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2726 2718 msecs_to_jiffies(30)); 2727 2719 return; 2728 2720 case __IAVF_INIT_GET_RESOURCES: 2729 2721 iavf_init_get_resources(adapter); 2730 2722 mutex_unlock(&adapter->crit_lock); 2731 - queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2723 + queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2732 2724 msecs_to_jiffies(1)); 2733 2725 return; 2734 2726 case __IAVF_INIT_EXTENDED_CAPS: 2735 2727 iavf_init_process_extended_caps(adapter); 2736 2728 
mutex_unlock(&adapter->crit_lock); 2737 - queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2729 + queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2738 2730 msecs_to_jiffies(1)); 2739 2731 return; 2740 2732 case __IAVF_INIT_CONFIG_ADAPTER: 2741 2733 iavf_init_config_adapter(adapter); 2742 2734 mutex_unlock(&adapter->crit_lock); 2743 - queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2735 + queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2744 2736 msecs_to_jiffies(1)); 2745 2737 return; 2746 2738 case __IAVF_INIT_FAILED: ··· 2759 2751 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 2760 2752 iavf_shutdown_adminq(hw); 2761 2753 mutex_unlock(&adapter->crit_lock); 2762 - queue_delayed_work(iavf_wq, 2754 + queue_delayed_work(adapter->wq, 2763 2755 &adapter->watchdog_task, (5 * HZ)); 2764 2756 return; 2765 2757 } 2766 2758 /* Try again from failed step*/ 2767 2759 iavf_change_state(adapter, adapter->last_state); 2768 2760 mutex_unlock(&adapter->crit_lock); 2769 - queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); 2761 + queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ); 2770 2762 return; 2771 2763 case __IAVF_COMM_FAILED: 2772 2764 if (test_bit(__IAVF_IN_REMOVE_TASK, ··· 2797 2789 adapter->aq_required = 0; 2798 2790 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2799 2791 mutex_unlock(&adapter->crit_lock); 2800 - queue_delayed_work(iavf_wq, 2792 + queue_delayed_work(adapter->wq, 2801 2793 &adapter->watchdog_task, 2802 2794 msecs_to_jiffies(10)); 2803 2795 return; 2804 2796 case __IAVF_RESETTING: 2805 2797 mutex_unlock(&adapter->crit_lock); 2806 - queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2798 + queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2799 + HZ * 2); 2807 2800 return; 2808 2801 case __IAVF_DOWN: 2809 2802 case __IAVF_DOWN_PENDING: ··· 2843 2834 adapter->aq_required = 0; 2844 2835 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 2845 2836 dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); 2846 - 
queue_work(iavf_wq, &adapter->reset_task); 2837 + queue_work(adapter->wq, &adapter->reset_task); 2847 2838 mutex_unlock(&adapter->crit_lock); 2848 - queue_delayed_work(iavf_wq, 2839 + queue_delayed_work(adapter->wq, 2849 2840 &adapter->watchdog_task, HZ * 2); 2850 2841 return; 2851 2842 } ··· 2854 2845 mutex_unlock(&adapter->crit_lock); 2855 2846 restart_watchdog: 2856 2847 if (adapter->state >= __IAVF_DOWN) 2857 - queue_work(iavf_wq, &adapter->adminq_task); 2848 + queue_work(adapter->wq, &adapter->adminq_task); 2858 2849 if (adapter->aq_required) 2859 - queue_delayed_work(iavf_wq, &adapter->watchdog_task, 2850 + queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2860 2851 msecs_to_jiffies(20)); 2861 2852 else 2862 - queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); 2853 + queue_delayed_work(adapter->wq, &adapter->watchdog_task, 2854 + HZ * 2); 2863 2855 } 2864 2856 2865 2857 /** ··· 2962 2952 */ 2963 2953 if (!mutex_trylock(&adapter->crit_lock)) { 2964 2954 if (adapter->state != __IAVF_REMOVE) 2965 - queue_work(iavf_wq, &adapter->reset_task); 2955 + queue_work(adapter->wq, &adapter->reset_task); 2966 2956 2967 2957 goto reset_finish; 2968 2958 } ··· 3126 3116 bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID); 3127 3117 bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID); 3128 3118 3129 - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); 3119 + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2); 3130 3120 3131 3121 /* We were running when the reset started, so we need to restore some 3132 3122 * state here. 
··· 3218 3208 if (adapter->state == __IAVF_REMOVE) 3219 3209 return; 3220 3210 3221 - queue_work(iavf_wq, &adapter->adminq_task); 3211 + queue_work(adapter->wq, &adapter->adminq_task); 3222 3212 goto out; 3223 3213 } 3224 3214 ··· 3242 3232 } while (pending); 3243 3233 mutex_unlock(&adapter->crit_lock); 3244 3234 3245 - if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) { 3246 - if (adapter->netdev_registered || 3247 - !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { 3248 - struct net_device *netdev = adapter->netdev; 3249 - 3250 - rtnl_lock(); 3251 - netdev_update_features(netdev); 3252 - rtnl_unlock(); 3253 - /* Request VLAN offload settings */ 3254 - if (VLAN_V2_ALLOWED(adapter)) 3255 - iavf_set_vlan_offload_features 3256 - (adapter, 0, netdev->features); 3257 - 3258 - iavf_set_queue_vlan_tag_loc(adapter); 3259 - } 3260 - 3261 - adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; 3262 - } 3263 3235 if ((adapter->flags & 3264 3236 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || 3265 3237 adapter->state == __IAVF_RESETTING) ··· 4341 4349 4342 4350 if (netif_running(netdev)) { 4343 4351 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 4344 - queue_work(iavf_wq, &adapter->reset_task); 4352 + queue_work(adapter->wq, &adapter->reset_task); 4345 4353 } 4346 4354 4347 4355 return 0; ··· 4890 4898 hw = &adapter->hw; 4891 4899 hw->back = adapter; 4892 4900 4901 + adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, 4902 + iavf_driver_name); 4903 + if (!adapter->wq) { 4904 + err = -ENOMEM; 4905 + goto err_alloc_wq; 4906 + } 4907 + 4893 4908 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 4894 4909 iavf_change_state(adapter, __IAVF_STARTUP); 4895 4910 ··· 4941 4942 INIT_WORK(&adapter->adminq_task, iavf_adminq_task); 4942 4943 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); 4943 4944 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); 4944 - queue_delayed_work(iavf_wq, &adapter->watchdog_task, 4945 + 
queue_delayed_work(adapter->wq, &adapter->watchdog_task, 4945 4946 msecs_to_jiffies(5 * (pdev->devfn & 0x07))); 4946 4947 4947 4948 /* Setup the wait queue for indicating transition to down status */ ··· 4953 4954 return 0; 4954 4955 4955 4956 err_ioremap: 4957 + destroy_workqueue(adapter->wq); 4958 + err_alloc_wq: 4956 4959 free_netdev(netdev); 4957 4960 err_alloc_etherdev: 4958 4961 pci_disable_pcie_error_reporting(pdev); ··· 5024 5023 return err; 5025 5024 } 5026 5025 5027 - queue_work(iavf_wq, &adapter->reset_task); 5026 + queue_work(adapter->wq, &adapter->reset_task); 5028 5027 5029 5028 netif_device_attach(adapter->netdev); 5030 5029 ··· 5171 5170 } 5172 5171 spin_unlock_bh(&adapter->adv_rss_lock); 5173 5172 5173 + destroy_workqueue(adapter->wq); 5174 + 5174 5175 free_netdev(netdev); 5175 5176 5176 5177 pci_disable_pcie_error_reporting(pdev); ··· 5199 5196 **/ 5200 5197 static int __init iavf_init_module(void) 5201 5198 { 5202 - int ret; 5203 - 5204 5199 pr_info("iavf: %s\n", iavf_driver_string); 5205 5200 5206 5201 pr_info("%s\n", iavf_copyright); 5207 5202 5208 - iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, 5209 - iavf_driver_name); 5210 - if (!iavf_wq) { 5211 - pr_err("%s: Failed to create workqueue\n", iavf_driver_name); 5212 - return -ENOMEM; 5213 - } 5214 - 5215 - ret = pci_register_driver(&iavf_driver); 5216 - if (ret) 5217 - destroy_workqueue(iavf_wq); 5218 - 5219 - return ret; 5203 + return pci_register_driver(&iavf_driver); 5220 5204 } 5221 5205 5222 5206 module_init(iavf_init_module); ··· 5217 5227 static void __exit iavf_exit_module(void) 5218 5228 { 5219 5229 pci_unregister_driver(&iavf_driver); 5220 - destroy_workqueue(iavf_wq); 5221 5230 } 5222 5231 5223 5232 module_exit(iavf_exit_module);
+9 -1
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
··· 1952 1952 if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { 1953 1953 adapter->flags |= IAVF_FLAG_RESET_PENDING; 1954 1954 dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); 1955 - queue_work(iavf_wq, &adapter->reset_task); 1955 + queue_work(adapter->wq, &adapter->reset_task); 1956 1956 } 1957 1957 break; 1958 1958 default: ··· 2226 2226 2227 2227 iavf_process_config(adapter); 2228 2228 adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; 2229 + 2230 + /* Request VLAN offload settings */ 2231 + if (VLAN_V2_ALLOWED(adapter)) 2232 + iavf_set_vlan_offload_features(adapter, 0, 2233 + netdev->features); 2234 + 2235 + iavf_set_queue_vlan_tag_loc(adapter); 2236 + 2229 2237 was_mac_changed = !ether_addr_equal(netdev->dev_addr, 2230 2238 adapter->hw.mac.addr); 2231 2239