Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: mana: Support HW link state events

Handle the NIC hardware link state events received from the HW
channel, then set the proper link state accordingly.

And, add a feature bit, GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE,
to inform the NIC hardware this handler exists.

Our MANA NIC only sends link state down/up messages when the VM
needs to rerun its DHCP client and change its IP address. So, call
netif_carrier_on() in probe() so that the NIC shows the correct
initial state in /sys/class/net/ethX/operstate.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Link: https://patch.msgid.link/1761770601-16920-1-git-send-email-haiyangz@linux.microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Haiyang Zhang and committed by
Jakub Kicinski
54133f9b 1a2352ad

+71 -6
+1
drivers/net/ethernet/microsoft/mana/gdma_main.c
··· 528 528 case GDMA_EQE_HWC_INIT_DONE: 529 529 case GDMA_EQE_HWC_SOC_SERVICE: 530 530 case GDMA_EQE_RNIC_QP_FATAL: 531 + case GDMA_EQE_HWC_SOC_RECONFIG_DATA: 531 532 if (!eq->eq.callback) 532 533 break; 533 534
+12
drivers/net/ethernet/microsoft/mana/hw_channel.c
··· 118 118 struct gdma_dev *gd = hwc->gdma_dev; 119 119 union hwc_init_type_data type_data; 120 120 union hwc_init_eq_id_db eq_db; 121 + struct mana_context *ac; 121 122 u32 type, val; 122 123 int ret; 123 124 ··· 195 194 switch (type) { 196 195 case HWC_DATA_CFG_HWC_TIMEOUT: 197 196 hwc->hwc_timeout = val; 197 + break; 198 + 199 + case HWC_DATA_HW_LINK_CONNECT: 200 + case HWC_DATA_HW_LINK_DISCONNECT: 201 + ac = gd->gdma_context->mana.driver_data; 202 + if (!ac) 203 + break; 204 + 205 + WRITE_ONCE(ac->link_event, type); 206 + schedule_work(&ac->link_change_work); 207 + 198 208 break; 199 209 200 210 default:
+49 -5
drivers/net/ethernet/microsoft/mana/mana_en.c
··· 20 20 21 21 #include <net/mana/mana.h> 22 22 #include <net/mana/mana_auxiliary.h> 23 + #include <net/mana/hw_channel.h> 23 24 24 25 static DEFINE_IDA(mana_adev_ida); 25 26 ··· 85 84 /* Ensure port state updated before txq state */ 86 85 smp_wmb(); 87 86 88 - netif_carrier_on(ndev); 89 87 netif_tx_wake_all_queues(ndev); 90 88 netdev_dbg(ndev, "%s successful\n", __func__); 91 89 return 0; ··· 98 98 return 0; 99 99 100 100 return mana_detach(ndev, true); 101 + } 102 + 103 + static void mana_link_state_handle(struct work_struct *w) 104 + { 105 + struct mana_context *ac; 106 + struct net_device *ndev; 107 + u32 link_event; 108 + bool link_up; 109 + int i; 110 + 111 + ac = container_of(w, struct mana_context, link_change_work); 112 + 113 + rtnl_lock(); 114 + 115 + link_event = READ_ONCE(ac->link_event); 116 + 117 + if (link_event == HWC_DATA_HW_LINK_CONNECT) 118 + link_up = true; 119 + else if (link_event == HWC_DATA_HW_LINK_DISCONNECT) 120 + link_up = false; 121 + else 122 + goto out; 123 + 124 + /* Process all ports */ 125 + for (i = 0; i < ac->num_ports; i++) { 126 + ndev = ac->ports[i]; 127 + if (!ndev) 128 + continue; 129 + 130 + if (link_up) { 131 + netif_carrier_on(ndev); 132 + 133 + __netdev_notify_peers(ndev); 134 + } else { 135 + netif_carrier_off(ndev); 136 + } 137 + } 138 + 139 + out: 140 + rtnl_unlock(); 101 141 } 102 142 103 143 static bool mana_can_tx(struct gdma_queue *wq) ··· 3099 3059 /* Ensure port state updated before txq state */ 3100 3060 smp_wmb(); 3101 3061 3102 - if (apc->port_is_up) 3103 - netif_carrier_on(ndev); 3104 - 3105 3062 netif_device_attach(ndev); 3106 3063 3107 3064 return 0; ··· 3191 3154 smp_wmb(); 3192 3155 3193 3156 netif_tx_disable(ndev); 3194 - netif_carrier_off(ndev); 3195 3157 3196 3158 if (apc->port_st_save) { 3197 3159 err = mana_dealloc_queues(ndev); ··· 3278 3242 netdev_err(ndev, "Unable to register netdev.\n"); 3279 3243 goto free_indir; 3280 3244 } 3245 + 3246 + netif_carrier_on(ndev); 3281 3247 3282 3248 
debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed); 3283 3249 ··· 3469 3431 3470 3432 if (!resuming) { 3471 3433 ac->num_ports = num_ports; 3434 + 3435 + INIT_WORK(&ac->link_change_work, mana_link_state_handle); 3472 3436 } else { 3473 3437 if (ac->num_ports != num_ports) { 3474 3438 dev_err(dev, "The number of vPorts changed: %d->%d\n", ··· 3478 3438 err = -EPROTO; 3479 3439 goto out; 3480 3440 } 3441 + 3442 + enable_work(&ac->link_change_work); 3481 3443 } 3482 3444 3483 3445 if (ac->num_ports == 0) ··· 3541 3499 struct net_device *ndev; 3542 3500 int err; 3543 3501 int i; 3502 + 3503 + disable_work_sync(&ac->link_change_work); 3544 3504 3545 3505 /* adev currently doesn't support suspending, always remove it */ 3546 3506 if (gd->adev)
+3 -1
include/net/mana/gdma.h
··· 590 590 591 591 /* Driver can self reset on FPGA Reconfig EQE notification */ 592 592 #define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17) 593 + #define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6) 593 594 594 595 #define GDMA_DRV_CAP_FLAGS1 \ 595 596 (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ ··· 600 599 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \ 601 600 GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \ 602 601 GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \ 603 - GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE) 602 + GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \ 603 + GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE) 604 604 605 605 #define GDMA_DRV_CAP_FLAGS2 0 606 606
+2
include/net/mana/hw_channel.h
··· 24 24 #define HWC_INIT_DATA_PF_DEST_CQ_ID 11 25 25 26 26 #define HWC_DATA_CFG_HWC_TIMEOUT 1 27 + #define HWC_DATA_HW_LINK_CONNECT 2 28 + #define HWC_DATA_HW_LINK_DISCONNECT 3 27 29 28 30 #define HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS 30000 29 31
+4
include/net/mana/mana.h
··· 477 477 struct dentry *mana_eqs_debugfs; 478 478 479 479 struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; 480 + 481 + /* Link state change work */ 482 + struct work_struct link_change_work; 483 + u32 link_event; 480 484 }; 481 485 482 486 struct mana_port_context {