Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'nfp-flower-handle-MTU-changes'

Jakub Kicinski says:

====================
nfp: flower: handle MTU changes

This set improves MTU handling for flower offload. The max MTU is
correctly capped and physical port MTU is communicated to the FW
(and indirectly HW).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+179 -16
+2 -2
drivers/net/ethernet/netronome/nfp/bpf/main.c
··· 221 221 } 222 222 223 223 static int 224 - nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) 224 + nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) 225 225 { 226 226 struct nfp_net *nn = netdev_priv(netdev); 227 227 unsigned int max_mtu; ··· 413 413 .init = nfp_bpf_init, 414 414 .clean = nfp_bpf_clean, 415 415 416 - .change_mtu = nfp_bpf_change_mtu, 416 + .check_mtu = nfp_bpf_check_mtu, 417 417 418 418 .extra_cap = nfp_bpf_extra_cap, 419 419
+39 -2
drivers/net/ethernet/netronome/nfp/flower/cmsg.c
··· 104 104 msg->ports[idx].phys_port = phys_port; 105 105 } 106 106 107 - int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok) 107 + int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok, 108 + unsigned int mtu, bool mtu_only) 108 109 { 109 110 struct nfp_flower_cmsg_portmod *msg; 110 111 struct sk_buff *skb; ··· 119 118 msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id); 120 119 msg->reserved = 0; 121 120 msg->info = carrier_ok; 122 - msg->mtu = cpu_to_be16(repr->netdev->mtu); 121 + 122 + if (mtu_only) 123 + msg->info |= NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY; 124 + 125 + msg->mtu = cpu_to_be16(mtu); 123 126 124 127 nfp_ctrl_tx(repr->app->ctrl, skb); 125 128 ··· 149 144 nfp_ctrl_tx(repr->app->ctrl, skb); 150 145 151 146 return 0; 147 + } 148 + 149 + static bool 150 + nfp_flower_process_mtu_ack(struct nfp_app *app, struct sk_buff *skb) 151 + { 152 + struct nfp_flower_priv *app_priv = app->priv; 153 + struct nfp_flower_cmsg_portmod *msg; 154 + 155 + msg = nfp_flower_cmsg_get_data(skb); 156 + 157 + if (!(msg->info & NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY)) 158 + return false; 159 + 160 + spin_lock_bh(&app_priv->mtu_conf.lock); 161 + if (!app_priv->mtu_conf.requested_val || 162 + app_priv->mtu_conf.portnum != be32_to_cpu(msg->portnum) || 163 + be16_to_cpu(msg->mtu) != app_priv->mtu_conf.requested_val) { 164 + /* Not an ack for requested MTU change. */ 165 + spin_unlock_bh(&app_priv->mtu_conf.lock); 166 + return false; 167 + } 168 + 169 + app_priv->mtu_conf.ack = true; 170 + app_priv->mtu_conf.requested_val = 0; 171 + wake_up(&app_priv->mtu_conf.wait_q); 172 + spin_unlock_bh(&app_priv->mtu_conf.lock); 173 + 174 + return true; 152 175 } 153 176 154 177 static void ··· 301 268 if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) { 302 269 /* We need to deal with stats updates from HW asap */ 303 270 nfp_flower_rx_flow_stats(app, skb); 271 + dev_consume_skb_any(skb); 272 + } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_MOD && 273 + nfp_flower_process_mtu_ack(app, skb)) { 274 + /* Handle MTU acks outside wq to prevent RTNL conflict. */ 304 275 dev_consume_skb_any(skb); 305 276 } else { 306 277 skb_queue_tail(&priv->cmsg_skbs, skb);
+3 -1
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
··· 397 397 }; 398 398 399 399 #define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK BIT(0) 400 + #define NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY BIT(1) 400 401 401 402 /* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */ 402 403 struct nfp_flower_cmsg_portreify { ··· 465 464 nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx, 466 465 unsigned int nbi, unsigned int nbi_port, 467 466 unsigned int phys_port); 468 - int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok); 467 + int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok, 468 + unsigned int mtu, bool mtu_only); 469 469 int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists); 470 470 void nfp_flower_cmsg_process_rx(struct work_struct *work); 471 471 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
+85 -2
drivers/net/ethernet/netronome/nfp/flower/main.c
··· 52 52 53 53 #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL 54 54 55 + #define NFP_FLOWER_FRAME_HEADROOM 158 56 + 55 57 static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn) 56 58 { 57 59 return "FLOWER"; ··· 159 157 { 160 158 int err; 161 159 162 - err = nfp_flower_cmsg_portmod(repr, true); 160 + err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false); 163 161 if (err) 164 162 return err; 165 163 ··· 173 171 { 174 172 netif_tx_disable(repr->netdev); 175 173 176 - return nfp_flower_cmsg_portmod(repr, false); 174 + return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false); 177 175 } 178 176 179 177 static int ··· 523 521 INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); 524 522 init_waitqueue_head(&app_priv->reify_wait_queue); 525 523 524 + init_waitqueue_head(&app_priv->mtu_conf.wait_q); 525 + spin_lock_init(&app_priv->mtu_conf.lock); 526 + 526 527 err = nfp_flower_metadata_init(app); 527 528 if (err) 528 529 goto err_free_app_priv; ··· 557 552 app->priv = NULL; 558 553 } 559 554 555 + static int 556 + nfp_flower_check_mtu(struct nfp_app *app, struct net_device *netdev, 557 + int new_mtu) 558 + { 559 + /* The flower fw reserves NFP_FLOWER_FRAME_HEADROOM bytes of the 560 + * supported max MTU to allow for appending tunnel headers. To prevent 561 + * unexpected behaviour this needs to be accounted for. 562 + */ 563 + if (new_mtu > netdev->max_mtu - NFP_FLOWER_FRAME_HEADROOM) { 564 + nfp_err(app->cpp, "New MTU (%d) is not valid\n", new_mtu); 565 + return -EINVAL; 566 + } 567 + 568 + return 0; 569 + } 570 + 571 + static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv) 572 + { 573 + bool ret; 574 + 575 + spin_lock_bh(&app_priv->mtu_conf.lock); 576 + ret = app_priv->mtu_conf.ack; 577 + spin_unlock_bh(&app_priv->mtu_conf.lock); 578 + 579 + return ret; 580 + } 581 + 582 + static int 583 + nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, 584 + int new_mtu) 585 + { 586 + struct nfp_flower_priv *app_priv = app->priv; 587 + struct nfp_repr *repr = netdev_priv(netdev); 588 + int err, ack; 589 + 590 + /* Only need to config FW for physical port MTU change. */ 591 + if (repr->port->type != NFP_PORT_PHYS_PORT) 592 + return 0; 593 + 594 + if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) { 595 + nfp_err(app->cpp, "Physical port MTU setting not supported\n"); 596 + return -EINVAL; 597 + } 598 + 599 + spin_lock_bh(&app_priv->mtu_conf.lock); 600 + app_priv->mtu_conf.ack = false; 601 + app_priv->mtu_conf.requested_val = new_mtu; 602 + app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id; 603 + spin_unlock_bh(&app_priv->mtu_conf.lock); 604 + 605 + err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu, 606 + true); 607 + if (err) { 608 + spin_lock_bh(&app_priv->mtu_conf.lock); 609 + app_priv->mtu_conf.requested_val = 0; 610 + spin_unlock_bh(&app_priv->mtu_conf.lock); 611 + return err; 612 + } 613 + 614 + /* Wait for fw to ack the change. */ 615 + ack = wait_event_timeout(app_priv->mtu_conf.wait_q, 616 + nfp_flower_check_ack(app_priv), 617 + msecs_to_jiffies(10)); 618 + 619 + if (!ack) { 620 + spin_lock_bh(&app_priv->mtu_conf.lock); 621 + app_priv->mtu_conf.requested_val = 0; 622 + spin_unlock_bh(&app_priv->mtu_conf.lock); 623 + nfp_warn(app->cpp, "MTU change not verified with fw\n"); 624 + return -EIO; 625 + } 626 + 627 + return 0; 628 + } 629 + 560 630 static int nfp_flower_start(struct nfp_app *app) 561 631 { 562 632 return nfp_tunnel_config_start(app); ··· 653 573 654 574 .init = nfp_flower_init, 655 575 .clean = nfp_flower_clean, 576 + 577 + .check_mtu = nfp_flower_check_mtu, 578 + .repr_change_mtu = nfp_flower_repr_change_mtu, 656 579 657 580 .vnic_alloc = nfp_flower_vnic_alloc, 658 581 .vnic_init = nfp_flower_vnic_init,
+19
drivers/net/ethernet/netronome/nfp/flower/main.h
··· 65 65 66 66 /* Extra features bitmap. */ 67 67 #define NFP_FL_FEATS_GENEVE BIT(0) 68 + #define NFP_FL_NBI_MTU_SETTING BIT(1) 68 69 69 70 struct nfp_fl_mask_id { 70 71 struct circ_buf mask_id_free_list; ··· 77 76 struct circ_buf free_list; 78 77 u32 init_unalloc; 79 78 u8 repeated_em_count; 79 + }; 80 + 81 + /** 82 + * struct nfp_mtu_conf - manage MTU setting 83 + * @portnum: NFP port number of repr with requested MTU change 84 + * @requested_val: MTU value requested for repr 85 + * @ack: Received ack that MTU has been correctly set 86 + * @wait_q: Wait queue for MTU acknowledgements 87 + * @lock: Lock for setting/reading MTU variables 88 + */ 89 + struct nfp_mtu_conf { 90 + u32 portnum; 91 + unsigned int requested_val; 92 + bool ack; 93 + wait_queue_head_t wait_q; 94 + spinlock_t lock; 80 95 }; 81 96 82 97 /** ··· 123 106 * @reify_replies: atomically stores the number of replies received 124 107 * from firmware for repr reify 125 108 * @reify_wait_queue: wait queue for repr reify response counting 109 + * @mtu_conf: Configuration of repr MTU value 126 110 */ 127 111 struct nfp_flower_priv { 128 112 struct nfp_app *app; ··· 151 133 struct notifier_block nfp_tun_neigh_nb; 152 134 atomic_t reify_replies; 153 135 wait_queue_head_t reify_wait_queue; 136 + struct nfp_mtu_conf mtu_conf; 154 137 }; 155 138 156 139 struct nfp_fl_key_ls {
+18 -7
drivers/net/ethernet/netronome/nfp/nfp_app.h
··· 86 86 * @repr_clean: representor about to be unregistered 87 87 * @repr_open: representor netdev open callback 88 88 * @repr_stop: representor netdev stop callback 89 - * @change_mtu: MTU change on a netdev has been requested (veto-only, change 90 - * is not guaranteed to be committed) 89 + * @check_mtu: MTU change request on a netdev (verify it is valid) 90 + * @repr_change_mtu: MTU change request on repr (make and verify change) 91 91 * @start: start application logic 92 92 * @stop: stop application logic 93 93 * @ctrl_msg_rx: control message handler ··· 124 124 int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr); 125 125 int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr); 126 126 127 - int (*change_mtu)(struct nfp_app *app, struct net_device *netdev, 128 - int new_mtu); 127 + int (*check_mtu)(struct nfp_app *app, struct net_device *netdev, 128 + int new_mtu); 129 + int (*repr_change_mtu)(struct nfp_app *app, struct net_device *netdev, 130 + int new_mtu); 129 131 130 132 int (*start)(struct nfp_app *app); 131 133 void (*stop)(struct nfp_app *app); ··· 249 247 } 250 248 251 249 static inline int 252 - nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) 250 + nfp_app_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) 253 251 { 254 - if (!app || !app->type->change_mtu) 252 + if (!app || !app->type->check_mtu) 255 253 return 0; 256 - return app->type->change_mtu(app, netdev, new_mtu); 254 + return app->type->check_mtu(app, netdev, new_mtu); 255 + } 256 + 257 + static inline int 258 + nfp_app_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, 259 + int new_mtu) 260 + { 261 + if (!app || !app->type->repr_change_mtu) 262 + return 0; 263 + return app->type->repr_change_mtu(app, netdev, new_mtu); 257 264 } 258 265 259 266 static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
+1 -1
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 3066 3066 struct nfp_net_dp *dp; 3067 3067 int err; 3068 3068 3069 - err = nfp_app_change_mtu(nn->app, netdev, new_mtu); 3069 + err = nfp_app_check_mtu(nn->app, netdev, new_mtu); 3070 3070 if (err) 3071 3071 return err; 3072 3072
+12 -1
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
··· 196 196 static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu) 197 197 { 198 198 struct nfp_repr *repr = netdev_priv(netdev); 199 + int err; 199 200 200 - return nfp_app_change_mtu(repr->app, netdev, new_mtu); 201 + err = nfp_app_check_mtu(repr->app, netdev, new_mtu); 202 + if (err) 203 + return err; 204 + 205 + err = nfp_app_repr_change_mtu(repr->app, netdev, new_mtu); 206 + if (err) 207 + return err; 208 + 209 + netdev->mtu = new_mtu; 210 + 211 + return 0; 201 212 } 202 213 203 214 static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)