Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cxgb4/ch_ipsec: Registering xfrmdev_ops with cxgb4

The ch_ipsec module could be removed without clearing xfrmdev_ops and the
netdev feature (esp-hw-offload). When a recalculation of netdev features
was then triggered by changing the TLS feature (tls-hw-tx-offload) via a
user request, it caused a page fault due to the absence of valid
xfrmdev_ops.

Fixes: 6dad4e8ab3ec ("chcr: Add support for Inline IPSec")
Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Ayush Sawal and committed by
David S. Miller
76f919eb 8794ebfe

+173 -56
+5
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
··· 146 146 CXGB4_ETHTOOL_FLASH_BOOTCFG = 4 147 147 }; 148 148 149 + enum cxgb4_netdev_tls_ops { 150 + CXGB4_TLSDEV_OPS = 1, 151 + CXGB4_XFRMDEV_OPS 152 + }; 153 + 149 154 struct cxgb4_bootcfg_data { 150 155 __le16 signature; 151 156 __u8 reserved[2];
+164 -22
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 6396 6396 } 6397 6397 #endif /* CONFIG_PCI_IOV */ 6398 6398 6399 + #if defined(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) 6400 + 6401 + static int chcr_offload_state(struct adapter *adap, 6402 + enum cxgb4_netdev_tls_ops op_val) 6403 + { 6404 + switch (op_val) { 6405 + #if defined(CONFIG_CHELSIO_TLS_DEVICE) 6406 + case CXGB4_TLSDEV_OPS: 6407 + if (!adap->uld[CXGB4_ULD_CRYPTO].handle) { 6408 + dev_dbg(adap->pdev_dev, "chcr driver is not loaded\n"); 6409 + return -EOPNOTSUPP; 6410 + } 6411 + if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) { 6412 + dev_dbg(adap->pdev_dev, 6413 + "chcr driver has no registered tlsdev_ops\n"); 6414 + return -EOPNOTSUPP; 6415 + } 6416 + break; 6417 + #endif /* CONFIG_CHELSIO_TLS_DEVICE */ 6418 + #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) 6419 + case CXGB4_XFRMDEV_OPS: 6420 + if (!adap->uld[CXGB4_ULD_IPSEC].handle) { 6421 + dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n"); 6422 + return -EOPNOTSUPP; 6423 + } 6424 + if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) { 6425 + dev_dbg(adap->pdev_dev, 6426 + "chipsec driver has no registered xfrmdev_ops\n"); 6427 + return -EOPNOTSUPP; 6428 + } 6429 + break; 6430 + #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ 6431 + default: 6432 + dev_dbg(adap->pdev_dev, 6433 + "driver has no support for offload %d\n", op_val); 6434 + return -EOPNOTSUPP; 6435 + } 6436 + 6437 + return 0; 6438 + } 6439 + 6440 + #endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */ 6441 + 6399 6442 #if defined(CONFIG_CHELSIO_TLS_DEVICE) 6400 6443 6401 6444 static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk, ··· 6447 6404 u32 tcp_sn) 6448 6405 { 6449 6406 struct adapter *adap = netdev2adap(netdev); 6450 - int ret = 0; 6407 + int ret; 6451 6408 6452 6409 mutex_lock(&uld_mutex); 6453 - if (!adap->uld[CXGB4_ULD_CRYPTO].handle) { 6454 - dev_err(adap->pdev_dev, "chcr driver is not loaded\n"); 6455 - ret = -EOPNOTSUPP; 6410 + ret = chcr_offload_state(adap, 
CXGB4_TLSDEV_OPS); 6411 + if (ret) 6456 6412 goto out_unlock; 6457 - } 6458 - 6459 - if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) { 6460 - dev_err(adap->pdev_dev, 6461 - "chcr driver has no registered tlsdev_ops()\n"); 6462 - ret = -EOPNOTSUPP; 6463 - goto out_unlock; 6464 - } 6465 6413 6466 6414 ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE); 6467 6415 if (ret) ··· 6478 6444 struct adapter *adap = netdev2adap(netdev); 6479 6445 6480 6446 mutex_lock(&uld_mutex); 6481 - if (!adap->uld[CXGB4_ULD_CRYPTO].handle) { 6482 - dev_err(adap->pdev_dev, "chcr driver is not loaded\n"); 6447 + if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS)) 6483 6448 goto out_unlock; 6484 - } 6485 - 6486 - if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) { 6487 - dev_err(adap->pdev_dev, 6488 - "chcr driver has no registered tlsdev_ops\n"); 6489 - goto out_unlock; 6490 - } 6491 6449 6492 6450 adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx, 6493 6451 direction); ··· 6488 6462 out_unlock: 6489 6463 mutex_unlock(&uld_mutex); 6490 6464 } 6465 + 6466 + #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) 6467 + 6468 + static int cxgb4_xfrm_add_state(struct xfrm_state *x) 6469 + { 6470 + struct adapter *adap = netdev2adap(x->xso.dev); 6471 + int ret; 6472 + 6473 + if (!mutex_trylock(&uld_mutex)) { 6474 + dev_dbg(adap->pdev_dev, 6475 + "crypto uld critical resource is under use\n"); 6476 + return -EBUSY; 6477 + } 6478 + ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS); 6479 + if (ret) 6480 + goto out_unlock; 6481 + 6482 + ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x); 6483 + 6484 + out_unlock: 6485 + mutex_unlock(&uld_mutex); 6486 + 6487 + return ret; 6488 + } 6489 + 6490 + static void cxgb4_xfrm_del_state(struct xfrm_state *x) 6491 + { 6492 + struct adapter *adap = netdev2adap(x->xso.dev); 6493 + 6494 + if (!mutex_trylock(&uld_mutex)) { 6495 + dev_dbg(adap->pdev_dev, 6496 + "crypto uld critical resource is under use\n"); 6497 + return; 6498 + } 6499 
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) 6500 + goto out_unlock; 6501 + 6502 + adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x); 6503 + 6504 + out_unlock: 6505 + mutex_unlock(&uld_mutex); 6506 + } 6507 + 6508 + static void cxgb4_xfrm_free_state(struct xfrm_state *x) 6509 + { 6510 + struct adapter *adap = netdev2adap(x->xso.dev); 6511 + 6512 + if (!mutex_trylock(&uld_mutex)) { 6513 + dev_dbg(adap->pdev_dev, 6514 + "crypto uld critical resource is under use\n"); 6515 + return; 6516 + } 6517 + if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) 6518 + goto out_unlock; 6519 + 6520 + adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x); 6521 + 6522 + out_unlock: 6523 + mutex_unlock(&uld_mutex); 6524 + } 6525 + 6526 + static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) 6527 + { 6528 + struct adapter *adap = netdev2adap(x->xso.dev); 6529 + bool ret = false; 6530 + 6531 + if (!mutex_trylock(&uld_mutex)) { 6532 + dev_dbg(adap->pdev_dev, 6533 + "crypto uld critical resource is under use\n"); 6534 + return ret; 6535 + } 6536 + if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) 6537 + goto out_unlock; 6538 + 6539 + ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x); 6540 + 6541 + out_unlock: 6542 + mutex_unlock(&uld_mutex); 6543 + return ret; 6544 + } 6545 + 6546 + static void cxgb4_advance_esn_state(struct xfrm_state *x) 6547 + { 6548 + struct adapter *adap = netdev2adap(x->xso.dev); 6549 + 6550 + if (!mutex_trylock(&uld_mutex)) { 6551 + dev_dbg(adap->pdev_dev, 6552 + "crypto uld critical resource is under use\n"); 6553 + return; 6554 + } 6555 + if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) 6556 + goto out_unlock; 6557 + 6558 + adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x); 6559 + 6560 + out_unlock: 6561 + mutex_unlock(&uld_mutex); 6562 + } 6563 + 6564 + static const struct xfrmdev_ops cxgb4_xfrmdev_ops = { 6565 + .xdo_dev_state_add = cxgb4_xfrm_add_state, 6566 + 
.xdo_dev_state_delete = cxgb4_xfrm_del_state, 6567 + .xdo_dev_state_free = cxgb4_xfrm_free_state, 6568 + .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok, 6569 + .xdo_dev_state_advance_esn = cxgb4_advance_esn_state, 6570 + }; 6571 + 6572 + #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ 6491 6573 6492 6574 static const struct tlsdev_ops cxgb4_ktls_ops = { 6493 6575 .tls_dev_add = cxgb4_ktls_dev_add, ··· 6862 6728 /* initialize the refcount */ 6863 6729 refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0); 6864 6730 } 6865 - #endif 6731 + #endif /* CONFIG_CHELSIO_TLS_DEVICE */ 6732 + #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) 6733 + if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) { 6734 + netdev->hw_enc_features |= NETIF_F_HW_ESP; 6735 + netdev->features |= NETIF_F_HW_ESP; 6736 + netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops; 6737 + } 6738 + #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ 6739 + 6866 6740 netdev->priv_flags |= IFF_UNICAST_FLT; 6867 6741 6868 6742 /* MTU range: 81 - 9600 */
+3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
··· 479 479 #if IS_ENABLED(CONFIG_TLS_DEVICE) 480 480 const struct tlsdev_ops *tlsdev_ops; 481 481 #endif 482 + #if IS_ENABLED(CONFIG_XFRM_OFFLOAD) 483 + const struct xfrmdev_ops *xfrmdev_ops; 484 + #endif 482 485 }; 483 486 484 487 void cxgb4_uld_enable(struct adapter *adap);
+1 -34
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
··· 79 79 static void chcr_advance_esn_state(struct xfrm_state *x); 80 80 static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state); 81 81 static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop); 82 - static void update_netdev_features(void); 83 82 84 83 static const struct xfrmdev_ops chcr_xfrmdev_ops = { 85 84 .xdo_dev_state_add = chcr_xfrm_add_state, ··· 88 89 .xdo_dev_state_advance_esn = chcr_advance_esn_state, 89 90 }; 90 91 91 - /* Add offload xfrms to Chelsio Interface */ 92 - void chcr_add_xfrmops(const struct cxgb4_lld_info *lld) 93 - { 94 - struct net_device *netdev = NULL; 95 - int i; 96 - 97 - for (i = 0; i < lld->nports; i++) { 98 - netdev = lld->ports[i]; 99 - if (!netdev) 100 - continue; 101 - netdev->xfrmdev_ops = &chcr_xfrmdev_ops; 102 - netdev->hw_enc_features |= NETIF_F_HW_ESP; 103 - netdev->features |= NETIF_F_HW_ESP; 104 - netdev_change_features(netdev); 105 - } 106 - } 107 - 108 92 static struct cxgb4_uld_info ch_ipsec_uld_info = { 109 93 .name = CHIPSEC_DRV_MODULE_NAME, 110 94 .nrxq = MAX_ULD_QSETS, ··· 96 114 .add = ch_ipsec_uld_add, 97 115 .state_change = ch_ipsec_uld_state_change, 98 116 .tx_handler = chcr_ipsec_xmit, 117 + .xfrmdev_ops = &chcr_xfrmdev_ops, 99 118 }; 100 119 101 120 static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop) ··· 791 808 return NETDEV_TX_OK; 792 809 } 793 810 794 - static void update_netdev_features(void) 795 - { 796 - struct ipsec_uld_ctx *u_ctx, *tmp; 797 - 798 - mutex_lock(&dev_mutex); 799 - list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) { 800 - if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE) 801 - chcr_add_xfrmops(&u_ctx->lldi); 802 - } 803 - mutex_unlock(&dev_mutex); 804 - } 805 - 806 811 static int __init chcr_ipsec_init(void) 807 812 { 808 813 cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info); 809 - 810 - rtnl_lock(); 811 - update_netdev_features(); 812 - rtnl_unlock(); 813 814 814 815 return 0; 815 816 }