Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/crypto/chelsio/chcr_ipsec.c at v5.5-rc7 (760 lines, 21 kB)
/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 * Atul Gupta (atul.gupta@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE    8
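
/* Nonce note: RFC 4106 AES-GCM ESP builds its 12-byte nonce from a
 * 4-byte salt kept with the key plus the 8-byte explicit IV carried in
 * each packet, hence GCM_ESP_IV_SIZE; the salt is peeled off the tail
 * of the AEAD key in chcr_ipsec_setkey() below.
 */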

static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static void chcr_advance_esn_state(struct xfrm_state *x);

static const struct xfrmdev_ops chcr_xfrmdev_ops = {
	.xdo_dev_state_add = chcr_xfrm_add_state,
	.xdo_dev_state_delete = chcr_xfrm_del_state,
	.xdo_dev_state_free = chcr_xfrm_free_state,
	.xdo_dev_offload_ok = chcr_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = chcr_advance_esn_state,
};

/* Add offload xfrms to Chelsio Interface */
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
{
	struct net_device *netdev = NULL;
	int i;

	for (i = 0; i < lld->nports; i++) {
		netdev = lld->ports[i];
		if (!netdev)
			continue;
		netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
		netdev->hw_enc_features |= NETIF_F_HW_ESP;
		netdev->features |= NETIF_F_HW_ESP;
		rtnl_lock();
		netdev_change_features(netdev);
		rtnl_unlock();
	}
}

static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
					 struct ipsec_sa_entry *sa_entry)
{
	int hmac_ctrl;
	int authsize = x->aead->alg_icv_len / 8;

	sa_entry->authsize = authsize;

	switch (authsize) {
	case ICV_8:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		break;
	case ICV_12:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		break;
	case ICV_16:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		break;
	default:
		return -EINVAL;
	}
	return hmac_ctrl;
}

static inline int chcr_ipsec_setkey(struct xfrm_state *x,
				    struct ipsec_sa_entry *sa_entry)
{
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	struct crypto_aes_ctx aes;
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		       ((DIV_ROUND_UP(keylen, 16)) << 4) +
		       AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);
	memzero_explicit(&aes, sizeof(aes));

	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
	       16), ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			     AEAD_H_SIZE;
out:
	return ret;
}
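
/* Key material layout produced above in sa_entry->key, e.g. for
 * AES-128-GCM:
 *
 *   bytes  0..15  AES encryption key
 *   bytes 16..31  GHASH subkey H = AES_K(0^128)
 *
 * The cipher key is padded to a 16-byte multiple (so H lands at offset
 * 32 for AES-192), and kctx_len counts both parts. The _key_ctx header
 * and the 4-byte salt live separately in sa_entry and are prepended in
 * copy_key_cpltx_pktxt() when the work request is assembled.
 */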

/*
 * chcr_xfrm_add_state
 * returns 0 on success, negative error if failed to send message to FPGA
 * positive error if FPGA returned a bad response
 */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;
	int res = 0;

	if (x->props.aalgo != SADB_AALG_NONE) {
		pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		pr_debug("CHCR: Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		pr_debug("CHCR: Only ESP xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		pr_debug("CHCR: Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128 &&
	    x->aead->alg_icv_len != 96) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		res = -ENOMEM;
		goto out;
	}

	sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
	if (x->props.flags & XFRM_STATE_ESN)
		sa_entry->esn = 1;
	chcr_ipsec_setkey(x, sa_entry);
	x->xso.offload_handle = (unsigned long)sa_entry;
	try_module_get(THIS_MODULE);
out:
	return res;
}

static void chcr_xfrm_del_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}

static void chcr_xfrm_free_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kfree(sa_entry);
	module_put(THIS_MODULE);
}

static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IP options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}
	/* Inline single pdu */
	if (skb_shinfo(skb)->gso_size)
		return false;
	return true;
}
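
/* chcr_ipsec_offload_ok() is the per-packet check the xfrm stack applies
 * before steering a packet to this offload: IPv4 packets with options
 * and IPv6 packets with extension headers are declined (not supported
 * yet, per the comments above), and GSO skbs are declined because the
 * work request carries a single PDU.
 */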

static void chcr_advance_esn_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}

static inline int is_eth_imm(const struct sk_buff *skb,
			     struct ipsec_sa_entry *sa_entry)
{
	unsigned int kctx_len;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = sizeof(struct fw_ulptx_wr) +
		 sizeof(struct chcr_ipsec_req) + kctx_len;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (sa_entry->esn)
		hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
			   << 4);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}

static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
					     struct ipsec_sa_entry *sa_entry,
					     bool *immediate)
{
	unsigned int kctx_len;
	unsigned int flits;
	int aadivlen;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = is_eth_imm(skb, sa_entry);
	aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
						16) : 0;
	aadivlen <<= 4;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data. In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */

	if (hdrlen) {
		*immediate = true;
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
	}

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments. We also include the flits necessary
	 * for the TX Packet Work Request and CPL. We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits += (sizeof(struct fw_ulptx_wr) +
		  sizeof(struct chcr_ipsec_req) +
		  kctx_len +
		  sizeof(struct cpl_tx_pkt_core) +
		  aadivlen) / sizeof(__be64);
	return flits;
}
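
/* Unit note: a flit is 8 bytes. The WR length field built in
 * chcr_crypto_wreq() counts 16-byte LEN16 units (DIV_ROUND_UP(flits, 2)),
 * while chcr_ipsec_xmit() converts flits to 64-byte SGE descriptors via
 * flits_to_desc() (DIV_ROUND_UP(flits, 8)); both derive from the flit
 * count computed above, and the immediate path bounds the whole WR by
 * MAX_IMM_TX_PKT_LEN (256 bytes, i.e. four descriptors).
 */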

inline void *copy_esn_pktxt(struct sk_buff *skb,
			    struct net_device *dev,
			    void *pos,
			    struct ipsec_sa_entry *sa_entry)
{
	struct chcr_ipsec_aadiv *aadiv;
	struct ulptx_idata *sc_imm;
	struct ip_esp_hdr *esphdr;
	struct xfrm_offload *xo;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	__be64 seqno;
	u32 qidx;
	u32 seqlo;
	u8 *iv;
	int eoq;
	int len;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	if (!eoq)
		pos = q->q.desc;

	len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
	memset(pos, 0, len);
	aadiv = (struct chcr_ipsec_aadiv *)pos;
	esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
	xo = xfrm_offload(skb);

	aadiv->spi = (esphdr->spi);
	seqlo = htonl(esphdr->seq_no);
	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
	memcpy(aadiv->seq_no, &seqno, 8);
	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
	memcpy(aadiv->iv, iv, 8);

	if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
		sc_imm = (struct ulptx_idata *)(pos +
			 (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
				       sizeof(__be64)) << 3));
		sc_imm->cmd_more = FILL_CMD_MORE(0);
		sc_imm->len = cpu_to_be32(skb->len);
	}
	pos += len;
	return pos;
}
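
/* ESN layout note: chcr_ipsec_aadiv hands the hardware the AAD and IV
 * it cannot lift from the packet itself: the SPI, the full 64-bit
 * extended sequence number (xo->seq.hi folded into the upper 32 bits
 * above), and the 8-byte explicit IV, padded to a 16-byte multiple.
 */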

inline void *copy_cpltx_pktxt(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      struct ipsec_sa_entry *sa_entry)
{
	struct cpl_tx_pkt_core *cpl;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	u32 ctrl0, qidx;
	u64 cntrl = 0;
	int left;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	left = (void *)q->q.stat - pos;
	if (!left)
		pos = q->q.desc;

	cpl = (struct cpl_tx_pkt_core *)pos;

	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	pos += sizeof(struct cpl_tx_pkt_core);
	/* Copy ESN info for HW */
	if (sa_entry->esn)
		pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
	return pos;
}

inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  struct ipsec_sa_entry *sa_entry)
{
	struct _key_ctx *key_ctx;
	int left, eoq, key_len;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	unsigned int qidx;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
	key_len = sa_entry->kctx_len;

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	left = eoq;
	if (!eoq) {
		pos = q->q.desc;
		left = 64 * q->q.size;
	}

	/* Copy the Key context header */
	key_ctx = (struct _key_ctx *)pos;
	key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
	memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
	pos += sizeof(struct _key_ctx);
	left -= sizeof(struct _key_ctx);

	if (likely(key_len <= left)) {
		memcpy(key_ctx->key, sa_entry->key, key_len);
		pos += key_len;
	} else {
		memcpy(pos, sa_entry->key, left);
		memcpy(q->q.desc, sa_entry->key + left,
		       key_len - left);
		pos = (u8 *)q->q.desc + (key_len - left);
	}
	/* Copy CPL TX PKT XT */
	pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}
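
/* Ring-wrap handling: q->q.stat marks the end of the descriptor ring,
 * so each copy_* helper above restarts at q->q.desc when pos reaches
 * it, and the key copy splits its memcpy across the wrap when fewer
 * than key_len bytes remain (left = 64 * q->q.size is the ring size in
 * bytes, i.e. 64-byte descriptors).
 */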

inline void *chcr_crypto_wreq(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      int credits,
			      struct ipsec_sa_entry *sa_entry)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int ivsize = GCM_ESP_IV_SIZE;
	struct chcr_ipsec_wr *wr;
	bool immediate = false;
	u16 immdatalen = 0;
	unsigned int flits;
	u32 ivinoffset;
	u32 aadstart;
	u32 aadstop;
	u32 ciphstart;
	u16 sc_more = 0;
	u32 ivdrop = 0;
	u32 esnlen = 0;
	u32 wr_mid;
	u16 ndesc;
	int qidx = skb_get_queue_mapping(skb);
	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
	unsigned int kctx_len = sa_entry->kctx_len;
	int qid = q->q.cntxt_id;

	atomic_inc(&adap->chcr_stats.ipsec_cnt);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = DIV_ROUND_UP(flits, 2);
	if (sa_entry->esn)
		ivdrop = 1;

	if (immediate)
		immdatalen = skb->len;

	if (sa_entry->esn) {
		esnlen = sizeof(struct chcr_ipsec_aadiv);
		if (!skb_is_nonlinear(skb))
			sc_more = 1;
	}

	/* WR Header */
	wr = (struct chcr_ipsec_wr *)pos;
	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		netif_tx_stop_queue(q->txq);
		q->q.stops++;
		if (!q->dbqt)
			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}
	wr_mid |= FW_ULPTX_WR_DATA_F;
	wr->wreq.flowid_len16 = htonl(wr_mid);

	/* ULPTX */
	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
	wr->req.ulptx.len = htonl(ndesc - 1);

	/* Sub-command */
	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					 sizeof(wr->req.key_ctx) +
					 kctx_len +
					 sizeof(struct cpl_tx_pkt_core) +
					 esnlen +
					 (esnlen ? 0 : immdatalen));

	/* CPL_SEC_PDU */
	ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
				     (skb_transport_offset(skb) +
				      sizeof(struct ip_esp_hdr) + 1);
	wr->req.sec_cpl.op_ivinsrtofst = htonl(
				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
				CPL_TX_SEC_PDU_CPLLEN_V(2) |
				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
				CPL_TX_SEC_PDU_IVINSRTOFST_V(
							     ivinoffset));

	wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
	aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
	aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
				  (skb_transport_offset(skb) +
				   sizeof(struct ip_esp_hdr));
	ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
		    GCM_ESP_IV_SIZE + 1;
	ciphstart += sa_entry->esn ? esnlen : 0;

	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
							aadstart,
							aadstop,
							ciphstart, 0);

	wr->req.sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
					sa_entry->authsize,
					sa_entry->authsize);
	wr->req.sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 sa_entry->hmac_ctrl,
					 ivsize >> 1);
	wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
								 0, ivdrop, 0);

	pos += sizeof(struct fw_ulptx_wr) +
	       sizeof(struct ulp_txpkt) +
	       sizeof(struct ulptx_idata) +
	       sizeof(struct cpl_tx_sec_pdu);

	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	WARN_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}
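
/* txq_avail() keeps one descriptor in reserve (size - 1 - in_use) so a
 * full ring is distinguishable from an empty one; chcr_ipsec_xmit()
 * below subtracts the request's descriptor count from this to decide
 * whether the queue must be stopped.
 */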

/*
 * chcr_ipsec_xmit called from ULD Tx handler
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	unsigned int last_desc, ndesc, flits = 0;
	struct ipsec_sa_entry *sa_entry;
	u64 *pos, *end, *before, *sgl;
	struct tx_sw_desc *sgl_sdesc;
	int qidx, left, credits;
	bool immediate = false;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	struct sec_path *sp;

	if (!x->xso.offload_handle)
		return NETDEV_TX_BUSY;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

	sp = skb_sec_path(skb);
	if (sp->len != 1) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cxgb4_reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
			flits);
		return NETDEV_TX_BUSY;
	}

	last_desc = q->q.pidx + ndesc - 1;
	if (last_desc >= q->q.size)
		last_desc -= q->q.size;
	sgl_sdesc = &q->q.sdesc[last_desc];

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		q->mapping_err++;
		goto out_free;
	}

	pos = (u64 *)&q->q.desc[q->q.pidx];
	before = (u64 *)pos;
	end = (u64 *)pos + flits;
	/* Setup IPSec CPL */
	pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
				       credits, sa_entry);
	if (before > (u64 *)pos) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
	}
	if (pos == (u64 *)q->q.stat) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		pos = (void *)q->q.desc;
	}

	sgl = (void *)pos;
	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
				0, sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}
	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}
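
/* Overall Tx flow: chcr_ipsec_xmit() reclaims completed descriptors,
 * sizes the request with calc_tx_sec_flits(), writes the crypto WR
 * into the ring via chcr_crypto_wreq() (which chains the key context
 * and CPL_TX_PKT_XT through the copy_* helpers), then either inlines
 * the skb as immediate data or DMA-maps it behind a scatter-gather
 * list, and finally rings the doorbell with cxgb4_ring_tx_db().
 */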