Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'cxgb4-ch_ktls-updates-in-net-next'

Rohit Maheshwari says:

====================
cxgb4/ch_ktls: updates in net-next

This series of patches improves connection setup and statistics.

This series is broken down as follows:

Patch 1 fixes the handling of connection setup failure in HW. The driver
shouldn't return success to tls_dev_add until HW returns success.

Patch 2 avoids the log flood.

Patch 3 adds ktls statistics at port level.

v1->v2:
- removed conn_up from all places.

v2->v3:
- Corrected timeout handling.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+235 -203
+15 -20
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
··· 3527 3527 3528 3528 static int chcr_stats_show(struct seq_file *seq, void *v) 3529 3529 { 3530 + #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) 3531 + struct ch_ktls_port_stats_debug *ktls_port; 3532 + int i = 0; 3533 + #endif 3530 3534 struct adapter *adap = seq->private; 3531 3535 3532 3536 seq_puts(seq, "Chelsio Crypto Accelerator Stats \n"); ··· 3561 3557 seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n"); 3562 3558 seq_printf(seq, "Tx TLS offload refcount: %20u\n", 3563 3559 refcount_read(&adap->chcr_ktls.ktls_refcount)); 3564 - seq_printf(seq, "Tx HW offload contexts added: %20llu\n", 3565 - atomic64_read(&adap->ch_ktls_stats.ktls_tx_ctx)); 3566 - seq_printf(seq, "Tx connection created: %20llu\n", 3567 - atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_open)); 3568 - seq_printf(seq, "Tx connection failed: %20llu\n", 3569 - atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_fail)); 3570 - seq_printf(seq, "Tx connection closed: %20llu\n", 3571 - atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_close)); 3572 - seq_printf(seq, "Packets passed for encryption : %20llu\n", 3573 - atomic64_read(&adap->ch_ktls_stats.ktls_tx_encrypted_packets)); 3574 - seq_printf(seq, "Bytes passed for encryption : %20llu\n", 3575 - atomic64_read(&adap->ch_ktls_stats.ktls_tx_encrypted_bytes)); 3576 3560 seq_printf(seq, "Tx records send: %20llu\n", 3577 3561 atomic64_read(&adap->ch_ktls_stats.ktls_tx_send_records)); 3578 3562 seq_printf(seq, "Tx partial start of records: %20llu\n", ··· 3573 3581 atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts)); 3574 3582 seq_printf(seq, "TX trim pkts : %20llu\n", 3575 3583 atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts)); 3576 - seq_printf(seq, "Tx out of order packets: %20llu\n", 3577 - atomic64_read(&adap->ch_ktls_stats.ktls_tx_ooo)); 3578 - seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n", 3579 - atomic64_read(&adap->ch_ktls_stats.ktls_tx_skip_no_sync_data)); 3580 - seq_printf(seq, "Tx drop not synced 
packets: %20llu\n", 3581 - atomic64_read(&adap->ch_ktls_stats.ktls_tx_drop_no_sync_data)); 3582 - seq_printf(seq, "Tx drop bypass req: %20llu\n", 3583 - atomic64_read(&adap->ch_ktls_stats.ktls_tx_drop_bypass_req)); 3584 + while (i < MAX_NPORTS) { 3585 + ktls_port = &adap->ch_ktls_stats.ktls_port[i]; 3586 + seq_printf(seq, "Port %d\n", i); 3587 + seq_printf(seq, "Tx connection created: %20llu\n", 3588 + atomic64_read(&ktls_port->ktls_tx_connection_open)); 3589 + seq_printf(seq, "Tx connection failed: %20llu\n", 3590 + atomic64_read(&ktls_port->ktls_tx_connection_fail)); 3591 + seq_printf(seq, "Tx connection closed: %20llu\n", 3592 + atomic64_read(&ktls_port->ktls_tx_connection_close)); 3593 + i++; 3594 + } 3584 3595 #endif 3585 3596 return 0; 3586 3597 }
+34 -16
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
··· 117 117 "vlan_insertions ", 118 118 "gro_packets ", 119 119 "gro_merged ", 120 - }; 121 - 122 - static char adapter_stats_strings[][ETH_GSTRING_LEN] = { 123 - "db_drop ", 124 - "db_full ", 125 - "db_empty ", 126 - "write_coal_success ", 127 - "write_coal_fail ", 128 120 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) 129 121 "tx_tls_encrypted_packets", 130 122 "tx_tls_encrypted_bytes ", ··· 126 134 "tx_tls_drop_no_sync_data", 127 135 "tx_tls_drop_bypass_req ", 128 136 #endif 137 + }; 138 + 139 + static char adapter_stats_strings[][ETH_GSTRING_LEN] = { 140 + "db_drop ", 141 + "db_full ", 142 + "db_empty ", 143 + "write_coal_success ", 144 + "write_coal_fail ", 129 145 }; 130 146 131 147 static char loopback_stats_strings[][ETH_GSTRING_LEN] = { ··· 257 257 u64 vlan_ins; 258 258 u64 gro_pkts; 259 259 u64 gro_merged; 260 - }; 261 - 262 - struct adapter_stats { 263 - u64 db_drop; 264 - u64 db_full; 265 - u64 db_empty; 266 - u64 wc_success; 267 - u64 wc_fail; 268 260 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) 269 261 u64 tx_tls_encrypted_packets; 270 262 u64 tx_tls_encrypted_bytes; ··· 268 276 #endif 269 277 }; 270 278 279 + struct adapter_stats { 280 + u64 db_drop; 281 + u64 db_full; 282 + u64 db_empty; 283 + u64 wc_success; 284 + u64 wc_fail; 285 + }; 286 + 271 287 static void collect_sge_port_stats(const struct adapter *adap, 272 288 const struct port_info *p, 273 289 struct queue_port_stats *s) 274 290 { 275 291 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; 276 292 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; 293 + #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) 294 + const struct ch_ktls_port_stats_debug *ktls_stats; 295 + #endif 277 296 struct sge_eohw_txq *eohw_tx; 278 297 unsigned int i; 279 298 ··· 309 306 s->vlan_ins += eohw_tx->vlan_ins; 310 307 } 311 308 } 309 + #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) 310 + ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id]; 311 + s->tx_tls_encrypted_packets = 312 + 
atomic64_read(&ktls_stats->ktls_tx_encrypted_packets); 313 + s->tx_tls_encrypted_bytes = 314 + atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes); 315 + s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx); 316 + s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo); 317 + s->tx_tls_skip_no_sync_data = 318 + atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data); 319 + s->tx_tls_drop_no_sync_data = 320 + atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data); 321 + s->tx_tls_drop_bypass_req = 322 + atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req); 323 + #endif 312 324 } 313 325 314 326 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
+4 -4
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
··· 690 690 * ULD is/are already active, return failure. 691 691 */ 692 692 if (cxgb4_uld_in_use(adap)) { 693 - dev_warn(adap->pdev_dev, 694 - "ULD connections (tid/stid) active. Can't enable kTLS\n"); 693 + dev_dbg(adap->pdev_dev, 694 + "ULD connections (tid/stid) active. Can't enable kTLS\n"); 695 695 return -EINVAL; 696 696 } 697 697 ret = t4_set_params(adap, adap->mbox, adap->pf, ··· 699 699 if (ret) 700 700 return ret; 701 701 refcount_set(&adap->chcr_ktls.ktls_refcount, 1); 702 - pr_info("kTLS has been enabled. Restrictions placed on ULD support\n"); 702 + pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n"); 703 703 } else { 704 704 /* ktls settings already up, just increment refcount. */ 705 705 refcount_inc(&adap->chcr_ktls.ktls_refcount); ··· 716 716 0, 1, &params, &params); 717 717 if (ret) 718 718 return ret; 719 - pr_info("kTLS is disabled. Restrictions on ULD support removed\n"); 719 + pr_debug("kTLS is disabled. Restrictions on ULD support removed\n"); 720 720 } 721 721 } 722 722
+13 -8
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
··· 44 44 #include "cxgb4.h" 45 45 46 46 #define MAX_ULD_QSETS 16 47 + #define MAX_ULD_NPORTS 4 47 48 48 49 /* CPL message priority levels */ 49 50 enum { ··· 366 365 }; 367 366 368 367 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) 369 - struct ch_ktls_stats_debug { 368 + struct ch_ktls_port_stats_debug { 370 369 atomic64_t ktls_tx_connection_open; 371 370 atomic64_t ktls_tx_connection_fail; 372 371 atomic64_t ktls_tx_connection_close; 373 - atomic64_t ktls_tx_send_records; 374 - atomic64_t ktls_tx_end_pkts; 375 - atomic64_t ktls_tx_start_pkts; 376 - atomic64_t ktls_tx_middle_pkts; 377 - atomic64_t ktls_tx_retransmit_pkts; 378 - atomic64_t ktls_tx_complete_pkts; 379 - atomic64_t ktls_tx_trimmed_pkts; 380 372 atomic64_t ktls_tx_encrypted_packets; 381 373 atomic64_t ktls_tx_encrypted_bytes; 382 374 atomic64_t ktls_tx_ctx; ··· 377 383 atomic64_t ktls_tx_skip_no_sync_data; 378 384 atomic64_t ktls_tx_drop_no_sync_data; 379 385 atomic64_t ktls_tx_drop_bypass_req; 386 + }; 387 + 388 + struct ch_ktls_stats_debug { 389 + struct ch_ktls_port_stats_debug ktls_port[MAX_ULD_NPORTS]; 390 + atomic64_t ktls_tx_send_records; 391 + atomic64_t ktls_tx_end_pkts; 392 + atomic64_t ktls_tx_start_pkts; 393 + atomic64_t ktls_tx_middle_pkts; 394 + atomic64_t ktls_tx_retransmit_pkts; 395 + atomic64_t ktls_tx_complete_pkts; 396 + atomic64_t ktls_tx_trimmed_pkts; 380 397 }; 381 398 #endif 382 399
+161 -146
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
··· 125 125 return ret; 126 126 } 127 127 128 - static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info, 129 - int new_state) 130 - { 131 - /* This function can be called from both rx (interrupt context) and tx 132 - * queue contexts. 133 - */ 134 - spin_lock_bh(&tx_info->lock); 135 - switch (tx_info->connection_state) { 136 - case KTLS_CONN_CLOSED: 137 - tx_info->connection_state = new_state; 138 - break; 139 - 140 - case KTLS_CONN_ACT_OPEN_REQ: 141 - /* only go forward if state is greater than current state. */ 142 - if (new_state <= tx_info->connection_state) 143 - break; 144 - /* update to the next state and also initialize TCB */ 145 - tx_info->connection_state = new_state; 146 - fallthrough; 147 - case KTLS_CONN_ACT_OPEN_RPL: 148 - /* if we are stuck in this state, means tcb init might not 149 - * received by HW, try sending it again. 150 - */ 151 - if (!chcr_init_tcb_fields(tx_info)) 152 - tx_info->connection_state = KTLS_CONN_SET_TCB_REQ; 153 - break; 154 - 155 - case KTLS_CONN_SET_TCB_REQ: 156 - /* only go forward if state is greater than current state. */ 157 - if (new_state <= tx_info->connection_state) 158 - break; 159 - /* update to the next state and check if l2t_state is valid */ 160 - tx_info->connection_state = new_state; 161 - fallthrough; 162 - case KTLS_CONN_SET_TCB_RPL: 163 - /* Check if l2t state is valid, then move to ready state. */ 164 - if (cxgb4_check_l2t_valid(tx_info->l2te)) { 165 - tx_info->connection_state = KTLS_CONN_TX_READY; 166 - atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_ctx); 167 - } 168 - break; 169 - 170 - case KTLS_CONN_TX_READY: 171 - /* nothing to be done here */ 172 - break; 173 - 174 - default: 175 - pr_err("unknown KTLS connection state\n"); 176 - break; 177 - } 178 - spin_unlock_bh(&tx_info->lock); 179 - 180 - return tx_info->connection_state; 181 - } 182 128 /* 183 129 * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection. 184 130 * @sk - tcp socket. 
··· 244 298 return -EINVAL; 245 299 246 300 tx_info->atid = atid; 247 - tx_info->ip_family = sk->sk_family; 248 301 249 - if (sk->sk_family == AF_INET) { 250 - tx_info->ip_family = AF_INET; 302 + if (tx_info->ip_family == AF_INET) { 251 303 ret = chcr_ktls_act_open_req(sk, tx_info, atid); 252 304 #if IS_ENABLED(CONFIG_IPV6) 253 305 } else { 254 - if (!sk->sk_ipv6only && 255 - ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { 256 - tx_info->ip_family = AF_INET; 257 - ret = chcr_ktls_act_open_req(sk, tx_info, atid); 258 - } else { 259 - tx_info->ip_family = AF_INET6; 260 - ret = cxgb4_clip_get(tx_info->netdev, 261 - (const u32 *) 262 - &sk->sk_v6_rcv_saddr.s6_addr, 263 - 1); 264 - if (ret) 265 - goto out; 266 - ret = chcr_ktls_act_open_req6(sk, tx_info, atid); 267 - } 306 + ret = cxgb4_clip_get(tx_info->netdev, (const u32 *) 307 + &sk->sk_v6_rcv_saddr, 308 + 1); 309 + if (ret) 310 + return ret; 311 + ret = chcr_ktls_act_open_req6(sk, tx_info, atid); 268 312 #endif 269 313 } 270 314 ··· 262 326 * success, if any other return type clear atid and return that failure. 
263 327 */ 264 328 if (ret) { 265 - if (ret == NET_XMIT_CN) 329 + if (ret == NET_XMIT_CN) { 266 330 ret = 0; 267 - else 331 + } else { 332 + #if IS_ENABLED(CONFIG_IPV6) 333 + /* clear clip entry */ 334 + if (tx_info->ip_family == AF_INET6) 335 + cxgb4_clip_release(tx_info->netdev, 336 + (const u32 *) 337 + &sk->sk_v6_rcv_saddr, 338 + 1); 339 + #endif 268 340 cxgb4_free_atid(t, atid); 269 - goto out; 341 + } 270 342 } 271 343 272 - /* update the connection state */ 273 - chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ); 274 - out: 275 344 return ret; 276 345 } 277 346 ··· 337 396 struct chcr_ktls_ofld_ctx_tx *tx_ctx = 338 397 chcr_get_ktls_tx_context(tls_ctx); 339 398 struct chcr_ktls_info *tx_info = tx_ctx->chcr_info; 340 - struct sock *sk; 399 + struct ch_ktls_port_stats_debug *port_stats; 341 400 342 401 if (!tx_info) 343 402 return; 344 - sk = tx_info->sk; 345 - 346 - spin_lock(&tx_info->lock); 347 - tx_info->connection_state = KTLS_CONN_CLOSED; 348 - spin_unlock(&tx_info->lock); 349 403 350 404 /* clear l2t entry */ 351 405 if (tx_info->l2te) ··· 349 413 #if IS_ENABLED(CONFIG_IPV6) 350 414 /* clear clip entry */ 351 415 if (tx_info->ip_family == AF_INET6) 352 - cxgb4_clip_release(netdev, 353 - (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8, 416 + cxgb4_clip_release(netdev, (const u32 *) 417 + &tx_info->sk->sk_v6_rcv_saddr, 354 418 1); 355 419 #endif 356 420 ··· 362 426 tx_info->tid, tx_info->ip_family); 363 427 } 364 428 365 - atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_connection_close); 429 + port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; 430 + atomic64_inc(&port_stats->ktls_tx_connection_close); 366 431 kvfree(tx_info); 367 432 tx_ctx->chcr_info = NULL; 368 433 /* release module refcount */ ··· 385 448 u32 start_offload_tcp_sn) 386 449 { 387 450 struct tls_context *tls_ctx = tls_get_ctx(sk); 451 + struct ch_ktls_port_stats_debug *port_stats; 388 452 struct chcr_ktls_ofld_ctx_tx *tx_ctx; 389 453 struct 
chcr_ktls_info *tx_info; 390 454 struct dst_entry *dst; ··· 399 461 400 462 pi = netdev_priv(netdev); 401 463 adap = pi->adapter; 464 + port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id]; 465 + atomic64_inc(&port_stats->ktls_tx_connection_open); 466 + 402 467 if (direction == TLS_OFFLOAD_CTX_DIR_RX) { 403 468 pr_err("not expecting for RX direction\n"); 404 - ret = -EINVAL; 405 469 goto out; 406 470 } 407 - if (tx_ctx->chcr_info) { 408 - ret = -EINVAL; 471 + 472 + if (tx_ctx->chcr_info) 409 473 goto out; 410 - } 411 474 412 475 tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL); 413 - if (!tx_info) { 414 - ret = -ENOMEM; 476 + if (!tx_info) 415 477 goto out; 416 - } 417 - 418 - spin_lock_init(&tx_info->lock); 419 - 420 - /* clear connection state */ 421 - spin_lock(&tx_info->lock); 422 - tx_info->connection_state = KTLS_CONN_CLOSED; 423 - spin_unlock(&tx_info->lock); 424 478 425 479 tx_info->sk = sk; 480 + spin_lock_init(&tx_info->lock); 426 481 /* initialize tid and atid to -1, 0 is a also a valid id. 
*/ 427 482 tx_info->tid = -1; 428 483 tx_info->atid = -1; ··· 426 495 tx_info->tx_chan = pi->tx_chan; 427 496 tx_info->smt_idx = pi->smt_idx; 428 497 tx_info->port_id = pi->port_id; 498 + tx_info->prev_ack = 0; 499 + tx_info->prev_win = 0; 429 500 430 501 tx_info->rx_qid = chcr_get_first_rx_qid(adap); 431 502 if (unlikely(tx_info->rx_qid < 0)) 432 - goto out2; 503 + goto free_tx_info; 433 504 434 505 tx_info->prev_seq = start_offload_tcp_sn; 435 506 tx_info->tcp_start_seq_number = start_offload_tcp_sn; ··· 439 506 /* save crypto keys */ 440 507 ret = chcr_ktls_save_keys(tx_info, crypto_info, direction); 441 508 if (ret < 0) 442 - goto out2; 509 + goto free_tx_info; 443 510 444 511 /* get peer ip */ 445 512 if (sk->sk_family == AF_INET) { 446 513 memcpy(daaddr, &sk->sk_daddr, 4); 514 + tx_info->ip_family = AF_INET; 447 515 #if IS_ENABLED(CONFIG_IPV6) 448 516 } else { 449 517 if (!sk->sk_ipv6only && 450 - ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) 518 + ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { 451 519 memcpy(daaddr, &sk->sk_daddr, 4); 452 - else 520 + tx_info->ip_family = AF_INET; 521 + } else { 453 522 memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16); 523 + tx_info->ip_family = AF_INET6; 524 + } 454 525 #endif 455 526 } 456 527 ··· 462 525 dst = sk_dst_get(sk); 463 526 if (!dst) { 464 527 pr_err("DST entry not found\n"); 465 - goto out2; 528 + goto free_tx_info; 466 529 } 467 530 n = dst_neigh_lookup(dst, daaddr); 468 531 if (!n || !n->dev) { 469 532 pr_err("neighbour not found\n"); 470 533 dst_release(dst); 471 - goto out2; 534 + goto free_tx_info; 472 535 } 473 536 tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0); 474 537 ··· 477 540 478 541 if (!tx_info->l2te) { 479 542 pr_err("l2t entry not found\n"); 480 - goto out2; 543 + goto free_tx_info; 481 544 } 482 545 483 - tx_ctx->chcr_info = tx_info; 546 + /* Driver shouldn't be removed until any single connection exists */ 547 + if (!try_module_get(THIS_MODULE)) 548 + goto free_l2t; 
484 549 550 + init_completion(&tx_info->completion); 485 551 /* create a filter and call cxgb4_l2t_send to send the packet out, which 486 552 * will take care of updating l2t entry in hw if not already done. 487 553 */ 488 - ret = chcr_setup_connection(sk, tx_info); 489 - if (ret) 490 - goto out2; 554 + tx_info->open_state = CH_KTLS_OPEN_PENDING; 491 555 492 - /* Driver shouldn't be removed until any single connection exists */ 493 - if (!try_module_get(THIS_MODULE)) { 494 - ret = -EINVAL; 495 - goto out2; 556 + if (chcr_setup_connection(sk, tx_info)) 557 + goto put_module; 558 + 559 + /* Wait for reply */ 560 + wait_for_completion_timeout(&tx_info->completion, 30 * HZ); 561 + spin_lock_bh(&tx_info->lock); 562 + if (tx_info->open_state) { 563 + /* need to wait for hw response, can't free tx_info yet. */ 564 + if (tx_info->open_state == CH_KTLS_OPEN_PENDING) 565 + tx_info->pending_close = true; 566 + /* free the lock after the cleanup */ 567 + goto put_module; 496 568 } 569 + spin_unlock_bh(&tx_info->lock); 497 570 498 - atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_open); 571 + /* initialize tcb */ 572 + reinit_completion(&tx_info->completion); 573 + /* mark it pending for hw response */ 574 + tx_info->open_state = CH_KTLS_OPEN_PENDING; 575 + 576 + if (chcr_init_tcb_fields(tx_info)) 577 + goto free_tid; 578 + 579 + /* Wait for reply */ 580 + wait_for_completion_timeout(&tx_info->completion, 30 * HZ); 581 + spin_lock_bh(&tx_info->lock); 582 + if (tx_info->open_state) { 583 + /* need to wait for hw response, can't free tx_info yet. 
*/ 584 + tx_info->pending_close = true; 585 + /* free the lock after cleanup */ 586 + goto free_tid; 587 + } 588 + spin_unlock_bh(&tx_info->lock); 589 + 590 + if (!cxgb4_check_l2t_valid(tx_info->l2te)) 591 + goto free_tid; 592 + 593 + atomic64_inc(&port_stats->ktls_tx_ctx); 594 + tx_ctx->chcr_info = tx_info; 595 + 499 596 return 0; 500 - out2: 501 - kvfree(tx_info); 597 + 598 + free_tid: 599 + chcr_ktls_mark_tcb_close(tx_info); 600 + #if IS_ENABLED(CONFIG_IPV6) 601 + /* clear clip entry */ 602 + if (tx_info->ip_family == AF_INET6) 603 + cxgb4_clip_release(netdev, (const u32 *) 604 + &sk->sk_v6_rcv_saddr, 605 + 1); 606 + #endif 607 + cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, 608 + tx_info->tid, tx_info->ip_family); 609 + 610 + put_module: 611 + /* release module refcount */ 612 + module_put(THIS_MODULE); 613 + free_l2t: 614 + cxgb4_l2t_release(tx_info->l2te); 615 + free_tx_info: 616 + if (tx_info->pending_close) 617 + spin_unlock_bh(&tx_info->lock); 618 + else 619 + kvfree(tx_info); 502 620 out: 503 - atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_fail); 504 - return ret; 621 + atomic64_inc(&port_stats->ktls_tx_connection_fail); 622 + return -1; 505 623 } 506 624 507 625 /* ··· 619 627 tx_info = lookup_atid(t, atid); 620 628 621 629 if (!tx_info || tx_info->atid != atid) { 622 - pr_err("tx_info or atid is not correct\n"); 630 + pr_err("%s: incorrect tx_info or atid\n", __func__); 623 631 return -1; 632 + } 633 + 634 + cxgb4_free_atid(t, atid); 635 + tx_info->atid = -1; 636 + 637 + spin_lock(&tx_info->lock); 638 + /* HW response is very close, finish pending cleanup */ 639 + if (tx_info->pending_close) { 640 + spin_unlock(&tx_info->lock); 641 + if (!status) { 642 + /* it's a late success, tcb status is establised, 643 + * mark it close. 
644 + */ 645 + chcr_ktls_mark_tcb_close(tx_info); 646 + cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, 647 + tid, tx_info->ip_family); 648 + } 649 + kvfree(tx_info); 650 + return 0; 624 651 } 625 652 626 653 if (!status) { 627 654 tx_info->tid = tid; 628 655 cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family); 629 - 630 - cxgb4_free_atid(t, atid); 631 - tx_info->atid = -1; 632 - /* update the connection state */ 633 - chcr_ktls_update_connection_state(tx_info, 634 - KTLS_CONN_ACT_OPEN_RPL); 656 + tx_info->open_state = CH_KTLS_OPEN_SUCCESS; 657 + } else { 658 + tx_info->open_state = CH_KTLS_OPEN_FAILURE; 635 659 } 660 + spin_unlock(&tx_info->lock); 661 + 662 + complete(&tx_info->completion); 636 663 return 0; 637 664 } 638 665 ··· 669 658 670 659 t = &adap->tids; 671 660 tx_info = lookup_tid(t, tid); 661 + 672 662 if (!tx_info || tx_info->tid != tid) { 673 - pr_err("tx_info or atid is not correct\n"); 663 + pr_err("%s: incorrect tx_info or tid\n", __func__); 674 664 return -1; 675 665 } 676 - /* update the connection state */ 677 - chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL); 666 + 667 + spin_lock(&tx_info->lock); 668 + if (tx_info->pending_close) { 669 + spin_unlock(&tx_info->lock); 670 + kvfree(tx_info); 671 + return 0; 672 + } 673 + tx_info->open_state = false; 674 + spin_unlock(&tx_info->lock); 675 + 676 + complete(&tx_info->completion); 678 677 return 0; 679 678 } 680 679 ··· 786 765 u64 tcp_ack, u64 tcp_win) 787 766 { 788 767 bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0)); 768 + struct ch_ktls_port_stats_debug *port_stats; 789 769 u32 len, cpl = 0, ndesc, wr_len; 790 770 struct fw_ulptx_wr *wr; 791 771 int credits; ··· 820 798 /* reset snd una if it's a re-transmit pkt */ 821 799 if (tcp_seq != tx_info->prev_seq) { 822 800 /* reset snd_una */ 801 + port_stats = 802 + &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; 823 803 pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, 
pos, 824 804 TCB_SND_UNA_RAW_W, 825 805 TCB_SND_UNA_RAW_V 826 806 (TCB_SND_UNA_RAW_M), 827 807 TCB_SND_UNA_RAW_V(0), 0); 828 - atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_ooo); 808 + atomic64_inc(&port_stats->ktls_tx_ooo); 829 809 cpl++; 830 810 } 831 811 /* update ack */ ··· 1860 1836 /* nic tls TX handler */ 1861 1837 static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) 1862 1838 { 1839 + struct ch_ktls_port_stats_debug *port_stats; 1863 1840 struct chcr_ktls_ofld_ctx_tx *tx_ctx; 1864 1841 struct ch_ktls_stats_debug *stats; 1865 1842 struct tcphdr *th = tcp_hdr(skb); ··· 1870 1845 u32 tls_end_offset, tcp_seq; 1871 1846 struct tls_context *tls_ctx; 1872 1847 struct sk_buff *local_skb; 1873 - int new_connection_state; 1874 1848 struct sge_eth_txq *q; 1875 1849 struct adapter *adap; 1876 1850 unsigned long flags; ··· 1892 1868 if (unlikely(!tx_info)) 1893 1869 goto out; 1894 1870 1895 - /* check the connection state, we don't need to pass new connection 1896 - * state, state machine will check and update the new state if it is 1897 - * stuck due to responses not received from HW. 1898 - * Start the tx handling only if state is KTLS_CONN_TX_READY. 1899 - */ 1900 - new_connection_state = chcr_ktls_update_connection_state(tx_info, 0); 1901 - if (new_connection_state != KTLS_CONN_TX_READY) 1902 - goto out; 1903 - 1904 1871 /* don't touch the original skb, make a new skb to extract each records 1905 1872 * and send them separately. 
1906 1873 */ ··· 1902 1887 1903 1888 adap = tx_info->adap; 1904 1889 stats = &adap->ch_ktls_stats; 1890 + port_stats = &stats->ktls_port[tx_info->port_id]; 1905 1891 1906 1892 qidx = skb->queue_mapping; 1907 1893 q = &adap->sge.ethtxq[qidx + tx_info->first_qset]; ··· 1948 1932 */ 1949 1933 if (unlikely(!record)) { 1950 1934 spin_unlock_irqrestore(&tx_ctx->base.lock, flags); 1951 - atomic64_inc(&stats->ktls_tx_drop_no_sync_data); 1935 + atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data); 1952 1936 goto out; 1953 1937 } 1954 1938 1955 1939 if (unlikely(tls_record_is_start_marker(record))) { 1956 1940 spin_unlock_irqrestore(&tx_ctx->base.lock, flags); 1957 - atomic64_inc(&stats->ktls_tx_skip_no_sync_data); 1941 + atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data); 1958 1942 goto out; 1959 1943 } 1960 1944 ··· 2025 2009 } while (data_len > 0); 2026 2010 2027 2011 tx_info->prev_seq = ntohl(th->seq) + skb->data_len; 2028 - 2029 - atomic64_inc(&stats->ktls_tx_encrypted_packets); 2030 - atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes); 2012 + atomic64_inc(&port_stats->ktls_tx_encrypted_packets); 2013 + atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes); 2031 2014 2032 2015 /* tcp finish is set, send a separate tcp msg including all the options 2033 2016 * as well.
+8 -9
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
··· 27 27 #define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\ 28 28 sizeof(struct cpl_tx_sec_pdu)) 29 29 30 - enum chcr_ktls_conn_state { 31 - KTLS_CONN_CLOSED, 32 - KTLS_CONN_ACT_OPEN_REQ, 33 - KTLS_CONN_ACT_OPEN_RPL, 34 - KTLS_CONN_SET_TCB_REQ, 35 - KTLS_CONN_SET_TCB_RPL, 36 - KTLS_CONN_TX_READY, 30 + enum ch_ktls_open_state { 31 + CH_KTLS_OPEN_SUCCESS = 0, 32 + CH_KTLS_OPEN_PENDING = 1, 33 + CH_KTLS_OPEN_FAILURE = 2, 37 34 }; 38 35 39 36 struct chcr_ktls_info { 40 37 struct sock *sk; 41 - spinlock_t lock; /* state machine lock */ 38 + spinlock_t lock; /* lock for pending_close */ 42 39 struct ktls_key_ctx key_ctx; 43 40 struct adapter *adap; 44 41 struct l2t_entry *l2te; 45 42 struct net_device *netdev; 43 + struct completion completion; 46 44 u64 iv; 47 45 u64 record_no; 48 46 int tid; ··· 56 58 u32 tcp_start_seq_number; 57 59 u32 scmd0_short_seqno_numivs; 58 60 u32 scmd0_short_ivgen_hdrlen; 59 - enum chcr_ktls_conn_state connection_state; 60 61 u16 prev_win; 61 62 u8 tx_chan; 62 63 u8 smt_idx; 63 64 u8 port_id; 64 65 u8 ip_family; 65 66 u8 first_qset; 67 + enum ch_ktls_open_state open_state; 68 + bool pending_close; 66 69 }; 67 70 68 71 struct chcr_ktls_ofld_ctx_tx {