Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/mlx5e: kTLS, Add kTLS RX stats

Add global and per-channel ethtool SW stats for the device
offload.
Document the new counters in tls-offload.rst.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>

Authored by Tariq Toukan and committed by Saeed Mahameed
76c1e1ac 0419d8c9

+107 -4
+18
Documentation/networking/tls-offload.rst
··· 428 428 which were part of a TLS stream. 429 429 * ``rx_tls_decrypted_bytes`` - number of TLS payload bytes in RX packets 430 430 which were successfully decrypted. 431 + * ``rx_tls_ctx`` - number of TLS RX HW offload contexts added to device for 432 + decryption. 433 + * ``rx_tls_del`` - number of TLS RX HW offload contexts deleted from device 434 + (connection has finished). 435 + * ``rx_tls_resync_req_pkt`` - number of received TLS packets with a resync 436 + request. 437 + * ``rx_tls_resync_req_start`` - number of times the TLS async resync request 438 + was started. 439 + * ``rx_tls_resync_req_end`` - number of times the TLS async resync request 440 + properly ended with providing the HW tracked tcp-seq. 441 + * ``rx_tls_resync_req_skip`` - number of times the TLS async resync request 442 + procedure was started but not properly ended. 443 + * ``rx_tls_resync_res_ok`` - number of times the TLS resync response call to 444 + the driver was successfully handled. 445 + * ``rx_tls_resync_res_skip`` - number of times the TLS resync response call to 446 + the driver was terminated unsuccessfully. 447 + * ``rx_tls_err`` - number of RX packets which were part of a TLS stream 448 + but were not decrypted due to unexpected error in the state machine. 431 449 * ``tx_tls_encrypted_packets`` - number of TX packets passed to the device 432 450 for encryption of their TLS payload. 433 451 * ``tx_tls_encrypted_bytes`` - number of TLS payload bytes in TX packets
+25 -4
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
··· 46 46 struct tls12_crypto_info_aes_gcm_128 crypto_info; 47 47 struct accel_rule rule; 48 48 struct sock *sk; 49 + struct mlx5e_rq_stats *stats; 49 50 struct completion add_ctx; 50 51 u32 tirn; 51 52 u32 key_id; ··· 204 203 return err; 205 204 206 205 err_out: 206 + priv_rx->stats->tls_resync_req_skip++; 207 207 err = PTR_ERR(cseg); 208 208 complete(&priv_rx->add_ctx); 209 209 goto unlock; ··· 298 296 return cseg; 299 297 300 298 err_out: 299 + priv_rx->stats->tls_resync_req_skip++; 301 300 return ERR_PTR(err); 302 301 } 303 302 ··· 365 362 366 363 cseg = post_static_params(sq, priv_rx); 367 364 if (IS_ERR(cseg)) { 365 + priv_rx->stats->tls_resync_res_skip++; 368 366 err = PTR_ERR(cseg); 369 367 goto unlock; 370 368 } 371 369 /* Do not increment priv_rx refcnt, CQE handling is empty */ 372 370 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); 371 + priv_rx->stats->tls_resync_res_ok++; 373 372 unlock: 374 373 spin_unlock(&c->async_icosq_lock); 375 374 ··· 401 396 tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state); 402 397 auth_state = MLX5_GET(tls_progress_params, ctx, auth_state); 403 398 if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING || 404 - auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) 399 + auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) { 400 + priv_rx->stats->tls_resync_req_skip++; 405 401 goto out; 402 + } 406 403 407 404 hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn); 408 405 tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); 406 + priv_rx->stats->tls_resync_req_end++; 409 407 out: 410 408 refcount_dec(&resync->refcnt); 411 409 kfree(buf); ··· 487 479 seq = th->seq; 488 480 datalen = skb->len - depth; 489 481 tls_offload_rx_resync_async_request_start(sk, seq, datalen); 482 + rq->stats->tls_resync_req_start++; 490 483 } 491 484 492 485 void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, ··· 518 509 struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) 519 510 { 520 511 u8 tls_offload = get_cqe_tls_offload(cqe); 512 + struct mlx5e_rq_stats *stats; 521 513 522 514 if (likely(tls_offload == CQE_TLS_OFFLOAD_NOT_DECRYPTED)) 523 515 return; 524 516 517 + stats = rq->stats; 518 + 525 519 switch (tls_offload) { 526 520 case CQE_TLS_OFFLOAD_DECRYPTED: 527 521 skb->decrypted = 1; 522 + stats->tls_decrypted_packets++; 523 + stats->tls_decrypted_bytes += *cqe_bcnt; 528 524 break; 529 525 case CQE_TLS_OFFLOAD_RESYNC: 526 + stats->tls_resync_req_pkt++; 530 527 resync_update_sn(rq, skb); 531 528 break; 532 529 default: /* CQE_TLS_OFFLOAD_ERROR: */ 530 + stats->tls_err++; 533 531 break; 534 532 } 535 533 } ··· 578 562 579 563 priv_rx->crypto_info = 580 564 *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; 581 - priv_rx->sk = sk; 582 - priv_rx->rxq = mlx5e_accel_sk_get_rxq(sk); 583 565 566 + rxq = mlx5e_accel_sk_get_rxq(sk); 567 + priv_rx->rxq = rxq; 568 + priv_rx->sk = sk; 569 + 570 + priv_rx->stats = &priv->channel_stats[rxq].rq; 584 571 mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx); 585 572 586 - rxq = priv_rx->rxq; 587 573 rqtn = priv->direct_tir[rxq].rqt.rqtn; 588 574 589 575 err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn); ··· 603 585 err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn); 604 586 if (err) 605 587 goto err_post_wqes; 588 + 589 + priv_rx->stats->tls_ctx++; 606 590 607 591 return 0; 608 592 ··· 666 646 refcount_dec(&resync->refcnt); 667 647 wait_for_resync(netdev, resync); 668 648 649 + priv_rx->stats->tls_del++; 669 650 if (priv_rx->rule.rule) 670 651 mlx5e_accel_fs_del_sk(priv_rx->rule.rule); 671 652
+39
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
··· 163 163 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, 164 164 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, 165 165 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) }, 166 + #ifdef CONFIG_MLX5_EN_TLS 167 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) }, 168 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) }, 169 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) }, 170 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) }, 171 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) }, 172 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) }, 173 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) }, 174 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) }, 175 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) }, 176 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) }, 177 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) }, 178 + #endif 166 179 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, 167 180 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, 168 181 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, ··· 288 275 s->rx_congst_umr += rq_stats->congst_umr; 289 276 s->rx_arfs_err += rq_stats->arfs_err; 290 277 s->rx_recover += rq_stats->recover; 278 + #ifdef CONFIG_MLX5_EN_TLS 279 + s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; 280 + s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; 281 + s->rx_tls_ctx += rq_stats->tls_ctx; 282 + s->rx_tls_del += rq_stats->tls_del; 283 + s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt; 284 + s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start; 285 + s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; 286 + s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip; 287 + s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok; 288 + s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; 289 + s->rx_tls_err += rq_stats->tls_err; 290 + #endif 291 291 s->ch_events += ch_stats->events; 292 292 s->ch_poll += ch_stats->poll; 293 293 s->ch_arm += ch_stats->arm; ··· 1501 1475 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, 1502 1476 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) }, 1503 1477 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) }, 1478 + #ifdef CONFIG_MLX5_EN_TLS 1479 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) }, 1480 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) }, 1481 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) }, 1482 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) }, 1483 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) }, 1484 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) }, 1485 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) }, 1486 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) }, 1487 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) }, 1488 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) }, 1489 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) }, 1490 + #endif 1504 1491 }; 1505 1492 1506 1493 static const struct counter_desc sq_stats_desc[] = {
+25
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
··· 186 186 u64 tx_tls_skip_no_sync_data; 187 187 u64 tx_tls_drop_no_sync_data; 188 188 u64 tx_tls_drop_bypass_req; 189 + 190 + u64 rx_tls_decrypted_packets; 191 + u64 rx_tls_decrypted_bytes; 192 + u64 rx_tls_ctx; 193 + u64 rx_tls_del; 194 + u64 rx_tls_resync_req_pkt; 195 + u64 rx_tls_resync_req_start; 196 + u64 rx_tls_resync_req_end; 197 + u64 rx_tls_resync_req_skip; 198 + u64 rx_tls_resync_res_ok; 199 + u64 rx_tls_resync_res_skip; 200 + u64 rx_tls_err; 189 201 #endif 190 202 191 203 u64 rx_xsk_packets; ··· 317 305 u64 congst_umr; 318 306 u64 arfs_err; 319 307 u64 recover; 308 + #ifdef CONFIG_MLX5_EN_TLS 309 + u64 tls_decrypted_packets; 310 + u64 tls_decrypted_bytes; 311 + u64 tls_ctx; 312 + u64 tls_del; 313 + u64 tls_resync_req_pkt; 314 + u64 tls_resync_req_start; 315 + u64 tls_resync_req_end; 316 + u64 tls_resync_req_skip; 317 + u64 tls_resync_res_ok; 318 + u64 tls_resync_res_skip; 319 + u64 tls_err; 320 + #endif 320 321 }; 321 322 322 323 struct mlx5e_sq_stats {