Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

qed: Add LL2 slowpath handling

For the iWARP unaligned MPA flow, a slowpath event is required to
flush an MPA connection that has entered an unaligned state.
The flush ramrod is received on the ll2 queue, and a pre-registered
callback function is called to handle the flush event.

Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Michal Kalderon and committed by
David S. Miller
6f34a284 89d65113

+43 -2
+38 -2
drivers/net/ethernet/qlogic/qed/qed_ll2.c
··· 423 423 } 424 424 425 425 static int 426 + qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn, 427 + struct qed_ll2_info *p_ll2_conn, 428 + union core_rx_cqe_union *p_cqe, 429 + unsigned long *p_lock_flags) 430 + { 431 + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 432 + struct core_rx_slow_path_cqe *sp_cqe; 433 + 434 + sp_cqe = &p_cqe->rx_cqe_sp; 435 + if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) { 436 + DP_NOTICE(p_hwfn, 437 + "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n", 438 + sp_cqe->ramrod_cmd_id); 439 + return -EINVAL; 440 + } 441 + 442 + if (!p_ll2_conn->cbs.slowpath_cb) { 443 + DP_NOTICE(p_hwfn, 444 + "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n"); 445 + return -EINVAL; 446 + } 447 + 448 + spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); 449 + 450 + p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie, 451 + p_ll2_conn->my_id, 452 + le32_to_cpu(sp_cqe->opaque_data.data[0]), 453 + le32_to_cpu(sp_cqe->opaque_data.data[1])); 454 + 455 + spin_lock_irqsave(&p_rx->lock, *p_lock_flags); 456 + 457 + return 0; 458 + } 459 + 460 + static int 426 461 qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, 427 462 struct qed_ll2_info *p_ll2_conn, 428 463 union core_rx_cqe_union *p_cqe, ··· 530 495 531 496 switch (cqe->rx_cqe_sp.type) { 532 497 case CORE_RX_CQE_TYPE_SLOW_PATH: 533 - DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n"); 534 - rc = -EINVAL; 498 + rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn, 499 + cqe, &flags); 535 500 break; 536 501 case CORE_RX_CQE_TYPE_GSI_OFFLOAD: 537 502 case CORE_RX_CQE_TYPE_REGULAR: ··· 1249 1214 p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb; 1250 1215 p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb; 1251 1216 p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb; 1217 + p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb; 1252 1218 p_ll2_info->cbs.cookie = cbs->cookie; 1253 1219 1254 1220 return 0;
+5
include/linux/qed/qed_ll2_if.h
··· 151 151 dma_addr_t first_frag_addr, 152 152 bool b_last_fragment, bool b_last_packet); 153 153 154 + typedef 155 + void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle, 156 + u32 opaque_data_0, u32 opaque_data_1); 157 + 154 158 struct qed_ll2_cbs { 155 159 qed_ll2_complete_rx_packet_cb rx_comp_cb; 156 160 qed_ll2_release_rx_packet_cb rx_release_cb; 157 161 qed_ll2_complete_tx_packet_cb tx_comp_cb; 158 162 qed_ll2_release_tx_packet_cb tx_release_cb; 163 + qed_ll2_slowpath_cb slowpath_cb; 159 164 void *cookie; 160 165 }; 161 166