Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sfc: process RX event inner checksum flags

Add support for RX checksum offload of encapsulated packets. This
essentially just means paying attention to the inner checksum flags
in the RX event, and if *either* checksum flag indicates a fail then
don't tell the kernel that checksum offload was successful.
Also, count these checksum errors and export the counts to ethtool -S.

Test the most common "good" case of RX events with a single bitmask
instead of a series of ifs. Move the more specific error checking
into a separate function for clarity, and don't use unlikely() there
since we know at least one of the bits is bad.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Jon Cooper and committed by
David S. Miller
a0ee3541 df6dd79b

+131 -19
+3
drivers/net/ethernet/sfc/bitfield.h
··· 433 433 (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ 434 434 } while (0) 435 435 436 + #define EFX_AND_QWORD(qword, from, mask) \ 437 + (qword).u64[0] = (from).u64[0] & (mask).u64[0] 438 + 436 439 #define EFX_OR_OWORD(oword, from, mask) \ 437 440 do { \ 438 441 (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
+111 -12
drivers/net/ethernet/sfc/ef10.c
··· 3154 3154 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; 3155 3155 } 3156 3156 3157 + static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, 3158 + unsigned int n_packets, 3159 + unsigned int rx_encap_hdr, 3160 + unsigned int rx_l3_class, 3161 + unsigned int rx_l4_class, 3162 + const efx_qword_t *event) 3163 + { 3164 + struct efx_nic *efx = channel->efx; 3165 + 3166 + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { 3167 + if (!efx->loopback_selftest) 3168 + channel->n_rx_eth_crc_err += n_packets; 3169 + return EFX_RX_PKT_DISCARD; 3170 + } 3171 + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { 3172 + if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 3173 + rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3174 + rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 3175 + rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 3176 + rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 3177 + netdev_WARN(efx->net_dev, 3178 + "invalid class for RX_IPCKSUM_ERR: event=" 3179 + EFX_QWORD_FMT "\n", 3180 + EFX_QWORD_VAL(*event)); 3181 + if (!efx->loopback_selftest) 3182 + *(rx_encap_hdr ? 3183 + &channel->n_rx_outer_ip_hdr_chksum_err : 3184 + &channel->n_rx_ip_hdr_chksum_err) += n_packets; 3185 + return 0; 3186 + } 3187 + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { 3188 + if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 3189 + ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3190 + rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 3191 + (rx_l4_class != ESE_DZ_L4_CLASS_TCP && 3192 + rx_l4_class != ESE_DZ_L4_CLASS_UDP)))) 3193 + netdev_WARN(efx->net_dev, 3194 + "invalid class for RX_TCPUDP_CKSUM_ERR: event=" 3195 + EFX_QWORD_FMT "\n", 3196 + EFX_QWORD_VAL(*event)); 3197 + if (!efx->loopback_selftest) 3198 + *(rx_encap_hdr ? 
3199 + &channel->n_rx_outer_tcp_udp_chksum_err : 3200 + &channel->n_rx_tcp_udp_chksum_err) += n_packets; 3201 + return 0; 3202 + } 3203 + if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { 3204 + if (unlikely(!rx_encap_hdr)) 3205 + netdev_WARN(efx->net_dev, 3206 + "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" 3207 + EFX_QWORD_FMT "\n", 3208 + EFX_QWORD_VAL(*event)); 3209 + else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3210 + rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 3211 + rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 3212 + rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 3213 + netdev_WARN(efx->net_dev, 3214 + "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" 3215 + EFX_QWORD_FMT "\n", 3216 + EFX_QWORD_VAL(*event)); 3217 + if (!efx->loopback_selftest) 3218 + channel->n_rx_inner_ip_hdr_chksum_err += n_packets; 3219 + return 0; 3220 + } 3221 + if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { 3222 + if (unlikely(!rx_encap_hdr)) 3223 + netdev_WARN(efx->net_dev, 3224 + "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 3225 + EFX_QWORD_FMT "\n", 3226 + EFX_QWORD_VAL(*event)); 3227 + else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3228 + rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 3229 + (rx_l4_class != ESE_DZ_L4_CLASS_TCP && 3230 + rx_l4_class != ESE_DZ_L4_CLASS_UDP))) 3231 + netdev_WARN(efx->net_dev, 3232 + "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 3233 + EFX_QWORD_FMT "\n", 3234 + EFX_QWORD_VAL(*event)); 3235 + if (!efx->loopback_selftest) 3236 + channel->n_rx_inner_tcp_udp_chksum_err += n_packets; 3237 + return 0; 3238 + } 3239 + 3240 + WARN_ON(1); /* No error bits were recognised */ 3241 + return 0; 3242 + } 3243 + 3157 3244 static int efx_ef10_handle_rx_event(struct efx_channel *channel, 3158 3245 const efx_qword_t *event) 3159 3246 { 3160 - unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; 3247 + unsigned int rx_bytes, next_ptr_lbits, rx_queue_label; 3248 + unsigned 
int rx_l3_class, rx_l4_class, rx_encap_hdr; 3161 3249 unsigned int n_descs, n_packets, i; 3162 3250 struct efx_nic *efx = channel->efx; 3251 + struct efx_ef10_nic_data *nic_data = efx->nic_data; 3163 3252 struct efx_rx_queue *rx_queue; 3253 + efx_qword_t errors; 3164 3254 bool rx_cont; 3165 3255 u16 flags = 0; 3166 3256 ··· 3261 3171 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); 3262 3172 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); 3263 3173 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); 3174 + rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); 3264 3175 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); 3265 3176 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); 3177 + rx_encap_hdr = 3178 + nic_data->datapath_caps & 3179 + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ? 3180 + EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : 3181 + ESE_EZ_ENCAP_HDR_NONE; 3266 3182 3267 3183 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) 3268 3184 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" ··· 3328 3232 n_packets = 1; 3329 3233 } 3330 3234 3331 - if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR))) 3332 - flags |= EFX_RX_PKT_DISCARD; 3333 - 3334 - if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) { 3335 - channel->n_rx_ip_hdr_chksum_err += n_packets; 3336 - } else if (unlikely(EFX_QWORD_FIELD(*event, 3337 - ESF_DZ_RX_TCPUDP_CKSUM_ERR))) { 3338 - channel->n_rx_tcp_udp_chksum_err += n_packets; 3339 - } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || 3340 - rx_l4_class == ESE_DZ_L4_CLASS_UDP) { 3341 - flags |= EFX_RX_PKT_CSUMMED; 3235 + EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, 3236 + ESF_DZ_RX_IPCKSUM_ERR, 1, 3237 + ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, 3238 + ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, 3239 + ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); 3240 + EFX_AND_QWORD(errors, *event, errors); 3241 + if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { 3242 + flags |= 
efx_ef10_handle_rx_event_errors(channel, n_packets, 3243 + rx_l3_class, rx_l4_class, 3244 + rx_encap_hdr, event); 3245 + } else { 3246 + if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || 3247 + rx_l4_class == ESE_DZ_L4_CLASS_UDP) 3248 + flags |= EFX_RX_PKT_CSUMMED; 3342 3249 } 3343 3250 3344 3251 if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+5
drivers/net/ethernet/sfc/ethtool.c
··· 77 77 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), 78 78 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), 79 79 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), 80 + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err), 81 + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err), 82 + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err), 83 + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err), 84 + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err), 80 85 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), 81 86 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), 82 87 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+12 -7
drivers/net/ethernet/sfc/net_driver.h
··· 469 469 u32 *rps_flow_id; 470 470 #endif 471 471 472 - unsigned n_rx_tobe_disc; 473 - unsigned n_rx_ip_hdr_chksum_err; 474 - unsigned n_rx_tcp_udp_chksum_err; 475 - unsigned n_rx_mcast_mismatch; 476 - unsigned n_rx_frm_trunc; 477 - unsigned n_rx_overlength; 478 - unsigned n_skbuff_leaks; 472 + unsigned int n_rx_tobe_disc; 473 + unsigned int n_rx_ip_hdr_chksum_err; 474 + unsigned int n_rx_tcp_udp_chksum_err; 475 + unsigned int n_rx_outer_ip_hdr_chksum_err; 476 + unsigned int n_rx_outer_tcp_udp_chksum_err; 477 + unsigned int n_rx_inner_ip_hdr_chksum_err; 478 + unsigned int n_rx_inner_tcp_udp_chksum_err; 479 + unsigned int n_rx_eth_crc_err; 480 + unsigned int n_rx_mcast_mismatch; 481 + unsigned int n_rx_frm_trunc; 482 + unsigned int n_rx_overlength; 483 + unsigned int n_skbuff_leaks; 479 484 unsigned int n_rx_nodesc_trunc; 480 485 unsigned int n_rx_merge_events; 481 486 unsigned int n_rx_merge_packets;