Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

macsec: Fix traffic counters/statistics

OutOctetsProtected, OutOctetsEncrypted, InOctetsValidated, and
InOctetsDecrypted were incrementing by the total number of octets in frames
instead of by the number of octets of User Data in frames.

The Controlled Port statistics ifOutOctets and ifInOctets were incrementing
by the total number of octets instead of the number of octets of the MSDUs
plus octets of the destination and source MAC addresses.

The Controlled Port statistics ifInDiscards and ifInErrors were not
incrementing each time the counters they aggregate were.

The Controlled Port statistic ifInErrors was not included in the output of
macsec_get_stats64 so the value was not present in ip commands output.

The ReceiveSA counters InPktsNotValid, InPktsNotUsingSA, and InPktsUnusedSA
were not incrementing.

Signed-off-by: Clayton Yager <Clayton_Yager@selinc.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Clayton Yager; committed by David S. Miller.
91ec9bd5 a3e7b29e

+49 -9
drivers/net/macsec.c
···
 162  162 		return sa;
 163  163 	}
 164  164 
 165    + static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
 166    + {
 167    + 	struct macsec_rx_sa *sa = NULL;
 168    + 	int an;
 169    + 
 170    + 	for (an = 0; an < MACSEC_NUM_AN; an++) {
 171    + 		sa = macsec_rxsa_get(rx_sc->sa[an]);
 172    + 		if (sa)
 173    + 			break;
 174    + 	}
 175    + 	return sa;
 176    + }
 177    + 
 165  178 static void free_rx_sc_rcu(struct rcu_head *head)
 166  179 {
 167  180 	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
···
 513  500 	skb->protocol = eth_hdr(skb)->h_proto;
 514  501 }
 515  502 
 503    + static unsigned int macsec_msdu_len(struct sk_buff *skb)
 504    + {
 505    + 	struct macsec_dev *macsec = macsec_priv(skb->dev);
 506    + 	struct macsec_secy *secy = &macsec->secy;
 507    + 	bool sci_present = macsec_skb_cb(skb)->has_sci;
 508    + 
 509    + 	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
 510    + }
 511    + 
 516  512 static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
 517  513 			    struct macsec_tx_sa *tx_sa)
 518  514 {
 515    + 	unsigned int msdu_len = macsec_msdu_len(skb);
 519  516 	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
 520  517 
 521  518 	u64_stats_update_begin(&txsc_stats->syncp);
 522  519 	if (tx_sc->encrypt) {
 523    - 		txsc_stats->stats.OutOctetsEncrypted += skb->len;
 520    + 		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
 524  521 		txsc_stats->stats.OutPktsEncrypted++;
 525  522 		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
 526  523 	} else {
 527    - 		txsc_stats->stats.OutOctetsProtected += skb->len;
 524    + 		txsc_stats->stats.OutOctetsProtected += msdu_len;
 528  525 		txsc_stats->stats.OutPktsProtected++;
 529  526 		this_cpu_inc(tx_sa->stats->OutPktsProtected);
 530  527 	}
···
 564  541 	aead_request_free(macsec_skb_cb(skb)->req);
 565  542 
 566  543 	rcu_read_lock_bh();
 567    - 	macsec_encrypt_finish(skb, dev);
 568  544 	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
 569    - 	len = skb->len;
 545    + 	/* packet is encrypted/protected so tx_bytes must be calculated */
 546    + 	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
 547    + 	macsec_encrypt_finish(skb, dev);
 570  548 	ret = dev_queue_xmit(skb);
 571  549 	count_tx(dev, ret, len);
 572  550 	rcu_read_unlock_bh();
···
 726  702 
 727  703 	macsec_skb_cb(skb)->req = req;
 728  704 	macsec_skb_cb(skb)->tx_sa = tx_sa;
 705    + 	macsec_skb_cb(skb)->has_sci = sci_present;
 729  706 	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
 730  707 
 731  708 	dev_hold(skb->dev);
···
 768  743 		u64_stats_update_begin(&rxsc_stats->syncp);
 769  744 		rxsc_stats->stats.InPktsLate++;
 770  745 		u64_stats_update_end(&rxsc_stats->syncp);
 746    + 		secy->netdev->stats.rx_dropped++;
 771  747 		return false;
 772  748 	}
 773  749 
 774  750 	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
 751    + 		unsigned int msdu_len = macsec_msdu_len(skb);
 775  752 		u64_stats_update_begin(&rxsc_stats->syncp);
 776  753 		if (hdr->tci_an & MACSEC_TCI_E)
 777    - 			rxsc_stats->stats.InOctetsDecrypted += skb->len;
 754    + 			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
 778  755 		else
 779    - 			rxsc_stats->stats.InOctetsValidated += skb->len;
 756    + 			rxsc_stats->stats.InOctetsValidated += msdu_len;
 780  757 		u64_stats_update_end(&rxsc_stats->syncp);
 781  758 	}
···
 791  764 		u64_stats_update_begin(&rxsc_stats->syncp);
 792  765 		rxsc_stats->stats.InPktsNotValid++;
 793  766 		u64_stats_update_end(&rxsc_stats->syncp);
 767    + 		this_cpu_inc(rx_sa->stats->InPktsNotValid);
 768    + 		secy->netdev->stats.rx_errors++;
 794  769 		return false;
 795  770 	}
···
 885  856 
 886  857 	macsec_finalize_skb(skb, macsec->secy.icv_len,
 887  858 			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 859    + 	len = skb->len;
 888  860 	macsec_reset_skb(skb, macsec->secy.netdev);
 889  861 
 890    - 	len = skb->len;
 891  862 	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
 892  863 		count_rx(dev, len);
···
1078 1049 		u64_stats_update_begin(&secy_stats->syncp);
1079 1050 		secy_stats->stats.InPktsNoTag++;
1080 1051 		u64_stats_update_end(&secy_stats->syncp);
1052    + 		macsec->secy.netdev->stats.rx_dropped++;
1081 1053 		continue;
1082 1054 	}
···
1188 1158 		u64_stats_update_begin(&secy_stats->syncp);
1189 1159 		secy_stats->stats.InPktsBadTag++;
1190 1160 		u64_stats_update_end(&secy_stats->syncp);
1161    + 		secy->netdev->stats.rx_errors++;
1191 1162 		goto drop_nosa;
1192 1163 	}
···
1199 1168 		/* If validateFrames is Strict or the C bit in the
1200 1169 		 * SecTAG is set, discard
1201 1170 		 */
1171    + 		struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
1202 1172 		if (hdr->tci_an & MACSEC_TCI_C ||
1203 1173 		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
1204 1174 			u64_stats_update_begin(&rxsc_stats->syncp);
1205 1175 			rxsc_stats->stats.InPktsNotUsingSA++;
1206 1176 			u64_stats_update_end(&rxsc_stats->syncp);
1177    + 			secy->netdev->stats.rx_errors++;
1178    + 			if (active_rx_sa)
1179    + 				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
1207 1180 			goto drop_nosa;
1208 1181 		}
···
1217 1182 		u64_stats_update_begin(&rxsc_stats->syncp);
1218 1183 		rxsc_stats->stats.InPktsUnusedSA++;
1219 1184 		u64_stats_update_end(&rxsc_stats->syncp);
1185    + 		if (active_rx_sa)
1186    + 			this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
1220 1187 		goto deliver;
1221 1188 	}
···
1239 1202 			u64_stats_update_begin(&rxsc_stats->syncp);
1240 1203 			rxsc_stats->stats.InPktsLate++;
1241 1204 			u64_stats_update_end(&rxsc_stats->syncp);
1205    + 			macsec->secy.netdev->stats.rx_dropped++;
1242 1206 			goto drop;
1243 1207 		}
1244 1208 	}
···
1268 1230 deliver:
1269 1231 	macsec_finalize_skb(skb, secy->icv_len,
1270 1232 			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1233    + 	len = skb->len;
1271 1234 	macsec_reset_skb(skb, secy->netdev);
1272 1235 
1273 1236 	if (rx_sa)
···
1276 1237 	macsec_rxsc_put(rx_sc);
1277 1238 
1278 1239 	skb_orphan(skb);
1279    - 	len = skb->len;
1280 1240 	ret = gro_cells_receive(&macsec->gro_cells, skb);
1281 1241 	if (ret == NET_RX_SUCCESS)
1282 1242 		count_rx(dev, len);
···
1317 1279 		u64_stats_update_begin(&secy_stats->syncp);
1318 1280 		secy_stats->stats.InPktsNoSCI++;
1319 1281 		u64_stats_update_end(&secy_stats->syncp);
1282    + 		macsec->secy.netdev->stats.rx_errors++;
1320 1283 		continue;
1321 1284 	}
1322 1285 
···
3443 3404 		return NETDEV_TX_OK;
3444 3405 	}
3445 3406 
3407    + 	len = skb->len;
3446 3408 	skb = macsec_encrypt(skb, dev);
3447 3409 	if (IS_ERR(skb)) {
3448 3410 		if (PTR_ERR(skb) != -EINPROGRESS)
···
3454 3414 	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3455 3415 
3456 3416 	macsec_encrypt_finish(skb, dev);
3457    - 	len = skb->len;
3458 3417 	ret = dev_queue_xmit(skb);
3459 3418 	count_tx(dev, ret, len);
3460 3419 	return ret;
···
3701 3662 
3702 3663 	s->rx_dropped = dev->stats.rx_dropped;
3703 3664 	s->tx_dropped = dev->stats.tx_dropped;
3665    + 	s->rx_errors = dev->stats.rx_errors;
3704 3666 }
3705 3667 
3706 3668 static int macsec_get_iflink(const struct net_device *dev)