Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'tcp_drop_reason'

Menglong Dong says:

====================
net: add skb drop reasons to TCP packet receive

In commit c504e5c2f964 ("net: skb: introduce kfree_skb_reason()"),
we added support for reporting the reasons for skb drops to the kfree_skb
tracepoint. In this series of patches, reasons for skb drops are added
to the TCP layer (both TCPv4 and TCPv6 are considered).
Following functions are processed:

tcp_v4_rcv()
tcp_v6_rcv()
tcp_v4_inbound_md5_hash()
tcp_v6_inbound_md5_hash()
tcp_add_backlog()
tcp_v4_do_rcv()
tcp_v6_do_rcv()
tcp_rcv_established()
tcp_data_queue()
tcp_data_queue_ofo()

The functions we handled are mostly for packet ingress, as skb drops
rarely happen in the egress path of the TCP layer. However, it's a little
complex for TCP state processing, as I find that it's hard to report skb
drop reasons at the point where the skb is freed. For example, when an skb
is dropped in tcp_rcv_state_process(), the drop can be caused by the call
to tcp_v4_conn_request(), and it's hard to return a drop reason from
tcp_v4_conn_request(). So such cases are skipped for the moment.

The following new drop reasons are introduced (what they mean can be
seen in their documentation):

/* SKB_DROP_REASON_TCP_MD5* corresponding to LINUX_MIB_TCPMD5* */
SKB_DROP_REASON_TCP_MD5NOTFOUND
SKB_DROP_REASON_TCP_MD5UNEXPECTED
SKB_DROP_REASON_TCP_MD5FAILURE
SKB_DROP_REASON_SOCKET_BACKLOG
SKB_DROP_REASON_TCP_FLAGS
SKB_DROP_REASON_TCP_ZEROWINDOW
SKB_DROP_REASON_TCP_OLD_DATA
SKB_DROP_REASON_TCP_OVERWINDOW
/* corresponding to LINUX_MIB_TCPOFOMERGE */
SKB_DROP_REASON_TCP_OFOMERGE

Here is an example of getting TCP packet drop reasons from ftrace:

$ echo 1 > /sys/kernel/debug/tracing/events/skb/kfree_skb/enable
$ cat /sys/kernel/debug/tracing/trace
$ <idle>-0 [036] ..s1. 647.428165: kfree_skb: skbaddr=000000004d037db6 protocol=2048 location=0000000074cd1243 reason: NO_SOCKET
$ <idle>-0 [020] ..s2. 639.676674: kfree_skb: skbaddr=00000000bcbfa42d protocol=2048 location=00000000bfe89d35 reason: PROTO_MEM

From the reason 'PROTO_MEM' we can tell that the skb was dropped because
the memory configured in net.ipv4.tcp_mem has reached its limit.

Changes since v2:
- remove the 'inline' of tcp_drop() in the 1st patch, as Jakub
suggested

Changes since v1:
- enrich the documentation for this series of patches in the cover letter,
as Eric suggested
- fix the compile warning reported by Jakub in the 6th patch
- let NO_SOCKET trump the XFRM failure in the 2nd and 3rd patches
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+134 -30
+34
include/linux/skbuff.h
··· 346 346 * udp packet drop out of 347 347 * udp_memory_allocated. 348 348 */ 349 + SKB_DROP_REASON_TCP_MD5NOTFOUND, /* no MD5 hash and one 350 + * expected, corresponding 351 + * to LINUX_MIB_TCPMD5NOTFOUND 352 + */ 353 + SKB_DROP_REASON_TCP_MD5UNEXPECTED, /* MD5 hash and we're not 354 + * expecting one, corresponding 355 + * to LINUX_MIB_TCPMD5UNEXPECTED 356 + */ 357 + SKB_DROP_REASON_TCP_MD5FAILURE, /* MD5 hash and its wrong, 358 + * corresponding to 359 + * LINUX_MIB_TCPMD5FAILURE 360 + */ 361 + SKB_DROP_REASON_SOCKET_BACKLOG, /* failed to add skb to socket 362 + * backlog (see 363 + * LINUX_MIB_TCPBACKLOGDROP) 364 + */ 365 + SKB_DROP_REASON_TCP_FLAGS, /* TCP flags invalid */ 366 + SKB_DROP_REASON_TCP_ZEROWINDOW, /* TCP receive window size is zero, 367 + * see LINUX_MIB_TCPZEROWINDOWDROP 368 + */ 369 + SKB_DROP_REASON_TCP_OLD_DATA, /* the TCP data reveived is already 370 + * received before (spurious retrans 371 + * may happened), see 372 + * LINUX_MIB_DELAYEDACKLOST 373 + */ 374 + SKB_DROP_REASON_TCP_OVERWINDOW, /* the TCP data is out of window, 375 + * the seq of the first byte exceed 376 + * the right edges of receive 377 + * window 378 + */ 379 + SKB_DROP_REASON_TCP_OFOMERGE, /* the data of skb is already in 380 + * the ofo queue, corresponding to 381 + * LINUX_MIB_TCPOFOMERGE 382 + */ 349 383 SKB_DROP_REASON_MAX, 350 384 }; 351 385
+2 -1
include/net/tcp.h
··· 1367 1367 __skb_checksum_complete(skb); 1368 1368 } 1369 1369 1370 - bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); 1370 + bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, 1371 + enum skb_drop_reason *reason); 1371 1372 1372 1373 #ifdef CONFIG_INET 1373 1374 void __sk_defer_free_flush(struct sock *sk);
+10
include/trace/events/skb.h
··· 27 27 EM(SKB_DROP_REASON_IP_NOPROTO, IP_NOPROTO) \ 28 28 EM(SKB_DROP_REASON_SOCKET_RCVBUFF, SOCKET_RCVBUFF) \ 29 29 EM(SKB_DROP_REASON_PROTO_MEM, PROTO_MEM) \ 30 + EM(SKB_DROP_REASON_TCP_MD5NOTFOUND, TCP_MD5NOTFOUND) \ 31 + EM(SKB_DROP_REASON_TCP_MD5UNEXPECTED, \ 32 + TCP_MD5UNEXPECTED) \ 33 + EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE) \ 34 + EM(SKB_DROP_REASON_SOCKET_BACKLOG, SOCKET_BACKLOG) \ 35 + EM(SKB_DROP_REASON_TCP_FLAGS, TCP_FLAGS) \ 36 + EM(SKB_DROP_REASON_TCP_ZEROWINDOW, TCP_ZEROWINDOW) \ 37 + EM(SKB_DROP_REASON_TCP_OLD_DATA, TCP_OLD_DATA) \ 38 + EM(SKB_DROP_REASON_TCP_OVERWINDOW, TCP_OVERWINDOW) \ 39 + EM(SKB_DROP_REASON_TCP_OFOMERGE, TCP_OFOMERGE) \ 30 40 EMe(SKB_DROP_REASON_MAX, MAX) 31 41 32 42 #undef EM
+32 -10
net/ipv4/tcp_input.c
··· 4684 4684 return res; 4685 4685 } 4686 4686 4687 - static void tcp_drop(struct sock *sk, struct sk_buff *skb) 4687 + static void tcp_drop_reason(struct sock *sk, struct sk_buff *skb, 4688 + enum skb_drop_reason reason) 4688 4689 { 4689 4690 sk_drops_add(sk, skb); 4690 - __kfree_skb(skb); 4691 + kfree_skb_reason(skb, reason); 4692 + } 4693 + 4694 + static void tcp_drop(struct sock *sk, struct sk_buff *skb) 4695 + { 4696 + tcp_drop_reason(sk, skb, SKB_DROP_REASON_NOT_SPECIFIED); 4691 4697 } 4692 4698 4693 4699 /* This one checks to see if we can put data from the ··· 4779 4773 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { 4780 4774 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); 4781 4775 sk->sk_data_ready(sk); 4782 - tcp_drop(sk, skb); 4776 + tcp_drop_reason(sk, skb, SKB_DROP_REASON_PROTO_MEM); 4783 4777 return; 4784 4778 } 4785 4779 ··· 4842 4836 /* All the bits are present. Drop. */ 4843 4837 NET_INC_STATS(sock_net(sk), 4844 4838 LINUX_MIB_TCPOFOMERGE); 4845 - tcp_drop(sk, skb); 4839 + tcp_drop_reason(sk, skb, 4840 + SKB_DROP_REASON_TCP_OFOMERGE); 4846 4841 skb = NULL; 4847 4842 tcp_dsack_set(sk, seq, end_seq); 4848 4843 goto add_sack; ··· 4862 4855 TCP_SKB_CB(skb1)->end_seq); 4863 4856 NET_INC_STATS(sock_net(sk), 4864 4857 LINUX_MIB_TCPOFOMERGE); 4865 - tcp_drop(sk, skb1); 4858 + tcp_drop_reason(sk, skb1, 4859 + SKB_DROP_REASON_TCP_OFOMERGE); 4866 4860 goto merge_right; 4867 4861 } 4868 4862 } else if (tcp_ooo_try_coalesce(sk, skb1, ··· 4891 4883 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4892 4884 TCP_SKB_CB(skb1)->end_seq); 4893 4885 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); 4894 - tcp_drop(sk, skb1); 4886 + tcp_drop_reason(sk, skb1, SKB_DROP_REASON_TCP_OFOMERGE); 4895 4887 } 4896 4888 /* If there is no skb after us, we are the last_skb ! 
*/ 4897 4889 if (!skb1) ··· 4990 4982 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4991 4983 { 4992 4984 struct tcp_sock *tp = tcp_sk(sk); 4985 + enum skb_drop_reason reason; 4993 4986 bool fragstolen; 4994 4987 int eaten; 4995 4988 ··· 5009 5000 skb_dst_drop(skb); 5010 5001 __skb_pull(skb, tcp_hdr(skb)->doff * 4); 5011 5002 5003 + reason = SKB_DROP_REASON_NOT_SPECIFIED; 5012 5004 tp->rx_opt.dsack = 0; 5013 5005 5014 5006 /* Queue data for delivery to the user. ··· 5018 5008 */ 5019 5009 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 5020 5010 if (tcp_receive_window(tp) == 0) { 5011 + reason = SKB_DROP_REASON_TCP_ZEROWINDOW; 5021 5012 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); 5022 5013 goto out_of_window; 5023 5014 } ··· 5028 5017 if (skb_queue_len(&sk->sk_receive_queue) == 0) 5029 5018 sk_forced_mem_schedule(sk, skb->truesize); 5030 5019 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { 5020 + reason = SKB_DROP_REASON_PROTO_MEM; 5031 5021 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); 5032 5022 sk->sk_data_ready(sk); 5033 5023 goto drop; ··· 5065 5053 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 5066 5054 tcp_rcv_spurious_retrans(sk, skb); 5067 5055 /* A retransmit, 2nd most common case. Force an immediate ack. */ 5056 + reason = SKB_DROP_REASON_TCP_OLD_DATA; 5068 5057 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 5069 5058 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 5070 5059 ··· 5073 5060 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); 5074 5061 inet_csk_schedule_ack(sk); 5075 5062 drop: 5076 - tcp_drop(sk, skb); 5063 + tcp_drop_reason(sk, skb, reason); 5077 5064 return; 5078 5065 } 5079 5066 5080 5067 /* Out of window. F.e. zero window probe. 
*/ 5081 - if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 5068 + if (!before(TCP_SKB_CB(skb)->seq, 5069 + tp->rcv_nxt + tcp_receive_window(tp))) { 5070 + reason = SKB_DROP_REASON_TCP_OVERWINDOW; 5082 5071 goto out_of_window; 5072 + } 5083 5073 5084 5074 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 5085 5075 /* Partial packet, seq < rcv_next < end_seq */ ··· 5092 5076 * remembering D-SACK for its head made in previous line. 5093 5077 */ 5094 5078 if (!tcp_receive_window(tp)) { 5079 + reason = SKB_DROP_REASON_TCP_ZEROWINDOW; 5095 5080 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); 5096 5081 goto out_of_window; 5097 5082 } ··· 5798 5781 */ 5799 5782 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) 5800 5783 { 5784 + enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; 5801 5785 const struct tcphdr *th = (const struct tcphdr *)skb->data; 5802 5786 struct tcp_sock *tp = tcp_sk(sk); 5803 5787 unsigned int len = skb->len; ··· 5887 5869 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; 5888 5870 return; 5889 5871 } else { /* Header too small */ 5872 + reason = SKB_DROP_REASON_PKT_TOO_SMALL; 5890 5873 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 5891 5874 goto discard; 5892 5875 } ··· 5943 5924 if (len < (th->doff << 2) || tcp_checksum_complete(skb)) 5944 5925 goto csum_error; 5945 5926 5946 - if (!th->ack && !th->rst && !th->syn) 5927 + if (!th->ack && !th->rst && !th->syn) { 5928 + reason = SKB_DROP_REASON_TCP_FLAGS; 5947 5929 goto discard; 5930 + } 5948 5931 5949 5932 /* 5950 5933 * Standard slow path. ··· 5972 5951 return; 5973 5952 5974 5953 csum_error: 5954 + reason = SKB_DROP_REASON_TCP_CSUM; 5975 5955 trace_tcp_bad_csum(skb); 5976 5956 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); 5977 5957 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 5978 5958 5979 5959 discard: 5980 - tcp_drop(sk, skb); 5960 + tcp_drop_reason(sk, skb, reason); 5981 5961 } 5982 5962 EXPORT_SYMBOL(tcp_rcv_established); 5983 5963
+24 -8
net/ipv4/tcp_ipv4.c
··· 1412 1412 /* Called with rcu_read_lock() */ 1413 1413 static bool tcp_v4_inbound_md5_hash(const struct sock *sk, 1414 1414 const struct sk_buff *skb, 1415 - int dif, int sdif) 1415 + int dif, int sdif, 1416 + enum skb_drop_reason *reason) 1416 1417 { 1417 1418 #ifdef CONFIG_TCP_MD5SIG 1418 1419 /* ··· 1446 1445 return false; 1447 1446 1448 1447 if (hash_expected && !hash_location) { 1448 + *reason = SKB_DROP_REASON_TCP_MD5NOTFOUND; 1449 1449 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 1450 1450 return true; 1451 1451 } 1452 1452 1453 1453 if (!hash_expected && hash_location) { 1454 + *reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED; 1454 1455 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 1455 1456 return true; 1456 1457 } ··· 1465 1462 NULL, skb); 1466 1463 1467 1464 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 1465 + *reason = SKB_DROP_REASON_TCP_MD5FAILURE; 1468 1466 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); 1469 1467 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n", 1470 1468 &iph->saddr, ntohs(th->source), ··· 1708 1704 */ 1709 1705 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) 1710 1706 { 1707 + enum skb_drop_reason reason; 1711 1708 struct sock *rsk; 1712 1709 1713 1710 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ ··· 1731 1726 return 0; 1732 1727 } 1733 1728 1729 + reason = SKB_DROP_REASON_NOT_SPECIFIED; 1734 1730 if (tcp_checksum_complete(skb)) 1735 1731 goto csum_err; 1736 1732 ··· 1759 1753 reset: 1760 1754 tcp_v4_send_reset(rsk, skb); 1761 1755 discard: 1762 - kfree_skb(skb); 1756 + kfree_skb_reason(skb, reason); 1763 1757 /* Be careful here. If this function gets more complicated and 1764 1758 * gcc suffers from register pressure on the x86, sk (in %ebx) 1765 1759 * might be destroyed here. 
This current version compiles correctly, ··· 1768 1762 return 0; 1769 1763 1770 1764 csum_err: 1765 + reason = SKB_DROP_REASON_TCP_CSUM; 1771 1766 trace_tcp_bad_csum(skb); 1772 1767 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); 1773 1768 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); ··· 1814 1807 return 0; 1815 1808 } 1816 1809 1817 - bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) 1810 + bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, 1811 + enum skb_drop_reason *reason) 1818 1812 { 1819 1813 u32 limit, tail_gso_size, tail_gso_segs; 1820 1814 struct skb_shared_info *shinfo; ··· 1841 1833 if (unlikely(tcp_checksum_complete(skb))) { 1842 1834 bh_unlock_sock(sk); 1843 1835 trace_tcp_bad_csum(skb); 1836 + *reason = SKB_DROP_REASON_TCP_CSUM; 1844 1837 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); 1845 1838 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 1846 1839 return true; ··· 1930 1921 1931 1922 if (unlikely(sk_add_backlog(sk, skb, limit))) { 1932 1923 bh_unlock_sock(sk); 1924 + *reason = SKB_DROP_REASON_SOCKET_BACKLOG; 1933 1925 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP); 1934 1926 return true; 1935 1927 } ··· 1981 1971 int tcp_v4_rcv(struct sk_buff *skb) 1982 1972 { 1983 1973 struct net *net = dev_net(skb->dev); 1974 + enum skb_drop_reason drop_reason; 1984 1975 int sdif = inet_sdif(skb); 1985 1976 int dif = inet_iif(skb); 1986 1977 const struct iphdr *iph; 1987 1978 const struct tcphdr *th; 1988 1979 bool refcounted; 1989 1980 struct sock *sk; 1990 - int drop_reason; 1991 1981 int ret; 1992 1982 1993 1983 drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; ··· 2035 2025 struct sock *nsk; 2036 2026 2037 2027 sk = req->rsk_listener; 2038 - if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) { 2028 + if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif, 2029 + &drop_reason))) { 2039 2030 sk_drops_add(sk, skb); 2040 2031 reqsk_put(req); 2041 2032 goto discard_it; ··· 2068 2057 iph = ip_hdr(skb); 2069 2058 
tcp_v4_fill_cb(skb, iph, th); 2070 2059 nsk = tcp_check_req(sk, skb, req, false, &req_stolen); 2060 + } else { 2061 + drop_reason = SKB_DROP_REASON_SOCKET_FILTER; 2071 2062 } 2072 2063 if (!nsk) { 2073 2064 reqsk_put(req); ··· 2105 2092 } 2106 2093 } 2107 2094 2108 - if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) 2095 + if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { 2096 + drop_reason = SKB_DROP_REASON_XFRM_POLICY; 2109 2097 goto discard_and_relse; 2098 + } 2110 2099 2111 - if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif)) 2100 + if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason)) 2112 2101 goto discard_and_relse; 2113 2102 2114 2103 nf_reset_ct(skb); ··· 2139 2124 if (!sock_owned_by_user(sk)) { 2140 2125 ret = tcp_v4_do_rcv(sk, skb); 2141 2126 } else { 2142 - if (tcp_add_backlog(sk, skb)) 2127 + if (tcp_add_backlog(sk, skb, &drop_reason)) 2143 2128 goto discard_and_relse; 2144 2129 } 2145 2130 bh_unlock_sock(sk); ··· 2181 2166 2182 2167 do_time_wait: 2183 2168 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 2169 + drop_reason = SKB_DROP_REASON_XFRM_POLICY; 2184 2170 inet_twsk_put(inet_twsk(sk)); 2185 2171 goto discard_it; 2186 2172 }
+32 -11
net/ipv6/tcp_ipv6.c
··· 775 775 776 776 static bool tcp_v6_inbound_md5_hash(const struct sock *sk, 777 777 const struct sk_buff *skb, 778 - int dif, int sdif) 778 + int dif, int sdif, 779 + enum skb_drop_reason *reason) 779 780 { 780 781 #ifdef CONFIG_TCP_MD5SIG 781 782 const __u8 *hash_location = NULL; ··· 799 798 return false; 800 799 801 800 if (hash_expected && !hash_location) { 801 + *reason = SKB_DROP_REASON_TCP_MD5NOTFOUND; 802 802 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 803 803 return true; 804 804 } 805 805 806 806 if (!hash_expected && hash_location) { 807 + *reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED; 807 808 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 808 809 return true; 809 810 } ··· 816 813 NULL, skb); 817 814 818 815 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 816 + *reason = SKB_DROP_REASON_TCP_MD5FAILURE; 819 817 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); 820 818 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n", 821 819 genhash ? "failed" : "mismatch", ··· 1476 1472 { 1477 1473 struct ipv6_pinfo *np = tcp_inet6_sk(sk); 1478 1474 struct sk_buff *opt_skb = NULL; 1475 + enum skb_drop_reason reason; 1479 1476 struct tcp_sock *tp; 1480 1477 1481 1478 /* Imagine: socket is IPv6. 
IPv4 packet arrives, ··· 1511 1506 if (np->rxopt.all) 1512 1507 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); 1513 1508 1509 + reason = SKB_DROP_REASON_NOT_SPECIFIED; 1514 1510 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1515 1511 struct dst_entry *dst; 1516 1512 ··· 1565 1559 discard: 1566 1560 if (opt_skb) 1567 1561 __kfree_skb(opt_skb); 1568 - kfree_skb(skb); 1562 + kfree_skb_reason(skb, reason); 1569 1563 return 0; 1570 1564 csum_err: 1565 + reason = SKB_DROP_REASON_TCP_CSUM; 1571 1566 trace_tcp_bad_csum(skb); 1572 1567 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); 1573 1568 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); ··· 1634 1627 1635 1628 INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) 1636 1629 { 1630 + enum skb_drop_reason drop_reason; 1637 1631 int sdif = inet6_sdif(skb); 1638 1632 int dif = inet6_iif(skb); 1639 1633 const struct tcphdr *th; ··· 1644 1636 int ret; 1645 1637 struct net *net = dev_net(skb->dev); 1646 1638 1639 + drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; 1647 1640 if (skb->pkt_type != PACKET_HOST) 1648 1641 goto discard_it; 1649 1642 ··· 1658 1649 1659 1650 th = (const struct tcphdr *)skb->data; 1660 1651 1661 - if (unlikely(th->doff < sizeof(struct tcphdr)/4)) 1652 + if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) { 1653 + drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL; 1662 1654 goto bad_packet; 1655 + } 1663 1656 if (!pskb_may_pull(skb, th->doff*4)) 1664 1657 goto discard_it; 1665 1658 ··· 1688 1677 struct sock *nsk; 1689 1678 1690 1679 sk = req->rsk_listener; 1691 - if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) { 1680 + if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif, 1681 + &drop_reason)) { 1692 1682 sk_drops_add(sk, skb); 1693 1683 reqsk_put(req); 1694 1684 goto discard_it; ··· 1718 1706 hdr = ipv6_hdr(skb); 1719 1707 tcp_v6_fill_cb(skb, hdr, th); 1720 1708 nsk = tcp_check_req(sk, skb, req, false, &req_stolen); 1709 + } else { 1710 + drop_reason = SKB_DROP_REASON_SOCKET_FILTER; 1721 1711 } 1722 
1712 if (!nsk) { 1723 1713 reqsk_put(req); ··· 1755 1741 } 1756 1742 } 1757 1743 1758 - if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 1744 + if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { 1745 + drop_reason = SKB_DROP_REASON_XFRM_POLICY; 1746 + goto discard_and_relse; 1747 + } 1748 + 1749 + if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason)) 1759 1750 goto discard_and_relse; 1760 1751 1761 - if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) 1752 + if (tcp_filter(sk, skb)) { 1753 + drop_reason = SKB_DROP_REASON_SOCKET_FILTER; 1762 1754 goto discard_and_relse; 1763 - 1764 - if (tcp_filter(sk, skb)) 1765 - goto discard_and_relse; 1755 + } 1766 1756 th = (const struct tcphdr *)skb->data; 1767 1757 hdr = ipv6_hdr(skb); 1768 1758 tcp_v6_fill_cb(skb, hdr, th); ··· 1787 1769 if (!sock_owned_by_user(sk)) { 1788 1770 ret = tcp_v6_do_rcv(sk, skb); 1789 1771 } else { 1790 - if (tcp_add_backlog(sk, skb)) 1772 + if (tcp_add_backlog(sk, skb, &drop_reason)) 1791 1773 goto discard_and_relse; 1792 1774 } 1793 1775 bh_unlock_sock(sk); ··· 1797 1779 return ret ? -1 : 0; 1798 1780 1799 1781 no_tcp_socket: 1782 + drop_reason = SKB_DROP_REASON_NO_SOCKET; 1800 1783 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 1801 1784 goto discard_it; 1802 1785 ··· 1805 1786 1806 1787 if (tcp_checksum_complete(skb)) { 1807 1788 csum_error: 1789 + drop_reason = SKB_DROP_REASON_TCP_CSUM; 1808 1790 trace_tcp_bad_csum(skb); 1809 1791 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); 1810 1792 bad_packet: ··· 1815 1795 } 1816 1796 1817 1797 discard_it: 1818 - kfree_skb(skb); 1798 + kfree_skb_reason(skb, drop_reason); 1819 1799 return 0; 1820 1800 1821 1801 discard_and_relse: ··· 1826 1806 1827 1807 do_time_wait: 1828 1808 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1809 + drop_reason = SKB_DROP_REASON_XFRM_POLICY; 1829 1810 inet_twsk_put(inet_twsk(sk)); 1830 1811 goto discard_it; 1831 1812 }