Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

net/af_iucv: count packets in the xmit path

The TX code keeps track of all skbs that are in-flight but haven't
actually been sent out yet. For native IUCV sockets that's not a huge
deal, but with TRANS_HIPER sockets it would be much better if we
didn't need to maintain a list of skb clones.

Note that we actually only care about the _count_ of skbs in this stage
of the TX pipeline. So as prep work for removing the skb tracking on
TRANS_HIPER sockets, keep track of the skb count in a separate variable
and pair any list {enqueue, unlink} with a count {increment, decrement}.

Then replace all occurrences where we currently look at the skb list's
fill level.
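
To make the pairing rule concrete, here is a minimal sketch of the
pattern (the iucv_skb_enqueue()/iucv_skb_dequeue() helpers are
hypothetical and shown only for illustration; the patch itself
open-codes the increment/decrement next to each queue operation):

    /* Pair every list {enqueue, unlink} with a count
     * {increment, decrement}, so that skbs_in_xmit always mirrors
     * the fill level of send_skb_q.
     */
    static void iucv_skb_enqueue(struct iucv_sock *iucv, struct sk_buff *skb)
    {
            skb_queue_tail(&iucv->send_skb_q, skb);
            atomic_inc(&iucv->skbs_in_xmit);
    }

    static void iucv_skb_dequeue(struct iucv_sock *iucv, struct sk_buff *skb)
    {
            atomic_dec(&iucv->skbs_in_xmit);
            skb_unlink(skb, &iucv->send_skb_q);
    }

Fill-level checks then read the counter instead of the list, e.g.
atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim in place of
skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim.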

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Julian Wiedmann and committed by Jakub Kicinski
ef6af7bd c464444f

+25 -6

include/net/iucv/af_iucv.h (+1 -0)

--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -128,6 +128,7 @@
 	u8		flags;
 	u16		msglimit;
 	u16		msglimit_peer;
+	atomic_t	skbs_in_xmit;
 	atomic_t	msg_sent;
 	atomic_t	msg_recv;
 	atomic_t	pendings;
net/iucv/af_iucv.c (+24 -6)

--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -182,7 +182,7 @@
 	if (sk->sk_state != IUCV_CONNECTED)
 		return 1;
 	if (iucv->transport == AF_IUCV_TRANS_IUCV)
-		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+		return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
 	else
 		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
 			(atomic_read(&iucv->pendings) <= 0));
@@ -269,8 +269,10 @@
 	}
 
 	skb_queue_tail(&iucv->send_skb_q, nskb);
+	atomic_inc(&iucv->skbs_in_xmit);
 	err = dev_queue_xmit(skb);
 	if (net_xmit_eval(err)) {
+		atomic_dec(&iucv->skbs_in_xmit);
 		skb_unlink(nskb, &iucv->send_skb_q);
 		kfree_skb(nskb);
 	} else {
@@ -426,7 +424,7 @@
 	sk->sk_state = IUCV_CLOSING;
 	sk->sk_state_change(sk);
 
-	if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
+	if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 			timeo = sk->sk_lingertime;
 		else
@@ -493,6 +491,7 @@
 	atomic_set(&iucv->pendings, 0);
 	iucv->flags = 0;
 	iucv->msglimit = 0;
+	atomic_set(&iucv->skbs_in_xmit, 0);
 	atomic_set(&iucv->msg_sent, 0);
 	atomic_set(&iucv->msg_recv, 0);
 	iucv->path = NULL;
@@ -1058,6 +1055,7 @@
 		}
 	} else { /* Classic VM IUCV transport */
 		skb_queue_tail(&iucv->send_skb_q, skb);
+		atomic_inc(&iucv->skbs_in_xmit);
 
 		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
 		    skb->len <= 7) {
@@ -1067,6 +1063,7 @@
 			/* on success: there is no message_complete callback */
 			/* for an IPRMDATA msg; remove skb from send queue */
 			if (err == 0) {
+				atomic_dec(&iucv->skbs_in_xmit);
 				skb_unlink(skb, &iucv->send_skb_q);
 				kfree_skb(skb);
 			}
@@ -1076,6 +1071,7 @@
 			/* IUCV_IPRMDATA path flag is set... sever path */
 			if (err == 0x15) {
 				pr_iucv->path_sever(iucv->path, NULL);
+				atomic_dec(&iucv->skbs_in_xmit);
 				skb_unlink(skb, &iucv->send_skb_q);
 				err = -EPIPE;
 				goto fail;
@@ -1115,6 +1109,8 @@
 			} else {
 				err = -EPIPE;
 			}
+
+			atomic_dec(&iucv->skbs_in_xmit);
 			skb_unlink(skb, &iucv->send_skb_q);
 			goto fail;
 		}
@@ -1756,8 +1748,12 @@
 {
 	struct sock *sk = path->private;
 	struct sk_buff *this = NULL;
-	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
+	struct sk_buff_head *list;
 	struct sk_buff *list_skb;
+	struct iucv_sock *iucv;
 	unsigned long flags;
+
+	iucv = iucv_sk(sk);
+	list = &iucv->send_skb_q;
 
 	bh_lock_sock(sk);
@@ -1773,8 +1761,11 @@
 			break;
 		}
 	}
-	if (this)
+	if (this) {
+		atomic_dec(&iucv->skbs_in_xmit);
 		__skb_unlink(this, list);
+	}
+
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (this) {
@@ -1787,7 +1772,7 @@
 	}
 
 	if (sk->sk_state == IUCV_CLOSING) {
-		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
 			sk->sk_state = IUCV_CLOSED;
 			sk->sk_state_change(sk);
 		}
@@ -2165,6 +2150,7 @@
 		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
 			switch (n) {
 			case TX_NOTIFY_OK:
+				atomic_dec(&iucv->skbs_in_xmit);
 				__skb_unlink(list_skb, list);
 				kfree_skb(list_skb);
 				iucv_sock_wake_msglim(sk);
@@ -2174,6 +2158,7 @@
 				atomic_inc(&iucv->pendings);
 				break;
 			case TX_NOTIFY_DELAYED_OK:
+				atomic_dec(&iucv->skbs_in_xmit);
 				__skb_unlink(list_skb, list);
 				atomic_dec(&iucv->pendings);
 				if (atomic_read(&iucv->pendings) <= 0)
@@ -2186,6 +2169,7 @@
 			case TX_NOTIFY_TPQFULL: /* not yet used */
 			case TX_NOTIFY_GENERALERROR:
 			case TX_NOTIFY_DELAYED_GENERALERROR:
+				atomic_dec(&iucv->skbs_in_xmit);
 				__skb_unlink(list_skb, list);
 				kfree_skb(list_skb);
 				if (sk->sk_state == IUCV_CONNECTED) {
@@ -2201,7 +2183,7 @@
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (sk->sk_state == IUCV_CLOSING) {
-		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
 			sk->sk_state = IUCV_CLOSED;
 			sk->sk_state_change(sk);
 		}