Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/af_iucv: don't track individual TX skbs for TRANS_HIPER sockets

Stop maintaining the skb_send_q list for TRANS_HIPER sockets.

Not only is it extra overhead, but keeping around a list of skb clones
means that we later also have to match the ->sk_txnotify() calls
against these clones and free them accordingly.
The current matching logic (comparing the skbs' shinfo location) is
frustratingly fragile, and breaks if the skb's head is mangled in any
sort of way while passing from dev_queue_xmit() to the device's
HW queue.

Also adjust the interface for ->sk_txnotify(), to make clear that we
don't actually care about any skb internals.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Julian Wiedmann and committed by Jakub Kicinski.
80bc97aa ef6af7bd

+26 -62
+4 -2
drivers/s390/net/qeth_core_main.c
··· 1409 1409 struct sk_buff *skb; 1410 1410 1411 1411 skb_queue_walk(&buf->skb_list, skb) { 1412 + struct sock *sk = skb->sk; 1413 + 1412 1414 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); 1413 1415 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); 1414 - if (skb->sk && skb->sk->sk_family == PF_IUCV) 1415 - iucv_sk(skb->sk)->sk_txnotify(skb, notification); 1416 + if (sk && sk->sk_family == PF_IUCV) 1417 + iucv_sk(sk)->sk_txnotify(sk, notification); 1416 1418 } 1417 1419 } 1418 1420
+1 -1
include/net/iucv/af_iucv.h
··· 133 133 atomic_t msg_recv; 134 134 atomic_t pendings; 135 135 int transport; 136 - void (*sk_txnotify)(struct sk_buff *skb, 136 + void (*sk_txnotify)(struct sock *sk, 137 137 enum iucv_tx_notify n); 138 138 }; 139 139
+21 -59
net/iucv/af_iucv.c
··· 89 89 static void iucv_sock_kill(struct sock *sk); 90 90 static void iucv_sock_close(struct sock *sk); 91 91 92 - static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify); 92 + static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify); 93 93 94 94 /* Call Back functions */ 95 95 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); ··· 211 211 { 212 212 struct iucv_sock *iucv = iucv_sk(sock); 213 213 struct af_iucv_trans_hdr *phs_hdr; 214 - struct sk_buff *nskb; 215 214 int err, confirm_recv = 0; 216 215 217 216 phs_hdr = skb_push(skb, sizeof(*phs_hdr)); ··· 260 261 } 261 262 skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); 262 263 263 - __skb_header_release(skb); 264 - nskb = skb_clone(skb, GFP_ATOMIC); 265 - if (!nskb) { 266 - err = -ENOMEM; 267 - goto err_free; 268 - } 269 - 270 - skb_queue_tail(&iucv->send_skb_q, nskb); 271 264 atomic_inc(&iucv->skbs_in_xmit); 272 265 err = dev_queue_xmit(skb); 273 266 if (net_xmit_eval(err)) { 274 267 atomic_dec(&iucv->skbs_in_xmit); 275 - skb_unlink(nskb, &iucv->send_skb_q); 276 - kfree_skb(nskb); 277 268 } else { 278 269 atomic_sub(confirm_recv, &iucv->msg_recv); 279 270 WARN_ON(atomic_read(&iucv->msg_recv) < 0); ··· 2135 2146 * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets 2136 2147 * transport 2137 2148 **/ 2138 - static void afiucv_hs_callback_txnotify(struct sk_buff *skb, 2139 - enum iucv_tx_notify n) 2149 + static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n) 2140 2150 { 2141 - struct iucv_sock *iucv = iucv_sk(skb->sk); 2142 - struct sock *sk = skb->sk; 2143 - struct sk_buff_head *list; 2144 - struct sk_buff *list_skb; 2145 - struct sk_buff *nskb; 2146 - unsigned long flags; 2151 + struct iucv_sock *iucv = iucv_sk(sk); 2147 2152 2148 2153 if (sock_flag(sk, SOCK_ZAPPED)) 2149 2154 return; 2150 2155 2151 - list = &iucv->send_skb_q; 2152 - spin_lock_irqsave(&list->lock, flags); 2153 - 
skb_queue_walk_safe(list, list_skb, nskb) { 2154 - if (skb_shinfo(list_skb) == skb_shinfo(skb)) { 2155 - switch (n) { 2156 - case TX_NOTIFY_OK: 2157 - atomic_dec(&iucv->skbs_in_xmit); 2158 - __skb_unlink(list_skb, list); 2159 - kfree_skb(list_skb); 2160 - iucv_sock_wake_msglim(sk); 2161 - break; 2162 - case TX_NOTIFY_PENDING: 2163 - atomic_inc(&iucv->pendings); 2164 - break; 2165 - case TX_NOTIFY_DELAYED_OK: 2166 - atomic_dec(&iucv->skbs_in_xmit); 2167 - __skb_unlink(list_skb, list); 2168 - atomic_dec(&iucv->pendings); 2169 - if (atomic_read(&iucv->pendings) <= 0) 2170 - iucv_sock_wake_msglim(sk); 2171 - kfree_skb(list_skb); 2172 - break; 2173 - case TX_NOTIFY_UNREACHABLE: 2174 - case TX_NOTIFY_DELAYED_UNREACHABLE: 2175 - case TX_NOTIFY_TPQFULL: /* not yet used */ 2176 - case TX_NOTIFY_GENERALERROR: 2177 - case TX_NOTIFY_DELAYED_GENERALERROR: 2178 - atomic_dec(&iucv->skbs_in_xmit); 2179 - __skb_unlink(list_skb, list); 2180 - kfree_skb(list_skb); 2181 - if (sk->sk_state == IUCV_CONNECTED) { 2182 - sk->sk_state = IUCV_DISCONN; 2183 - sk->sk_state_change(sk); 2184 - } 2185 - break; 2186 - } 2187 - break; 2156 + switch (n) { 2157 + case TX_NOTIFY_OK: 2158 + atomic_dec(&iucv->skbs_in_xmit); 2159 + iucv_sock_wake_msglim(sk); 2160 + break; 2161 + case TX_NOTIFY_PENDING: 2162 + atomic_inc(&iucv->pendings); 2163 + break; 2164 + case TX_NOTIFY_DELAYED_OK: 2165 + atomic_dec(&iucv->skbs_in_xmit); 2166 + if (atomic_dec_return(&iucv->pendings) <= 0) 2167 + iucv_sock_wake_msglim(sk); 2168 + break; 2169 + default: 2170 + atomic_dec(&iucv->skbs_in_xmit); 2171 + if (sk->sk_state == IUCV_CONNECTED) { 2172 + sk->sk_state = IUCV_DISCONN; 2173 + sk->sk_state_change(sk); 2188 2174 } 2189 2175 } 2190 - spin_unlock_irqrestore(&list->lock, flags); 2191 2176 2192 2177 if (sk->sk_state == IUCV_CLOSING) { 2193 2178 if (atomic_read(&iucv->skbs_in_xmit) == 0) { ··· 2169 2206 sk->sk_state_change(sk); 2170 2207 } 2171 2208 } 2172 - 2173 2209 } 2174 2210 2175 2211 /*