Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'rxrpc-next-20230208' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
rxrpc development

Here are some miscellaneous changes for rxrpc:

(1) Use consume_skb() rather than kfree_skb_reason().

(2) Fix unnecessary waking when poking an already-poked call.

(3) Add ack.rwind to the rxrpc_tx_ack tracepoint as this indicates how
many incoming DATA packets we're telling the peer that we are
currently willing to accept on this call.

(4) Reduce duplicate ACK transmission. We send ACKs to let the peer know
that we're increasing the receive window (ack.rwind) as we consume
packets locally. Normal ACK transmission is triggered in three places
and that leads to duplicates being sent.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+22 -13
+7 -4
include/trace/events/rxrpc.h
··· 1118 1118 TRACE_EVENT(rxrpc_tx_ack, 1119 1119 TP_PROTO(unsigned int call, rxrpc_serial_t serial, 1120 1120 rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial, 1121 - u8 reason, u8 n_acks), 1121 + u8 reason, u8 n_acks, u16 rwind), 1122 1122 1123 - TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks), 1123 + TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind), 1124 1124 1125 1125 TP_STRUCT__entry( 1126 1126 __field(unsigned int, call) ··· 1129 1129 __field(rxrpc_serial_t, ack_serial) 1130 1130 __field(u8, reason) 1131 1131 __field(u8, n_acks) 1132 + __field(u16, rwind) 1132 1133 ), 1133 1134 1134 1135 TP_fast_assign( ··· 1139 1138 __entry->ack_serial = ack_serial; 1140 1139 __entry->reason = reason; 1141 1140 __entry->n_acks = n_acks; 1141 + __entry->rwind = rwind; 1142 1142 ), 1143 1143 1144 - TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u", 1144 + TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u", 1145 1145 __entry->call, 1146 1146 __entry->serial, 1147 1147 __print_symbolic(__entry->reason, rxrpc_ack_names), 1148 1148 __entry->ack_first, 1149 1149 __entry->ack_serial, 1150 - __entry->n_acks) 1150 + __entry->n_acks, 1151 + __entry->rwind) 1151 1152 ); 1152 1153 1153 1154 TRACE_EVENT(rxrpc_receive,
+4 -2
net/rxrpc/call_object.c
··· 54 54 spin_lock_bh(&local->lock); 55 55 busy = !list_empty(&call->attend_link); 56 56 trace_rxrpc_poke_call(call, busy, what); 57 + if (!busy && !rxrpc_try_get_call(call, rxrpc_call_get_poke)) 58 + busy = true; 57 59 if (!busy) { 58 - rxrpc_get_call(call, rxrpc_call_get_poke); 59 60 list_add_tail(&call->attend_link, &local->call_attend_q); 60 61 } 61 62 spin_unlock_bh(&local->lock); 62 - rxrpc_wake_up_io_thread(local); 63 + if (!busy) 64 + rxrpc_wake_up_io_thread(local); 63 65 } 64 66 } 65 67
+1 -1
net/rxrpc/conn_event.c
··· 163 163 trace_rxrpc_tx_ack(chan->call_debug_id, serial, 164 164 ntohl(pkt.ack.firstPacket), 165 165 ntohl(pkt.ack.serial), 166 - pkt.ack.reason, 0); 166 + pkt.ack.reason, 0, rxrpc_rx_window_size); 167 167 break; 168 168 169 169 default:
+7 -3
net/rxrpc/output.c
··· 80 80 */ 81 81 static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, 82 82 struct rxrpc_call *call, 83 - struct rxrpc_txbuf *txb) 83 + struct rxrpc_txbuf *txb, 84 + u16 *_rwind) 84 85 { 85 86 struct rxrpc_ackinfo ackinfo; 86 87 unsigned int qsize, sack, wrap, to; ··· 125 124 jmax = rxrpc_rx_jumbo_max; 126 125 qsize = (window - 1) - call->rx_consumed; 127 126 rsize = max_t(int, call->rx_winsize - qsize, 0); 127 + *_rwind = rsize; 128 128 ackinfo.rxMTU = htonl(rxrpc_rx_mtu); 129 129 ackinfo.maxMTU = htonl(mtu); 130 130 ackinfo.rwind = htonl(rsize); ··· 192 190 rxrpc_serial_t serial; 193 191 size_t len, n; 194 192 int ret, rtt_slot = -1; 193 + u16 rwind; 195 194 196 195 if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) 197 196 return -ECONNRESET; ··· 208 205 if (txb->ack.reason == RXRPC_ACK_PING) 209 206 txb->wire.flags |= RXRPC_REQUEST_ACK; 210 207 211 - n = rxrpc_fill_out_ack(conn, call, txb); 208 + n = rxrpc_fill_out_ack(conn, call, txb, &rwind); 212 209 if (n == 0) 213 210 return 0; 214 211 ··· 220 217 txb->wire.serial = htonl(serial); 221 218 trace_rxrpc_tx_ack(call->debug_id, serial, 222 219 ntohl(txb->ack.firstPacket), 223 - ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks); 220 + ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks, 221 + rwind); 224 222 225 223 if (txb->ack.reason == RXRPC_ACK_PING) 226 224 rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
+1 -1
net/rxrpc/recvmsg.c
··· 137 137 /* Check to see if there's an ACK that needs sending. */ 138 138 acked = atomic_add_return(call->rx_consumed - old_consumed, 139 139 &call->ackr_nr_consumed); 140 - if (acked > 2 && 140 + if (acked > 8 && 141 141 !test_and_set_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags)) 142 142 rxrpc_poke_call(call, rxrpc_call_poke_idle); 143 143 }
+2 -2
net/rxrpc/skbuff.c
··· 63 63 if (skb) { 64 64 int n = atomic_dec_return(select_skb_count(skb)); 65 65 trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why); 66 - kfree_skb_reason(skb, SKB_CONSUMED); 66 + consume_skb(skb); 67 67 } 68 68 } 69 69 ··· 78 78 int n = atomic_dec_return(select_skb_count(skb)); 79 79 trace_rxrpc_skb(skb, refcount_read(&skb->users), n, 80 80 rxrpc_skb_put_purge); 81 - kfree_skb_reason(skb, SKB_CONSUMED); 81 + consume_skb(skb); 82 82 } 83 83 }