Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rxrpc: Manage RTT per-call rather than per-peer

Manage the determination of RTT on a per-call (i.e. per-RPC op) basis rather
than on a per-peer basis, averaging across all calls going to that peer.
The problem is that the RTT measurements from the initial packets on a call
may be off because the server may do some setting up (such as getting a
lock on a file) before accepting the rest of the data in the RPC and,
further, the RTT may be affected by server-side file operations, for
instance if a large amount of data is being written or read.

Note: When handling the FS.StoreData-type RPCs, for example, the server
uses the userStatus field in the header of ACK packets as supplementary
flow control to aid in managing this. AF_RXRPC does not yet support this,
but it should be added.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

David Howells and committed by
Jakub Kicinski
b40ef2b8 b5099340

+97 -96
+1 -1
include/trace/events/rxrpc.h
··· 1453 1453 __entry->rtt = rtt; 1454 1454 __entry->srtt = srtt; 1455 1455 __entry->rto = rto; 1456 - __entry->min_rtt = minmax_get(&call->peer->min_rtt) 1456 + __entry->min_rtt = minmax_get(&call->min_rtt) 1457 1457 ), 1458 1458 1459 1459 TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u srtt=%u rto=%u min=%u",
+21 -18
net/rxrpc/ar-internal.h
··· 366 366 unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ 367 367 unsigned short tx_seg_max; /* Maximum number of transmissable segments */ 368 368 369 - /* calculated RTT cache */ 370 - #define RXRPC_RTT_CACHE_SIZE 32 371 - spinlock_t rtt_input_lock; /* RTT lock for input routine */ 372 - ktime_t rtt_last_req; /* Time of last RTT request */ 373 - unsigned int rtt_count; /* Number of samples we've got */ 374 - unsigned int rtt_taken; /* Number of samples taken (wrapping) */ 375 - struct minmax min_rtt; /* Estimated minimum RTT */ 376 - 377 - u32 srtt_us; /* smoothed round trip time << 3 in usecs */ 378 - u32 mdev_us; /* medium deviation */ 379 - u32 mdev_max_us; /* maximal mdev for the last rtt period */ 380 - u32 rttvar_us; /* smoothed mdev_max */ 381 - u32 rto_us; /* Retransmission timeout in usec */ 382 - u8 backoff; /* Backoff timeout (as shift) */ 369 + /* Calculated RTT cache */ 370 + unsigned int recent_srtt_us; 371 + unsigned int recent_rto_us; 383 372 384 373 u8 cong_ssthresh; /* Congestion slow-start threshold */ 385 374 }; ··· 754 765 rxrpc_serial_t acks_highest_serial; /* Highest serial number ACK'd */ 755 766 unsigned short acks_nr_sacks; /* Number of soft acks recorded */ 756 767 unsigned short acks_nr_snacks; /* Number of soft nacks recorded */ 768 + 769 + /* Calculated RTT cache */ 770 + ktime_t rtt_last_req; /* Time of last RTT request */ 771 + unsigned int rtt_count; /* Number of samples we've got */ 772 + unsigned int rtt_taken; /* Number of samples taken (wrapping) */ 773 + struct minmax min_rtt; /* Estimated minimum RTT */ 774 + u32 srtt_us; /* smoothed round trip time << 3 in usecs */ 775 + u32 mdev_us; /* medium deviation */ 776 + u32 mdev_max_us; /* maximal mdev for the last rtt period */ 777 + u32 rttvar_us; /* smoothed mdev_max */ 778 + u32 rto_us; /* Retransmission timeout in usec */ 779 + u8 backoff; /* Backoff timeout (as shift) */ 757 780 }; 758 781 759 782 /* ··· 1288 1287 /* 1289 1288 * rtt.c 1290 1289 */ 1291 - void 
rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int, 1292 - rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); 1293 - ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans); 1294 - void rxrpc_peer_init_rtt(struct rxrpc_peer *); 1290 + void rxrpc_call_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, 1291 + int rtt_slot, 1292 + rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, 1293 + ktime_t send_time, ktime_t resp_time); 1294 + ktime_t rxrpc_get_rto_backoff(struct rxrpc_call *call, bool retrans); 1295 + void rxrpc_call_init_rtt(struct rxrpc_call *call); 1295 1296 1296 1297 /* 1297 1298 * rxkad.c
+9 -9
net/rxrpc/call_event.c
··· 44 44 45 45 trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial); 46 46 47 - if (call->peer->srtt_us) 48 - delay = (call->peer->srtt_us >> 3) * NSEC_PER_USEC; 47 + if (call->srtt_us) 48 + delay = (call->srtt_us >> 3) * NSEC_PER_USEC; 49 49 else 50 50 delay = ms_to_ktime(READ_ONCE(rxrpc_soft_ack_delay)); 51 51 ktime_add_ms(delay, call->tx_backoff); ··· 105 105 }; 106 106 struct rxrpc_txqueue *tq = call->tx_queue; 107 107 ktime_t lowest_xmit_ts = KTIME_MAX; 108 - ktime_t rto = rxrpc_get_rto_backoff(call->peer, false); 108 + ktime_t rto = rxrpc_get_rto_backoff(call, false); 109 109 bool unacked = false; 110 110 111 111 _enter("{%d,%d}", call->tx_bottom, call->tx_top); ··· 195 195 } while ((tq = tq->next)); 196 196 197 197 if (lowest_xmit_ts < KTIME_MAX) { 198 - ktime_t delay = rxrpc_get_rto_backoff(call->peer, req.did_send); 198 + ktime_t delay = rxrpc_get_rto_backoff(call, req.did_send); 199 199 ktime_t resend_at = ktime_add(lowest_xmit_ts, delay); 200 200 201 201 _debug("delay %llu %lld", delay, ktime_sub(resend_at, req.now)); ··· 216 216 */ 217 217 if (!req.did_send) { 218 218 ktime_t next_ping = ktime_add_us(call->acks_latest_ts, 219 - call->peer->srtt_us >> 3); 219 + call->srtt_us >> 3); 220 220 221 221 if (ktime_sub(next_ping, req.now) <= 0) 222 222 rxrpc_send_ACK(call, RXRPC_ACK_PING, 0, ··· 366 366 */ 367 367 static void rxrpc_send_initial_ping(struct rxrpc_call *call) 368 368 { 369 - if (call->peer->rtt_count < 3 || 370 - ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), 369 + if (call->rtt_count < 3 || 370 + ktime_before(ktime_add_ms(call->rtt_last_req, 1000), 371 371 ktime_get_real())) 372 372 rxrpc_send_ACK(call, RXRPC_ACK_PING, 0, 373 373 rxrpc_propose_ack_ping_for_params); ··· 499 499 rxrpc_propose_ack_rx_idle); 500 500 501 501 if (call->ackr_nr_unacked > 2) { 502 - if (call->peer->rtt_count < 3) 502 + if (call->rtt_count < 3) 503 503 rxrpc_send_ACK(call, RXRPC_ACK_PING, 0, 504 504 rxrpc_propose_ack_ping_for_rtt); 505 - else if 
(ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), 505 + else if (ktime_before(ktime_add_ms(call->rtt_last_req, 1000), 506 506 ktime_get_real())) 507 507 rxrpc_send_ACK(call, RXRPC_ACK_PING, 0, 508 508 rxrpc_propose_ack_ping_for_old_rtt);
+2
net/rxrpc/call_object.c
··· 176 176 call->cong_cwnd = RXRPC_MIN_CWND; 177 177 call->cong_ssthresh = RXRPC_TX_MAX_WINDOW; 178 178 179 + rxrpc_call_init_rtt(call); 180 + 179 181 call->rxnet = rxnet; 180 182 call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK; 181 183 atomic_inc(&rxnet->nr_calls);
+5 -5
net/rxrpc/input.c
··· 71 71 /* We analyse the number of packets that get ACK'd per RTT 72 72 * period and increase the window if we managed to fill it. 73 73 */ 74 - if (call->peer->rtt_count == 0) 74 + if (call->rtt_count == 0) 75 75 goto out; 76 76 if (ktime_before(call->acks_latest_ts, 77 77 ktime_add_us(call->cong_tstamp, 78 - call->peer->srtt_us >> 3))) 78 + call->srtt_us >> 3))) 79 79 goto out_no_clear_ca; 80 80 summary->change = rxrpc_cong_rtt_window_end; 81 81 call->cong_tstamp = call->acks_latest_ts; ··· 179 179 if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY) 180 180 return; 181 181 182 - rtt = ns_to_ktime(call->peer->srtt_us * (1000 / 8)); 182 + rtt = ns_to_ktime(call->srtt_us * (NSEC_PER_USEC / 8)); 183 183 now = ktime_get_real(); 184 184 if (!ktime_before(ktime_add(call->tx_last_sent, rtt), now)) 185 185 return; ··· 200 200 struct rxrpc_txqueue *tq, 201 201 int ix) 202 202 { 203 - rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_data_ack, -1, 203 + rxrpc_call_add_rtt(call, rxrpc_rtt_rx_data_ack, -1, 204 204 summary->acked_serial, summary->ack_serial, 205 205 ktime_add_us(tq->xmit_ts_base, tq->segment_xmit_ts[ix]), 206 206 call->acks_latest_ts); ··· 725 725 clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); 726 726 smp_mb(); /* Read data before setting avail bit */ 727 727 set_bit(i, &call->rtt_avail); 728 - rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial, 728 + rxrpc_call_add_rtt(call, type, i, acked_serial, ack_serial, 729 729 sent_at, resp_time); 730 730 matched = true; 731 731 }
+7 -7
net/rxrpc/output.c
··· 234 234 if (ack_reason == RXRPC_ACK_PING) 235 235 rxrpc_begin_rtt_probe(call, *_ack_serial, now, rxrpc_rtt_tx_ping); 236 236 if (whdr->flags & RXRPC_REQUEST_ACK) 237 - call->peer->rtt_last_req = now; 237 + call->rtt_last_req = now; 238 238 rxrpc_set_keepalive(call, now); 239 239 return nr_kv; 240 240 } ··· 473 473 why = rxrpc_reqack_slow_start; 474 474 else if (call->tx_winsize <= 2) 475 475 why = rxrpc_reqack_small_txwin; 476 - else if (call->peer->rtt_count < 3 && txb->seq & 1) 476 + else if (call->rtt_count < 3) 477 477 why = rxrpc_reqack_more_rtt; 478 - else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), ktime_get_real())) 478 + else if (ktime_before(ktime_add_ms(call->rtt_last_req, 1000), ktime_get_real())) 479 479 why = rxrpc_reqack_old_rtt; 480 480 else if (!last && !after(READ_ONCE(call->send_top), txb->seq)) 481 481 why = rxrpc_reqack_app_stall; ··· 487 487 if (why != rxrpc_reqack_no_srv_last) { 488 488 flags |= RXRPC_REQUEST_ACK; 489 489 trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, -1, serial); 490 - call->peer->rtt_last_req = req->now; 490 + call->rtt_last_req = req->now; 491 491 } 492 492 dont_set_request_ack: 493 493 ··· 576 576 } 577 577 578 578 /* Set timeouts */ 579 - if (call->peer->rtt_count > 1) { 580 - ktime_t delay = rxrpc_get_rto_backoff(call->peer, false); 579 + if (call->rtt_count > 1) { 580 + ktime_t delay = rxrpc_get_rto_backoff(call, false); 581 581 582 582 call->ack_lost_at = ktime_add(req->now, delay); 583 583 trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_lost_ack); ··· 590 590 trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx); 591 591 } 592 592 if (call->resend_at == KTIME_MAX) { 593 - ktime_t delay = rxrpc_get_rto_backoff(call->peer, false); 593 + ktime_t delay = rxrpc_get_rto_backoff(call, false); 594 594 595 595 call->resend_at = ktime_add(req->now, delay); 596 596 trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_resend);
+2 -7
net/rxrpc/peer_object.c
··· 235 235 peer->service_conns = RB_ROOT; 236 236 seqlock_init(&peer->service_conn_lock); 237 237 spin_lock_init(&peer->lock); 238 - spin_lock_init(&peer->rtt_input_lock); 239 238 seqcount_init(&peer->mtu_lock); 240 239 peer->debug_id = atomic_inc_return(&rxrpc_debug_id); 241 - 242 - rxrpc_peer_init_rtt(peer); 243 - 240 + peer->recent_srtt_us = UINT_MAX; 244 241 peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW; 245 242 trace_rxrpc_peer(peer->debug_id, 1, why); 246 243 } ··· 280 283 peer->max_data = peer->if_mtu - peer->hdrsize; 281 284 282 285 rxrpc_assess_MTU_size(local, peer); 283 - 284 - peer->rtt_last_req = ktime_get_real(); 285 286 } 286 287 287 288 /* ··· 491 496 */ 492 497 unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *peer) 493 498 { 494 - return peer->rtt_count > 0 ? peer->srtt_us >> 3 : UINT_MAX; 499 + return READ_ONCE(peer->recent_srtt_us); 495 500 } 496 501 EXPORT_SYMBOL(rxrpc_kernel_get_srtt); 497 502
+3 -3
net/rxrpc/proc.c
··· 296 296 297 297 now = ktime_get_seconds(); 298 298 seq_printf(seq, 299 - "UDP %-47.47s %-47.47s %3u %4u %5u %6llus %8u %8u\n", 299 + "UDP %-47.47s %-47.47s %3u %4u %5u %6llus %8d %8d\n", 300 300 lbuff, 301 301 rbuff, 302 302 refcount_read(&peer->ref), 303 303 peer->cong_ssthresh, 304 304 peer->max_data, 305 305 now - peer->last_tx_at, 306 - peer->srtt_us >> 3, 307 - peer->rto_us); 306 + READ_ONCE(peer->recent_srtt_us), 307 + READ_ONCE(peer->recent_rto_us)); 308 308 309 309 return 0; 310 310 }
+46 -45
net/rxrpc/rtt.c
··· 15 15 #define RXRPC_TIMEOUT_INIT ((unsigned int)(1 * USEC_PER_SEC)) /* RFC6298 2.1 initial RTO value */ 16 16 #define rxrpc_jiffies32 ((u32)jiffies) /* As rxrpc_jiffies32 */ 17 17 18 - static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer) 18 + static u32 rxrpc_rto_min_us(struct rxrpc_call *call) 19 19 { 20 20 return 200; 21 21 } 22 22 23 - static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer) 23 + static u32 __rxrpc_set_rto(const struct rxrpc_call *call) 24 24 { 25 - return (peer->srtt_us >> 3) + peer->rttvar_us; 25 + return (call->srtt_us >> 3) + call->rttvar_us; 26 26 } 27 27 28 28 static u32 rxrpc_bound_rto(u32 rto) ··· 40 40 * To save cycles in the RFC 1323 implementation it was better to break 41 41 * it up into three procedures. -- erics 42 42 */ 43 - static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us) 43 + static void rxrpc_rtt_estimator(struct rxrpc_call *call, long sample_rtt_us) 44 44 { 45 45 long m = sample_rtt_us; /* RTT */ 46 - u32 srtt = peer->srtt_us; 46 + u32 srtt = call->srtt_us; 47 47 48 48 /* The following amusing code comes from Jacobson's 49 49 * article in SIGCOMM '88. Note that rtt and mdev ··· 66 66 srtt += m; /* rtt = 7/8 rtt + 1/8 new */ 67 67 if (m < 0) { 68 68 m = -m; /* m is now abs(error) */ 69 - m -= (peer->mdev_us >> 2); /* similar update on mdev */ 69 + m -= (call->mdev_us >> 2); /* similar update on mdev */ 70 70 /* This is similar to one of Eifel findings. 71 71 * Eifel blocks mdev updates when rtt decreases. 
72 72 * This solution is a bit different: we use finer gain ··· 78 78 if (m > 0) 79 79 m >>= 3; 80 80 } else { 81 - m -= (peer->mdev_us >> 2); /* similar update on mdev */ 81 + m -= (call->mdev_us >> 2); /* similar update on mdev */ 82 82 } 83 83 84 - peer->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ 85 - if (peer->mdev_us > peer->mdev_max_us) { 86 - peer->mdev_max_us = peer->mdev_us; 87 - if (peer->mdev_max_us > peer->rttvar_us) 88 - peer->rttvar_us = peer->mdev_max_us; 84 + call->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ 85 + if (call->mdev_us > call->mdev_max_us) { 86 + call->mdev_max_us = call->mdev_us; 87 + if (call->mdev_max_us > call->rttvar_us) 88 + call->rttvar_us = call->mdev_max_us; 89 89 } 90 90 } else { 91 91 /* no previous measure. */ 92 92 srtt = m << 3; /* take the measured time to be rtt */ 93 - peer->mdev_us = m << 1; /* make sure rto = 3*rtt */ 94 - peer->rttvar_us = umax(peer->mdev_us, rxrpc_rto_min_us(peer)); 95 - peer->mdev_max_us = peer->rttvar_us; 93 + call->mdev_us = m << 1; /* make sure rto = 3*rtt */ 94 + call->rttvar_us = umax(call->mdev_us, rxrpc_rto_min_us(call)); 95 + call->mdev_max_us = call->rttvar_us; 96 96 } 97 97 98 - peer->srtt_us = umax(srtt, 1); 98 + call->srtt_us = umax(srtt, 1); 99 99 } 100 100 101 101 /* 102 102 * Calculate rto without backoff. This is the second half of Van Jacobson's 103 103 * routine referred to above. 104 104 */ 105 - static void rxrpc_set_rto(struct rxrpc_peer *peer) 105 + static void rxrpc_set_rto(struct rxrpc_call *call) 106 106 { 107 107 u32 rto; 108 108 ··· 113 113 * is invisible. Actually, Linux-2.4 also generates erratic 114 114 * ACKs in some circumstances. 115 115 */ 116 - rto = __rxrpc_set_rto(peer); 116 + rto = __rxrpc_set_rto(call); 117 117 118 118 /* 2. Fixups made earlier cannot be right. 119 119 * If we do not estimate RTO correctly without them, ··· 124 124 /* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo 125 125 * guarantees that rto is higher. 
126 126 */ 127 - peer->rto_us = rxrpc_bound_rto(rto); 127 + call->rto_us = rxrpc_bound_rto(rto); 128 128 } 129 129 130 - static void rxrpc_update_rtt_min(struct rxrpc_peer *peer, ktime_t resp_time, long rtt_us) 130 + static void rxrpc_update_rtt_min(struct rxrpc_call *call, ktime_t resp_time, long rtt_us) 131 131 { 132 132 /* Window size 5mins in approx usec (ipv4.sysctl_tcp_min_rtt_wlen) */ 133 133 u32 wlen_us = 5ULL * NSEC_PER_SEC / 1024; 134 134 135 - minmax_running_min(&peer->min_rtt, wlen_us, resp_time / 1024, 135 + minmax_running_min(&call->min_rtt, wlen_us, resp_time / 1024, 136 136 (u32)rtt_us ? : jiffies_to_usecs(1)); 137 137 } 138 138 139 - static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, ktime_t resp_time, long rtt_us) 139 + static void rxrpc_ack_update_rtt(struct rxrpc_call *call, ktime_t resp_time, long rtt_us) 140 140 { 141 141 if (rtt_us < 0) 142 142 return; 143 143 144 144 /* Update RACK min RTT [RFC8985 6.1 Step 1]. */ 145 - rxrpc_update_rtt_min(peer, resp_time, rtt_us); 145 + rxrpc_update_rtt_min(call, resp_time, rtt_us); 146 146 147 - rxrpc_rtt_estimator(peer, rtt_us); 148 - rxrpc_set_rto(peer); 147 + rxrpc_rtt_estimator(call, rtt_us); 148 + rxrpc_set_rto(call); 149 149 150 150 /* Only reset backoff on valid RTT measurement [RFC6298]. */ 151 - peer->backoff = 0; 151 + call->backoff = 0; 152 152 } 153 153 154 154 /* 155 155 * Add RTT information to cache. This is called in softirq mode and has 156 - * exclusive access to the peer RTT data. 156 + * exclusive access to the call RTT data. 
157 157 */ 158 - void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, 158 + void rxrpc_call_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, 159 159 int rtt_slot, 160 160 rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, 161 161 ktime_t send_time, ktime_t resp_time) 162 162 { 163 - struct rxrpc_peer *peer = call->peer; 164 163 s64 rtt_us; 165 164 166 165 rtt_us = ktime_to_us(ktime_sub(resp_time, send_time)); 167 166 if (rtt_us < 0) 168 167 return; 169 168 170 - spin_lock(&peer->rtt_input_lock); 171 - rxrpc_ack_update_rtt(peer, resp_time, rtt_us); 172 - if (peer->rtt_count < 3) 173 - peer->rtt_count++; 174 - peer->rtt_taken++; 175 - spin_unlock(&peer->rtt_input_lock); 169 + rxrpc_ack_update_rtt(call, resp_time, rtt_us); 170 + if (call->rtt_count < 3) 171 + call->rtt_count++; 172 + call->rtt_taken++; 173 + 174 + WRITE_ONCE(call->peer->recent_srtt_us, call->srtt_us / 8); 175 + WRITE_ONCE(call->peer->recent_rto_us, call->rto_us); 176 176 177 177 trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial, 178 - rtt_us, peer->srtt_us, peer->rto_us); 178 + rtt_us, call->srtt_us, call->rto_us); 179 179 } 180 180 181 181 /* 182 182 * Get the retransmission timeout to set in nanoseconds, backing it off each 183 183 * time we retransmit. 
184 184 */ 185 - ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans) 185 + ktime_t rxrpc_get_rto_backoff(struct rxrpc_call *call, bool retrans) 186 186 { 187 187 u64 timo_us; 188 - u32 backoff = READ_ONCE(peer->backoff); 188 + u32 backoff = READ_ONCE(call->backoff); 189 189 190 - timo_us = peer->rto_us; 190 + timo_us = call->rto_us; 191 191 timo_us <<= backoff; 192 192 if (retrans && timo_us * 2 <= RXRPC_RTO_MAX) 193 - WRITE_ONCE(peer->backoff, backoff + 1); 193 + WRITE_ONCE(call->backoff, backoff + 1); 194 194 195 195 if (timo_us < 1) 196 196 timo_us = 1; ··· 198 198 return ns_to_ktime(timo_us * NSEC_PER_USEC); 199 199 } 200 200 201 - void rxrpc_peer_init_rtt(struct rxrpc_peer *peer) 201 + void rxrpc_call_init_rtt(struct rxrpc_call *call) 202 202 { 203 - peer->rto_us = RXRPC_TIMEOUT_INIT; 204 - peer->mdev_us = RXRPC_TIMEOUT_INIT; 205 - peer->backoff = 0; 206 - //minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U); 203 + call->rtt_last_req = KTIME_MIN; 204 + call->rto_us = RXRPC_TIMEOUT_INIT; 205 + call->mdev_us = RXRPC_TIMEOUT_INIT; 206 + call->backoff = 0; 207 + //minmax_reset(&call->rtt_min, rxrpc_jiffies32, ~0U); 207 208 }
+1 -1
net/rxrpc/sendmsg.c
··· 134 134 rxrpc_seq_t tx_start, tx_win; 135 135 signed long rtt, timeout; 136 136 137 - rtt = READ_ONCE(call->peer->srtt_us) >> 3; 137 + rtt = READ_ONCE(call->srtt_us) >> 3; 138 138 rtt = usecs_to_jiffies(rtt) * 2; 139 139 if (rtt < 2) 140 140 rtt = 2;