Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rxrpc: Fix call timeouts

Fix the rxrpc call expiration timeouts and make them settable from
userspace. By analogy with other rx implementations, there should be three
timeouts:

(1) "Normal timeout"

This is set for all calls and is triggered if we haven't received any
packets from the peer in a while. It is measured from the last time
we received any packet on that call. This is not reset by any
connection packets (such as CHALLENGE/RESPONSE packets).

If a service operation takes a long time, the server should generate
PING ACKs at a duration that's substantially less than the normal
timeout so as to keep both sides alive. This is set at 1/6 of normal
timeout.

(2) "Idle timeout"

This is set only for a service call and is triggered if we stop
receiving the DATA packets that comprise the request data. It is
measured from the last time we received a DATA packet.

(3) "Hard timeout"

This can be set for a call and specifies the maximum lifetime of that
call. It should not be specified by default. Some operations (such
as volume transfer) take a long time.

Allow userspace to set/change the timeouts on a call with sendmsg, using a
control message:

RXRPC_SET_CALL_TIMEOUT

The data to the message is a number of 32-bit words, not all of which need
be given:

u32 hard_timeout; /* sec from first packet */
u32 idle_timeout; /* msec from data Rx */
u32 normal_timeout; /* msec from packet Rx */

This can be set in combination with any other sendmsg() that affects a
call.

Signed-off-by: David Howells <dhowells@redhat.com>

+289 -200
+47 -22
include/trace/events/rxrpc.h
··· 138 138 139 139 enum rxrpc_timer_trace { 140 140 rxrpc_timer_begin, 141 + rxrpc_timer_exp_ack, 142 + rxrpc_timer_exp_hard, 143 + rxrpc_timer_exp_idle, 144 + rxrpc_timer_exp_normal, 145 + rxrpc_timer_exp_ping, 146 + rxrpc_timer_exp_resend, 141 147 rxrpc_timer_expired, 142 148 rxrpc_timer_init_for_reply, 143 149 rxrpc_timer_init_for_send_reply, 150 + rxrpc_timer_restart, 144 151 rxrpc_timer_set_for_ack, 152 + rxrpc_timer_set_for_hard, 153 + rxrpc_timer_set_for_idle, 154 + rxrpc_timer_set_for_normal, 145 155 rxrpc_timer_set_for_ping, 146 156 rxrpc_timer_set_for_resend, 147 157 rxrpc_timer_set_for_send, ··· 306 296 #define rxrpc_timer_traces \ 307 297 EM(rxrpc_timer_begin, "Begin ") \ 308 298 EM(rxrpc_timer_expired, "*EXPR*") \ 299 + EM(rxrpc_timer_exp_ack, "ExpAck") \ 300 + EM(rxrpc_timer_exp_hard, "ExpHrd") \ 301 + EM(rxrpc_timer_exp_idle, "ExpIdl") \ 302 + EM(rxrpc_timer_exp_normal, "ExpNml") \ 303 + EM(rxrpc_timer_exp_ping, "ExpPng") \ 304 + EM(rxrpc_timer_exp_resend, "ExpRsn") \ 309 305 EM(rxrpc_timer_init_for_reply, "IniRpl") \ 310 306 EM(rxrpc_timer_init_for_send_reply, "SndRpl") \ 307 + EM(rxrpc_timer_restart, "Restrt") \ 311 308 EM(rxrpc_timer_set_for_ack, "SetAck") \ 309 + EM(rxrpc_timer_set_for_hard, "SetHrd") \ 310 + EM(rxrpc_timer_set_for_idle, "SetIdl") \ 311 + EM(rxrpc_timer_set_for_normal, "SetNml") \ 312 312 EM(rxrpc_timer_set_for_ping, "SetPng") \ 313 313 EM(rxrpc_timer_set_for_resend, "SetRTx") \ 314 - E_(rxrpc_timer_set_for_send, "SetTx ") 314 + E_(rxrpc_timer_set_for_send, "SetSnd") 315 315 316 316 #define rxrpc_propose_ack_traces \ 317 317 EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ ··· 952 932 953 933 TRACE_EVENT(rxrpc_timer, 954 934 TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why, 955 - ktime_t now, unsigned long now_j), 935 + unsigned long now), 956 936 957 - TP_ARGS(call, why, now, now_j), 937 + TP_ARGS(call, why, now), 958 938 959 939 TP_STRUCT__entry( 960 940 __field(struct rxrpc_call *, call ) 961 941 __field(enum 
rxrpc_timer_trace, why ) 962 - __field_struct(ktime_t, now ) 963 - __field_struct(ktime_t, expire_at ) 964 - __field_struct(ktime_t, ack_at ) 965 - __field_struct(ktime_t, resend_at ) 966 - __field(unsigned long, now_j ) 967 - __field(unsigned long, timer ) 942 + __field(long, now ) 943 + __field(long, ack_at ) 944 + __field(long, resend_at ) 945 + __field(long, ping_at ) 946 + __field(long, expect_rx_by ) 947 + __field(long, expect_req_by ) 948 + __field(long, expect_term_by ) 949 + __field(long, timer ) 968 950 ), 969 951 970 952 TP_fast_assign( 971 - __entry->call = call; 972 - __entry->why = why; 973 - __entry->now = now; 974 - __entry->expire_at = call->expire_at; 975 - __entry->ack_at = call->ack_at; 976 - __entry->resend_at = call->resend_at; 977 - __entry->now_j = now_j; 978 - __entry->timer = call->timer.expires; 953 + __entry->call = call; 954 + __entry->why = why; 955 + __entry->now = now; 956 + __entry->ack_at = call->ack_at; 957 + __entry->resend_at = call->resend_at; 958 + __entry->expect_rx_by = call->expect_rx_by; 959 + __entry->expect_req_by = call->expect_req_by; 960 + __entry->expect_term_by = call->expect_term_by; 961 + __entry->timer = call->timer.expires; 979 962 ), 980 963 981 - TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld", 964 + TP_printk("c=%p %s a=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld", 982 965 __entry->call, 983 966 __print_symbolic(__entry->why, rxrpc_timer_traces), 984 - ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)), 985 - ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)), 986 - ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)), 987 - __entry->timer - __entry->now_j) 967 + __entry->ack_at - __entry->now, 968 + __entry->resend_at - __entry->now, 969 + __entry->expect_rx_by - __entry->now, 970 + __entry->expect_req_by - __entry->now, 971 + __entry->expect_term_by - __entry->now, 972 + __entry->timer - __entry->now) 988 973 ); 989 974 990 975 TRACE_EVENT(rxrpc_rx_lose,
+1
include/uapi/linux/rxrpc.h
··· 59 59 RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */ 60 60 RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */ 61 61 RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */ 62 + RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */ 62 63 RXRPC__SUPPORTED 63 64 }; 64 65
+24 -13
net/rxrpc/ar-internal.h
··· 468 468 enum rxrpc_call_event { 469 469 RXRPC_CALL_EV_ACK, /* need to generate ACK */ 470 470 RXRPC_CALL_EV_ABORT, /* need to generate abort */ 471 - RXRPC_CALL_EV_TIMER, /* Timer expired */ 472 471 RXRPC_CALL_EV_RESEND, /* Tx resend required */ 473 472 RXRPC_CALL_EV_PING, /* Ping send required */ 473 + RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */ 474 474 }; 475 475 476 476 /* ··· 514 514 struct rxrpc_peer *peer; /* Peer record for remote address */ 515 515 struct rxrpc_sock __rcu *socket; /* socket responsible */ 516 516 struct mutex user_mutex; /* User access mutex */ 517 - ktime_t ack_at; /* When deferred ACK needs to happen */ 518 - ktime_t resend_at; /* When next resend needs to happen */ 519 - ktime_t ping_at; /* When next to send a ping */ 520 - ktime_t expire_at; /* When the call times out */ 517 + unsigned long ack_at; /* When deferred ACK needs to happen */ 518 + unsigned long resend_at; /* When next resend needs to happen */ 519 + unsigned long ping_at; /* When next to send a ping */ 520 + unsigned long expect_rx_by; /* When we expect to get a packet by */ 521 + unsigned long expect_req_by; /* When we expect to get a request DATA packet by */ 522 + unsigned long expect_term_by; /* When we expect call termination by */ 523 + u32 next_rx_timo; /* Timeout for next Rx packet (jif) */ 524 + u32 next_req_timo; /* Timeout for next Rx request packet (jif) */ 521 525 struct timer_list timer; /* Combined event timer */ 522 526 struct work_struct processor; /* Event processor */ 523 527 rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */ ··· 701 697 /* 702 698 * call_event.c 703 699 */ 704 - void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t); 705 - void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t); 706 700 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool, 707 701 enum rxrpc_propose_ack_trace); 708 702 void rxrpc_process_call(struct work_struct *); 703 + 704 + static 
inline void rxrpc_reduce_call_timer(struct rxrpc_call *call, 705 + unsigned long expire_at, 706 + unsigned long now, 707 + enum rxrpc_timer_trace why) 708 + { 709 + trace_rxrpc_timer(call, why, now); 710 + timer_reduce(&call->timer, expire_at); 711 + } 709 712 710 713 /* 711 714 * call_object.c ··· 854 843 */ 855 844 extern unsigned int rxrpc_max_client_connections; 856 845 extern unsigned int rxrpc_reap_client_connections; 857 - extern unsigned int rxrpc_conn_idle_client_expiry; 858 - extern unsigned int rxrpc_conn_idle_client_fast_expiry; 846 + extern unsigned long rxrpc_conn_idle_client_expiry; 847 + extern unsigned long rxrpc_conn_idle_client_fast_expiry; 859 848 extern struct idr rxrpc_client_conn_ids; 860 849 861 850 void rxrpc_destroy_client_conn_ids(void); ··· 987 976 * misc.c 988 977 */ 989 978 extern unsigned int rxrpc_max_backlog __read_mostly; 990 - extern unsigned int rxrpc_requested_ack_delay; 991 - extern unsigned int rxrpc_soft_ack_delay; 992 - extern unsigned int rxrpc_idle_ack_delay; 979 + extern unsigned long rxrpc_requested_ack_delay; 980 + extern unsigned long rxrpc_soft_ack_delay; 981 + extern unsigned long rxrpc_idle_ack_delay; 993 982 extern unsigned int rxrpc_rx_window_size; 994 983 extern unsigned int rxrpc_rx_mtu; 995 984 extern unsigned int rxrpc_rx_jumbo_max; 996 - extern unsigned int rxrpc_resend_timeout; 985 + extern unsigned long rxrpc_resend_timeout; 997 986 998 987 extern const s8 rxrpc_ack_priority[]; 999 988
+84 -95
net/rxrpc/call_event.c
··· 22 22 #include "ar-internal.h" 23 23 24 24 /* 25 - * Set the timer 26 - */ 27 - void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, 28 - ktime_t now) 29 - { 30 - unsigned long t_j, now_j = jiffies; 31 - ktime_t t; 32 - bool queue = false; 33 - 34 - if (call->state < RXRPC_CALL_COMPLETE) { 35 - t = call->expire_at; 36 - if (!ktime_after(t, now)) { 37 - trace_rxrpc_timer(call, why, now, now_j); 38 - queue = true; 39 - goto out; 40 - } 41 - 42 - if (!ktime_after(call->resend_at, now)) { 43 - call->resend_at = call->expire_at; 44 - if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) 45 - queue = true; 46 - } else if (ktime_before(call->resend_at, t)) { 47 - t = call->resend_at; 48 - } 49 - 50 - if (!ktime_after(call->ack_at, now)) { 51 - call->ack_at = call->expire_at; 52 - if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events)) 53 - queue = true; 54 - } else if (ktime_before(call->ack_at, t)) { 55 - t = call->ack_at; 56 - } 57 - 58 - if (!ktime_after(call->ping_at, now)) { 59 - call->ping_at = call->expire_at; 60 - if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events)) 61 - queue = true; 62 - } else if (ktime_before(call->ping_at, t)) { 63 - t = call->ping_at; 64 - } 65 - 66 - t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now))); 67 - t_j += jiffies; 68 - 69 - /* We have to make sure that the calculated jiffies value falls 70 - * at or after the nsec value, or we may loop ceaselessly 71 - * because the timer times out, but we haven't reached the nsec 72 - * timeout yet. 
73 - */ 74 - t_j++; 75 - 76 - if (call->timer.expires != t_j || !timer_pending(&call->timer)) { 77 - mod_timer(&call->timer, t_j); 78 - trace_rxrpc_timer(call, why, now, now_j); 79 - } 80 - } 81 - 82 - out: 83 - if (queue) 84 - rxrpc_queue_call(call); 85 - } 86 - 87 - /* 88 - * Set the timer 89 - */ 90 - void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, 91 - ktime_t now) 92 - { 93 - read_lock_bh(&call->state_lock); 94 - __rxrpc_set_timer(call, why, now); 95 - read_unlock_bh(&call->state_lock); 96 - } 97 - 98 - /* 99 25 * Propose a PING ACK be sent. 100 26 */ 101 27 static void rxrpc_propose_ping(struct rxrpc_call *call, ··· 32 106 !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events)) 33 107 rxrpc_queue_call(call); 34 108 } else { 35 - ktime_t now = ktime_get_real(); 36 - ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay); 109 + unsigned long now = jiffies; 110 + unsigned long ping_at = now + rxrpc_idle_ack_delay; 37 111 38 - if (ktime_before(ping_at, call->ping_at)) { 39 - call->ping_at = ping_at; 40 - rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now); 112 + if (time_before(ping_at, call->ping_at)) { 113 + WRITE_ONCE(call->ping_at, ping_at); 114 + rxrpc_reduce_call_timer(call, ping_at, now, 115 + rxrpc_timer_set_for_ping); 41 116 } 42 117 } 43 118 } ··· 52 125 enum rxrpc_propose_ack_trace why) 53 126 { 54 127 enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use; 55 - unsigned int expiry = rxrpc_soft_ack_delay; 56 - ktime_t now, ack_at; 128 + unsigned long now, ack_at, expiry = rxrpc_soft_ack_delay; 57 129 s8 prior = rxrpc_ack_priority[ack_reason]; 58 130 59 131 /* Pings are handled specially because we don't want to accidentally ··· 116 190 background) 117 191 rxrpc_queue_call(call); 118 192 } else { 119 - now = ktime_get_real(); 120 - ack_at = ktime_add_ms(now, expiry); 121 - if (ktime_before(ack_at, call->ack_at)) { 122 - call->ack_at = ack_at; 123 - rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now); 193 + now = 
jiffies; 194 + ack_at = jiffies + expiry; 195 + if (time_before(ack_at, call->ack_at)) { 196 + WRITE_ONCE(call->ack_at, ack_at); 197 + rxrpc_reduce_call_timer(call, ack_at, now, 198 + rxrpc_timer_set_for_ack); 124 199 } 125 200 } 126 201 ··· 154 227 /* 155 228 * Perform retransmission of NAK'd and unack'd packets. 156 229 */ 157 - static void rxrpc_resend(struct rxrpc_call *call, ktime_t now) 230 + static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) 158 231 { 159 232 struct rxrpc_skb_priv *sp; 160 233 struct sk_buff *skb; 234 + unsigned long resend_at; 161 235 rxrpc_seq_t cursor, seq, top; 162 - ktime_t max_age, oldest, ack_ts; 236 + ktime_t now, max_age, oldest, ack_ts; 163 237 int ix; 164 238 u8 annotation, anno_type, retrans = 0, unacked = 0; 165 239 166 240 _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); 167 241 168 - max_age = ktime_sub_ms(now, rxrpc_resend_timeout); 242 + now = ktime_get_real(); 243 + max_age = ktime_sub_ms(now, rxrpc_resend_timeout * 1000 / HZ); 169 244 170 245 spin_lock_bh(&call->lock); 171 246 ··· 211 282 ktime_to_ns(ktime_sub(skb->tstamp, max_age))); 212 283 } 213 284 214 - call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout); 285 + resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now))); 286 + resend_at += jiffies + rxrpc_resend_timeout; 287 + WRITE_ONCE(call->resend_at, resend_at); 215 288 216 289 if (unacked) 217 290 rxrpc_congestion_timeout(call); ··· 223 292 * retransmitting data. 
224 293 */ 225 294 if (!retrans) { 226 - rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now); 295 + rxrpc_reduce_call_timer(call, resend_at, now, 296 + rxrpc_timer_set_for_resend); 227 297 spin_unlock_bh(&call->lock); 228 298 ack_ts = ktime_sub(now, call->acks_latest_ts); 229 299 if (ktime_to_ns(ack_ts) < call->peer->rtt) ··· 296 364 { 297 365 struct rxrpc_call *call = 298 366 container_of(work, struct rxrpc_call, processor); 299 - ktime_t now; 367 + unsigned long now, next, t; 300 368 301 369 rxrpc_see_call(call); 302 370 ··· 316 384 goto out_put; 317 385 } 318 386 319 - now = ktime_get_real(); 320 - if (ktime_before(call->expire_at, now)) { 387 + /* Work out if any timeouts tripped */ 388 + now = jiffies; 389 + t = READ_ONCE(call->expect_rx_by); 390 + if (time_after_eq(now, t)) { 391 + trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now); 392 + set_bit(RXRPC_CALL_EV_EXPIRED, &call->events); 393 + } 394 + 395 + t = READ_ONCE(call->expect_req_by); 396 + if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST && 397 + time_after_eq(now, t)) { 398 + trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now); 399 + set_bit(RXRPC_CALL_EV_EXPIRED, &call->events); 400 + } 401 + 402 + t = READ_ONCE(call->expect_term_by); 403 + if (time_after_eq(now, t)) { 404 + trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now); 405 + set_bit(RXRPC_CALL_EV_EXPIRED, &call->events); 406 + } 407 + 408 + t = READ_ONCE(call->ack_at); 409 + if (time_after_eq(now, t)) { 410 + trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now); 411 + cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET); 412 + set_bit(RXRPC_CALL_EV_ACK, &call->events); 413 + } 414 + 415 + t = READ_ONCE(call->ping_at); 416 + if (time_after_eq(now, t)) { 417 + trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now); 418 + cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET); 419 + set_bit(RXRPC_CALL_EV_PING, &call->events); 420 + } 421 + 422 + t = READ_ONCE(call->resend_at); 423 + if (time_after_eq(now, t)) { 424 + trace_rxrpc_timer(call, 
rxrpc_timer_exp_resend, now); 425 + cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET); 426 + set_bit(RXRPC_CALL_EV_RESEND, &call->events); 427 + } 428 + 429 + /* Process events */ 430 + if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) { 321 431 rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME); 322 432 set_bit(RXRPC_CALL_EV_ABORT, &call->events); 323 433 goto recheck_state; ··· 382 408 goto recheck_state; 383 409 } 384 410 385 - rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now); 411 + /* Make sure the timer is restarted */ 412 + next = call->expect_rx_by; 413 + 414 + #define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; } 415 + 416 + set(call->expect_req_by); 417 + set(call->expect_term_by); 418 + set(call->ack_at); 419 + set(call->resend_at); 420 + set(call->ping_at); 421 + 422 + now = jiffies; 423 + if (time_after_eq(now, next)) 424 + goto recheck_state; 425 + 426 + rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart); 386 427 387 428 /* other events may have been raised since we started checking */ 388 429 if (call->events && call->state < RXRPC_CALL_COMPLETE) {
+15 -10
net/rxrpc/call_object.c
··· 51 51 52 52 _enter("%d", call->debug_id); 53 53 54 - if (call->state < RXRPC_CALL_COMPLETE) 55 - rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real()); 54 + if (call->state < RXRPC_CALL_COMPLETE) { 55 + trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies); 56 + rxrpc_queue_call(call); 57 + } 56 58 } 57 59 58 60 static struct lock_class_key rxrpc_call_user_mutex_lock_class_key; ··· 141 139 atomic_set(&call->usage, 1); 142 140 call->debug_id = atomic_inc_return(&rxrpc_debug_id); 143 141 call->tx_total_len = -1; 142 + call->next_rx_timo = 20 * HZ; 143 + call->next_req_timo = 1 * HZ; 144 144 145 145 memset(&call->sock_node, 0xed, sizeof(call->sock_node)); 146 146 ··· 193 189 */ 194 190 static void rxrpc_start_call_timer(struct rxrpc_call *call) 195 191 { 196 - ktime_t now = ktime_get_real(), expire_at; 192 + unsigned long now = jiffies; 193 + unsigned long j = now + MAX_JIFFY_OFFSET; 197 194 198 - expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime); 199 - call->expire_at = expire_at; 200 - call->ack_at = expire_at; 201 - call->ping_at = expire_at; 202 - call->resend_at = expire_at; 203 - call->timer.expires = jiffies + LONG_MAX / 2; 204 - rxrpc_set_timer(call, rxrpc_timer_begin, now); 195 + call->ack_at = j; 196 + call->resend_at = j; 197 + call->ping_at = j; 198 + call->expect_rx_by = j; 199 + call->expect_req_by = j; 200 + call->expect_term_by = j; 201 + call->timer.expires = now; 205 202 } 206 203 207 204 /*
+2 -2
net/rxrpc/conn_client.c
··· 85 85 86 86 __read_mostly unsigned int rxrpc_max_client_connections = 1000; 87 87 __read_mostly unsigned int rxrpc_reap_client_connections = 900; 88 - __read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; 89 - __read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ; 88 + __read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; 89 + __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; 90 90 91 91 /* 92 92 * We use machine-unique IDs for our client connections.
+30 -4
net/rxrpc/input.c
··· 318 318 static bool rxrpc_receiving_reply(struct rxrpc_call *call) 319 319 { 320 320 struct rxrpc_ack_summary summary = { 0 }; 321 + unsigned long now, timo; 321 322 rxrpc_seq_t top = READ_ONCE(call->tx_top); 322 323 323 324 if (call->ackr_reason) { 324 325 spin_lock_bh(&call->lock); 325 326 call->ackr_reason = 0; 326 - call->resend_at = call->expire_at; 327 - call->ack_at = call->expire_at; 328 327 spin_unlock_bh(&call->lock); 329 - rxrpc_set_timer(call, rxrpc_timer_init_for_reply, 330 - ktime_get_real()); 328 + now = jiffies; 329 + timo = now + MAX_JIFFY_OFFSET; 330 + WRITE_ONCE(call->resend_at, timo); 331 + WRITE_ONCE(call->ack_at, timo); 332 + trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); 331 333 } 332 334 333 335 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) ··· 438 436 state = READ_ONCE(call->state); 439 437 if (state >= RXRPC_CALL_COMPLETE) 440 438 return; 439 + 440 + if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) { 441 + unsigned long timo = READ_ONCE(call->next_req_timo); 442 + unsigned long now, expect_req_by; 443 + 444 + if (timo) { 445 + now = jiffies; 446 + expect_req_by = now + timo; 447 + WRITE_ONCE(call->expect_req_by, expect_req_by); 448 + rxrpc_reduce_call_timer(call, expect_req_by, now, 449 + rxrpc_timer_set_for_idle); 450 + } 451 + } 441 452 442 453 /* Received data implicitly ACKs all of the request packets we sent 443 454 * when we're acting as a client. 
··· 923 908 struct sk_buff *skb, u16 skew) 924 909 { 925 910 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 911 + unsigned long timo; 926 912 927 913 _enter("%p,%p", call, skb); 928 914 915 + timo = READ_ONCE(call->next_rx_timo); 916 + if (timo) { 917 + unsigned long now = jiffies, expect_rx_by; 918 + 919 + expect_rx_by = jiffies + timo; 920 + WRITE_ONCE(call->expect_rx_by, expect_rx_by); 921 + rxrpc_reduce_call_timer(call, expect_rx_by, now, 922 + rxrpc_timer_set_for_normal); 923 + } 924 + 929 925 switch (sp->hdr.type) { 930 926 case RXRPC_PACKET_TYPE_DATA: 931 927 rxrpc_input_data(call, skb, skew);
+7 -12
net/rxrpc/misc.c
··· 21 21 unsigned int rxrpc_max_backlog __read_mostly = 10; 22 22 23 23 /* 24 - * Maximum lifetime of a call (in mx). 25 - */ 26 - unsigned int rxrpc_max_call_lifetime = 60 * 1000; 27 - 28 - /* 29 24 * How long to wait before scheduling ACK generation after seeing a 30 - * packet with RXRPC_REQUEST_ACK set (in ms). 25 + * packet with RXRPC_REQUEST_ACK set (in jiffies). 31 26 */ 32 - unsigned int rxrpc_requested_ack_delay = 1; 27 + unsigned long rxrpc_requested_ack_delay = 1; 33 28 34 29 /* 35 - * How long to wait before scheduling an ACK with subtype DELAY (in ms). 30 + * How long to wait before scheduling an ACK with subtype DELAY (in jiffies). 36 31 * 37 32 * We use this when we've received new data packets. If those packets aren't 38 33 * all consumed within this time we will send a DELAY ACK if an ACK was not 39 34 * requested to let the sender know it doesn't need to resend. 40 35 */ 41 - unsigned int rxrpc_soft_ack_delay = 1 * 1000; 36 + unsigned long rxrpc_soft_ack_delay = HZ; 42 37 43 38 /* 44 - * How long to wait before scheduling an ACK with subtype IDLE (in ms). 39 + * How long to wait before scheduling an ACK with subtype IDLE (in jiffies). 45 40 * 46 41 * We use this when we've consumed some previously soft-ACK'd packets when 47 42 * further packets aren't immediately received to decide when to send an IDLE 48 43 * ACK let the other end know that it can free up its Tx buffer space. 49 44 */ 50 - unsigned int rxrpc_idle_ack_delay = 0.5 * 1000; 45 + unsigned long rxrpc_idle_ack_delay = HZ / 2; 51 46 52 47 /* 53 48 * Receive window size in packets. This indicates the maximum number of ··· 70 75 /* 71 76 * Time till packet resend (in milliseconds). 72 77 */ 73 - unsigned int rxrpc_resend_timeout = 4 * 1000; 78 + unsigned long rxrpc_resend_timeout = 4 * HZ; 74 79 75 80 const s8 rxrpc_ack_priority[] = { 76 81 [0] = 0,
+1 -1
net/rxrpc/recvmsg.c
··· 163 163 case RXRPC_CALL_SERVER_RECV_REQUEST: 164 164 call->tx_phase = true; 165 165 call->state = RXRPC_CALL_SERVER_ACK_REQUEST; 166 - call->ack_at = call->expire_at; 166 + call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; 167 167 write_unlock_bh(&call->state_lock); 168 168 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, 169 169 rxrpc_propose_ack_processing_op);
+49 -10
net/rxrpc/sendmsg.c
··· 158 158 rxrpc_notify_end_tx_t notify_end_tx) 159 159 { 160 160 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 161 + unsigned long now; 161 162 rxrpc_seq_t seq = sp->hdr.seq; 162 163 int ret, ix; 163 164 u8 annotation = RXRPC_TX_ANNO_UNACK; ··· 198 197 break; 199 198 case RXRPC_CALL_SERVER_ACK_REQUEST: 200 199 call->state = RXRPC_CALL_SERVER_SEND_REPLY; 201 - call->ack_at = call->expire_at; 200 + now = jiffies; 201 + WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET); 202 202 if (call->ackr_reason == RXRPC_ACK_DELAY) 203 203 call->ackr_reason = 0; 204 - __rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply, 205 - ktime_get_real()); 204 + trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now); 206 205 if (!last) 207 206 break; 208 207 /* Fall through */ ··· 224 223 _debug("need instant resend %d", ret); 225 224 rxrpc_instant_resend(call, ix); 226 225 } else { 227 - ktime_t now = ktime_get_real(), resend_at; 226 + unsigned long now = jiffies, resend_at; 228 227 229 - resend_at = ktime_add_ms(now, rxrpc_resend_timeout); 230 - 231 - if (ktime_before(resend_at, call->resend_at)) { 232 - call->resend_at = resend_at; 233 - rxrpc_set_timer(call, rxrpc_timer_set_for_send, now); 234 - } 228 + resend_at = now + rxrpc_resend_timeout; 229 + WRITE_ONCE(call->resend_at, resend_at); 230 + rxrpc_reduce_call_timer(call, resend_at, now, 231 + rxrpc_timer_set_for_send); 235 232 } 236 233 237 234 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); ··· 512 513 return -EINVAL; 513 514 break; 514 515 516 + case RXRPC_SET_CALL_TIMEOUT: 517 + if (len & 3 || len < 4 || len > 12) 518 + return -EINVAL; 519 + memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len); 520 + p->call.nr_timeouts = len / 4; 521 + if (p->call.timeouts.hard > INT_MAX / HZ) 522 + return -ERANGE; 523 + if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000) 524 + return -ERANGE; 525 + if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000) 526 + return -ERANGE; 527 + break; 528 + 515 529 default: 
516 530 return -EINVAL; 517 531 } ··· 589 577 { 590 578 enum rxrpc_call_state state; 591 579 struct rxrpc_call *call; 580 + unsigned long now, j; 592 581 int ret; 593 582 594 583 struct rxrpc_send_params p = { 595 584 .call.tx_total_len = -1, 596 585 .call.user_call_ID = 0, 586 + .call.nr_timeouts = 0, 597 587 .abort_code = 0, 598 588 .command = RXRPC_CMD_SEND_DATA, 599 589 .exclusive = false, ··· 658 644 goto error_put; 659 645 call->tx_total_len = p.call.tx_total_len; 660 646 } 647 + } 648 + 649 + switch (p.call.nr_timeouts) { 650 + case 3: 651 + j = msecs_to_jiffies(p.call.timeouts.normal); 652 + if (p.call.timeouts.normal > 0 && j == 0) 653 + j = 1; 654 + WRITE_ONCE(call->next_rx_timo, j); 655 + /* Fall through */ 656 + case 2: 657 + j = msecs_to_jiffies(p.call.timeouts.idle); 658 + if (p.call.timeouts.idle > 0 && j == 0) 659 + j = 1; 660 + WRITE_ONCE(call->next_req_timo, j); 661 + /* Fall through */ 662 + case 1: 663 + if (p.call.timeouts.hard > 0) { 664 + j = msecs_to_jiffies(p.call.timeouts.hard); 665 + now = jiffies; 666 + j += now; 667 + WRITE_ONCE(call->expect_term_by, j); 668 + rxrpc_reduce_call_timer(call, j, now, 669 + rxrpc_timer_set_for_hard); 670 + } 671 + break; 661 672 } 662 673 663 674 state = READ_ONCE(call->state);
+29 -31
net/rxrpc/sysctl.c
··· 21 21 static const unsigned int thirtytwo = 32; 22 22 static const unsigned int n_65535 = 65535; 23 23 static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1; 24 + static const unsigned long one_jiffy = 1; 25 + static const unsigned long max_jiffies = MAX_JIFFY_OFFSET; 24 26 25 27 /* 26 28 * RxRPC operating parameters. ··· 31 29 * information on the individual parameters. 32 30 */ 33 31 static struct ctl_table rxrpc_sysctl_table[] = { 34 - /* Values measured in milliseconds */ 32 + /* Values measured in milliseconds but used in jiffies */ 35 33 { 36 34 .procname = "req_ack_delay", 37 35 .data = &rxrpc_requested_ack_delay, 38 - .maxlen = sizeof(unsigned int), 36 + .maxlen = sizeof(unsigned long), 39 37 .mode = 0644, 40 - .proc_handler = proc_dointvec, 41 - .extra1 = (void *)&zero, 38 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 39 + .extra1 = (void *)&one_jiffy, 40 + .extra2 = (void *)&max_jiffies, 42 41 }, 43 42 { 44 43 .procname = "soft_ack_delay", 45 44 .data = &rxrpc_soft_ack_delay, 46 - .maxlen = sizeof(unsigned int), 45 + .maxlen = sizeof(unsigned long), 47 46 .mode = 0644, 48 - .proc_handler = proc_dointvec, 49 - .extra1 = (void *)&one, 47 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 48 + .extra1 = (void *)&one_jiffy, 49 + .extra2 = (void *)&max_jiffies, 50 50 }, 51 51 { 52 52 .procname = "idle_ack_delay", 53 53 .data = &rxrpc_idle_ack_delay, 54 - .maxlen = sizeof(unsigned int), 54 + .maxlen = sizeof(unsigned long), 55 55 .mode = 0644, 56 - .proc_handler = proc_dointvec, 57 - .extra1 = (void *)&one, 58 - }, 59 - { 60 - .procname = "resend_timeout", 61 - .data = &rxrpc_resend_timeout, 62 - .maxlen = sizeof(unsigned int), 63 - .mode = 0644, 64 - .proc_handler = proc_dointvec, 65 - .extra1 = (void *)&one, 56 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 57 + .extra1 = (void *)&one_jiffy, 58 + .extra2 = (void *)&max_jiffies, 66 59 }, 67 60 { 68 61 .procname = "idle_conn_expiry", 69 62 .data = &rxrpc_conn_idle_client_expiry, 70 
- .maxlen = sizeof(unsigned int), 63 + .maxlen = sizeof(unsigned long), 71 64 .mode = 0644, 72 - .proc_handler = proc_dointvec_ms_jiffies, 73 - .extra1 = (void *)&one, 65 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 66 + .extra1 = (void *)&one_jiffy, 67 + .extra2 = (void *)&max_jiffies, 74 68 }, 75 69 { 76 70 .procname = "idle_conn_fast_expiry", 77 71 .data = &rxrpc_conn_idle_client_fast_expiry, 78 - .maxlen = sizeof(unsigned int), 72 + .maxlen = sizeof(unsigned long), 79 73 .mode = 0644, 80 - .proc_handler = proc_dointvec_ms_jiffies, 81 - .extra1 = (void *)&one, 74 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 75 + .extra1 = (void *)&one_jiffy, 76 + .extra2 = (void *)&max_jiffies, 82 77 }, 83 - 84 - /* Values measured in seconds but used in jiffies */ 85 78 { 86 - .procname = "max_call_lifetime", 87 - .data = &rxrpc_max_call_lifetime, 88 - .maxlen = sizeof(unsigned int), 79 + .procname = "resend_timeout", 80 + .data = &rxrpc_resend_timeout, 81 + .maxlen = sizeof(unsigned long), 89 82 .mode = 0644, 90 - .proc_handler = proc_dointvec, 91 - .extra1 = (void *)&one, 83 + .proc_handler = proc_doulongvec_ms_jiffies_minmax, 84 + .extra1 = (void *)&one_jiffy, 85 + .extra2 = (void *)&max_jiffies, 92 86 }, 93 87 94 88 /* Non-time values */