1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Definitions for the TCP module.
8 *
9 * Version: @(#)tcp.h 1.0.5 05/23/93
10 *
11 * Authors: Ross Biro
12 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 */
14#ifndef _TCP_H
15#define _TCP_H
16
17#define FASTRETRANS_DEBUG 1
18
19#include <linux/list.h>
20#include <linux/tcp.h>
21#include <linux/bug.h>
22#include <linux/slab.h>
23#include <linux/cache.h>
24#include <linux/percpu.h>
25#include <linux/skbuff.h>
26#include <linux/kref.h>
27#include <linux/ktime.h>
28#include <linux/indirect_call_wrapper.h>
29
30#include <net/inet_connection_sock.h>
31#include <net/inet_timewait_sock.h>
32#include <net/inet_hashtables.h>
33#include <net/checksum.h>
34#include <net/request_sock.h>
35#include <net/sock_reuseport.h>
36#include <net/sock.h>
37#include <net/snmp.h>
38#include <net/ip.h>
39#include <net/tcp_states.h>
40#include <net/inet_ecn.h>
41#include <net/dst.h>
42#include <net/mptcp.h>
43
44#include <linux/seq_file.h>
45#include <linux/memcontrol.h>
46#include <linux/bpf-cgroup.h>
47#include <linux/siphash.h>
48#include <linux/net_mm.h>
49
50extern struct inet_hashinfo tcp_hashinfo;
51
52DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
53int tcp_orphan_count_sum(void);
54
55void tcp_time_wait(struct sock *sk, int state, int timeo);
56
57#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
58#define MAX_TCP_OPTION_SPACE 40
59#define TCP_MIN_SND_MSS 48
60#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
61
62/*
63 * Never offer a window over 32767 without using window scaling. Some
64 * poor stacks do signed 16bit maths!
65 */
66#define MAX_TCP_WINDOW 32767U
67
68/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
69#define TCP_MIN_MSS 88U
70
71/* The initial MTU to use for probing */
72#define TCP_BASE_MSS 1024
73
74/* probing interval, defaults to 10 minutes as per RFC4821 */
75#define TCP_PROBE_INTERVAL 600
76
77/* Specify interval when tcp mtu probing will stop */
78#define TCP_PROBE_THRESHOLD 8
79
80/* After receiving this amount of duplicate ACKs fast retransmit starts. */
81#define TCP_FASTRETRANS_THRESH 3
82
83/* Maximal number of ACKs sent quickly to accelerate slow-start. */
84#define TCP_MAX_QUICKACKS 16U
85
86/* Maximal number of window scale according to RFC1323 */
87#define TCP_MAX_WSCALE 14U
88
89/* urg_data states */
90#define TCP_URG_VALID 0x0100
91#define TCP_URG_NOTYET 0x0200
92#define TCP_URG_READ 0x0400
93
94#define TCP_RETR1 3 /*
95 * This is how many retries it does before it
96 * tries to figure out if the gateway is
97 * down. Minimal RFC value is 3; it corresponds
98 * to ~3sec-8min depending on RTO.
99 */
100
101#define TCP_RETR2 15 /*
102 * This should take at least
103 * 90 minutes to time out.
104 * RFC1122 says that the limit is 100 sec.
105 * 15 is ~13-30min depending on RTO.
106 */
107
108#define TCP_SYN_RETRIES 6 /* This is how many retries are done
109 * when active opening a connection.
110 * RFC1122 says the minimum retry MUST
111 * be at least 180secs. Nevertheless
112 * this value corresponds to
113 * 63secs of retransmission with the
114 * current initial RTO.
115 */
116
117#define TCP_SYNACK_RETRIES 5 /* This is how many retries are done
118 * when passive opening a connection.
119 * This corresponds to 31secs of
120 * retransmission with the current
121 * initial RTO.
122 */
123
124#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
125 * state, about 60 seconds */
126#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
127 /* BSD style FIN_WAIT2 deadlock breaker.
128 * It used to be 3min, new value is 60sec,
129 * to combine FIN-WAIT-2 timeout with
130 * TIME-WAIT timer.
131 */
132#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
133
134#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
135#if HZ >= 100
136#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
137#define TCP_ATO_MIN ((unsigned)(HZ/25))
138#else
139#define TCP_DELACK_MIN 4U
140#define TCP_ATO_MIN 4U
141#endif
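
/* For illustration (values depend on the kernel's tick rate): with HZ=1000,
 * TCP_DELACK_MAX is HZ/5 = 200 jiffies (200 ms) and TCP_DELACK_MIN /
 * TCP_ATO_MIN are HZ/25 = 40 jiffies (40 ms).  With HZ=100 the same
 * expressions yield 20 and 4 jiffies, still 200 ms and 40 ms of real time;
 * the fixed 4-jiffy fallback in the #else branch only matters for HZ < 100.
 */
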
142#define TCP_RTO_MAX ((unsigned)(120*HZ))
143#define TCP_RTO_MIN ((unsigned)(HZ/5))
144#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
145#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
146#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
147 * used as a fallback RTO for the
148 * initial data transmission if no
149 * valid RTT sample has been acquired,
150 * most likely due to retrans in 3WHS.
151 */
152
153#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
154 * for local resources.
155 */
156#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
157#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
158#define TCP_KEEPALIVE_INTVL (75*HZ)
159
160#define MAX_TCP_KEEPIDLE 32767
161#define MAX_TCP_KEEPINTVL 32767
162#define MAX_TCP_KEEPCNT 127
163#define MAX_TCP_SYNCNT 127
164
165#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
166#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
167 * after this time. It should be equal
168 * to (or greater than) TCP_TIMEWAIT_LEN
169 * to provide reliability equal to that
170 * provided by the timewait state.
171 */
172#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
173 * timestamps. It must be less than
174 * minimal timewait lifetime.
175 */
176/*
177 * TCP option
178 */
179
180#define TCPOPT_NOP 1 /* Padding */
181#define TCPOPT_EOL 0 /* End of options */
182#define TCPOPT_MSS 2 /* Segment size negotiating */
183#define TCPOPT_WINDOW 3 /* Window scaling */
184#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
185#define TCPOPT_SACK 5 /* SACK Block */
186#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
187#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
188#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
189#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
190#define TCPOPT_EXP 254 /* Experimental */
191/* Magic number (ExID) that follows the option kind and length when sharing
192 * TCP experimental options. See draft-ietf-tcpm-experimental-options-00.txt
193 */
194#define TCPOPT_FASTOPEN_MAGIC 0xF989
195#define TCPOPT_SMC_MAGIC 0xE2D4C3D9
196
197/*
198 * TCP option lengths
199 */
200
201#define TCPOLEN_MSS 4
202#define TCPOLEN_WINDOW 3
203#define TCPOLEN_SACK_PERM 2
204#define TCPOLEN_TIMESTAMP 10
205#define TCPOLEN_MD5SIG 18
206#define TCPOLEN_FASTOPEN_BASE 2
207#define TCPOLEN_EXP_FASTOPEN_BASE 4
208#define TCPOLEN_EXP_SMC_BASE 6
209
210/* But this is what stacks really send out. */
211#define TCPOLEN_TSTAMP_ALIGNED 12
212#define TCPOLEN_WSCALE_ALIGNED 4
213#define TCPOLEN_SACKPERM_ALIGNED 4
214#define TCPOLEN_SACK_BASE 2
215#define TCPOLEN_SACK_BASE_ALIGNED 4
216#define TCPOLEN_SACK_PERBLOCK 8
217#define TCPOLEN_MD5SIG_ALIGNED 20
218#define TCPOLEN_MSS_ALIGNED 4
219#define TCPOLEN_EXP_SMC_BASE_ALIGNED 8
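
/* For illustration: the aligned sizes account for NOP padding to 32-bit
 * boundaries as emitted on the wire.  The timestamp option is 10 bytes
 * (TCPOLEN_TIMESTAMP), sent as NOP + NOP + timestamp = 12 bytes
 * (TCPOLEN_TSTAMP_ALIGNED).  The SACK header is 2 bytes (TCPOLEN_SACK_BASE),
 * padded to 4 (TCPOLEN_SACK_BASE_ALIGNED), and each SACK block then adds
 * TCPOLEN_SACK_PERBLOCK = 8 bytes: two 32-bit sequence numbers.
 */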
220
221/* Flags in tp->nonagle */
222#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
223#define TCP_NAGLE_CORK 2 /* Socket is corked */
224#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
225
226/* TCP thin-stream limits */
227#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
228
229/* TCP initial congestion window as per rfc6928 */
230#define TCP_INIT_CWND 10
231
232/* Bit Flags for sysctl_tcp_fastopen */
233#define TFO_CLIENT_ENABLE 1
234#define TFO_SERVER_ENABLE 2
235#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
236
237/* Accept SYN data w/o any cookie option */
238#define TFO_SERVER_COOKIE_NOT_REQD 0x200
239
240/* Force enable TFO on all listeners, i.e., not requiring the
241 * TCP_FASTOPEN socket option.
242 */
243#define TFO_SERVER_WO_SOCKOPT1 0x400
244
245
246/* sysctl variables for tcp */
247extern int sysctl_tcp_max_orphans;
248extern long sysctl_tcp_mem[3];
249
250#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
251#define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */
252#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */
253
254extern atomic_long_t tcp_memory_allocated;
255DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
256
257extern struct percpu_counter tcp_sockets_allocated;
258extern unsigned long tcp_memory_pressure;
259
260/* optimized version of sk_under_memory_pressure() for TCP sockets */
261static inline bool tcp_under_memory_pressure(const struct sock *sk)
262{
263 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
264 mem_cgroup_under_socket_pressure(sk->sk_memcg))
265 return true;
266
267 return READ_ONCE(tcp_memory_pressure);
268}
269/*
270 * The next routines deal with comparing 32 bit unsigned ints
271 * and worry about wraparound (automatic with unsigned arithmetic).
272 */
273
274static inline bool before(__u32 seq1, __u32 seq2)
275{
276 return (__s32)(seq1-seq2) < 0;
277}
278#define after(seq2, seq1) before(seq1, seq2)
279
280/* is s2<=s1<=s3 ? */
281static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
282{
283 return seq3 - seq2 >= seq1 - seq2;
284}
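
/* Worked example (arbitrary values): sequence numbers wrap modulo 2^32, so a
 * plain "<" is wrong near the wrap point.  The signed-difference trick above
 * handles it: with seq1 = 0xfffffff0 and seq2 = 0x00000010,
 * (__s32)(seq1 - seq2) == (__s32)0xffffffe0 == -32 < 0, so before(seq1, seq2)
 * is true even though seq1 > seq2 as unsigned values.  between(seq1, seq2, seq3)
 * uses the same idea to test seq2 <= seq1 <= seq3 modulo 2^32.
 */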
285
286static inline bool tcp_out_of_memory(struct sock *sk)
287{
288 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
289 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
290 return true;
291 return false;
292}
293
294static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
295{
296 sk_wmem_queued_add(sk, -skb->truesize);
297 if (!skb_zcopy_pure(skb))
298 sk_mem_uncharge(sk, skb->truesize);
299 else
300 sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
301 __kfree_skb(skb);
302}
303
304void sk_forced_mem_schedule(struct sock *sk, int size);
305
306bool tcp_check_oom(struct sock *sk, int shift);
307
308
309extern struct proto tcp_prot;
310
311#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
312#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
313#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
314#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
315
316void tcp_tasklet_init(void);
317
318int tcp_v4_err(struct sk_buff *skb, u32);
319
320void tcp_shutdown(struct sock *sk, int how);
321
322int tcp_v4_early_demux(struct sk_buff *skb);
323int tcp_v4_rcv(struct sk_buff *skb);
324
325void tcp_remove_empty_skb(struct sock *sk);
326int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
327int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
328int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
329int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
330 size_t size, struct ubuf_info *uarg);
331void tcp_splice_eof(struct socket *sock);
332int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
333int tcp_wmem_schedule(struct sock *sk, int copy);
334void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
335 int size_goal);
336void tcp_release_cb(struct sock *sk);
337void tcp_wfree(struct sk_buff *skb);
338void tcp_write_timer_handler(struct sock *sk);
339void tcp_delack_timer_handler(struct sock *sk);
340int tcp_ioctl(struct sock *sk, int cmd, int *karg);
341int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
342void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
343void tcp_rcv_space_adjust(struct sock *sk);
344int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
345void tcp_twsk_destructor(struct sock *sk);
346void tcp_twsk_purge(struct list_head *net_exit_list, int family);
347ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
348 struct pipe_inode_info *pipe, size_t len,
349 unsigned int flags);
350struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
351 bool force_schedule);
352
353void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
354static inline void tcp_dec_quickack_mode(struct sock *sk,
355 const unsigned int pkts)
356{
357 struct inet_connection_sock *icsk = inet_csk(sk);
358
359 if (icsk->icsk_ack.quick) {
360 if (pkts >= icsk->icsk_ack.quick) {
361 icsk->icsk_ack.quick = 0;
362 /* Leaving quickack mode we deflate ATO. */
363 icsk->icsk_ack.ato = TCP_ATO_MIN;
364 } else
365 icsk->icsk_ack.quick -= pkts;
366 }
367}
368
369#define TCP_ECN_OK 1
370#define TCP_ECN_QUEUE_CWR 2
371#define TCP_ECN_DEMAND_CWR 4
372#define TCP_ECN_SEEN 8
373
374enum tcp_tw_status {
375 TCP_TW_SUCCESS = 0,
376 TCP_TW_RST = 1,
377 TCP_TW_ACK = 2,
378 TCP_TW_SYN = 3
379};
380
381
382enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
383 struct sk_buff *skb,
384 const struct tcphdr *th);
385struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
386 struct request_sock *req, bool fastopen,
387 bool *lost_race);
388int tcp_child_process(struct sock *parent, struct sock *child,
389 struct sk_buff *skb);
390void tcp_enter_loss(struct sock *sk);
391void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
392void tcp_clear_retrans(struct tcp_sock *tp);
393void tcp_update_metrics(struct sock *sk);
394void tcp_init_metrics(struct sock *sk);
395void tcp_metrics_init(void);
396bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
397void __tcp_close(struct sock *sk, long timeout);
398void tcp_close(struct sock *sk, long timeout);
399void tcp_init_sock(struct sock *sk);
400void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
401__poll_t tcp_poll(struct file *file, struct socket *sock,
402 struct poll_table_struct *wait);
403int do_tcp_getsockopt(struct sock *sk, int level,
404 int optname, sockptr_t optval, sockptr_t optlen);
405int tcp_getsockopt(struct sock *sk, int level, int optname,
406 char __user *optval, int __user *optlen);
407bool tcp_bpf_bypass_getsockopt(int level, int optname);
408int do_tcp_setsockopt(struct sock *sk, int level, int optname,
409 sockptr_t optval, unsigned int optlen);
410int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
411 unsigned int optlen);
412void tcp_set_keepalive(struct sock *sk, int val);
413void tcp_syn_ack_timeout(const struct request_sock *req);
414int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
415 int flags, int *addr_len);
416int tcp_set_rcvlowat(struct sock *sk, int val);
417int tcp_set_window_clamp(struct sock *sk, int val);
418void tcp_update_recv_tstamps(struct sk_buff *skb,
419 struct scm_timestamping_internal *tss);
420void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
421 struct scm_timestamping_internal *tss);
422void tcp_data_ready(struct sock *sk);
423#ifdef CONFIG_MMU
424int tcp_mmap(struct file *file, struct socket *sock,
425 struct vm_area_struct *vma);
426#endif
427void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
428 struct tcp_options_received *opt_rx,
429 int estab, struct tcp_fastopen_cookie *foc);
430const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
431
432/*
433 * BPF SKB-less helpers
434 */
435u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
436 struct tcphdr *th, u32 *cookie);
437u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
438 struct tcphdr *th, u32 *cookie);
439u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
440u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
441 const struct tcp_request_sock_ops *af_ops,
442 struct sock *sk, struct tcphdr *th);
443/*
444 * TCP v4 functions exported for the inet6 API
445 */
446
447void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
448void tcp_v4_mtu_reduced(struct sock *sk);
449void tcp_req_err(struct sock *sk, u32 seq, bool abort);
450void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
451int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
452struct sock *tcp_create_openreq_child(const struct sock *sk,
453 struct request_sock *req,
454 struct sk_buff *skb);
455void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
456struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
457 struct request_sock *req,
458 struct dst_entry *dst,
459 struct request_sock *req_unhash,
460 bool *own_req);
461int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
462int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
463int tcp_connect(struct sock *sk);
464enum tcp_synack_type {
465 TCP_SYNACK_NORMAL,
466 TCP_SYNACK_FASTOPEN,
467 TCP_SYNACK_COOKIE,
468};
469struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
470 struct request_sock *req,
471 struct tcp_fastopen_cookie *foc,
472 enum tcp_synack_type synack_type,
473 struct sk_buff *syn_skb);
474int tcp_disconnect(struct sock *sk, int flags);
475
476void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
477int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
478void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
479
480/* From syncookies.c */
481struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
482 struct request_sock *req,
483 struct dst_entry *dst, u32 tsoff);
484int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
485 u32 cookie);
486struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
487struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
488 const struct tcp_request_sock_ops *af_ops,
489 struct sock *sk, struct sk_buff *skb);
490#ifdef CONFIG_SYN_COOKIES
491
492/* Syncookies use a monotonic timer which increments every 60 seconds.
493 * This counter is used both as a hash input and partially encoded into
494 * the cookie value. A cookie is only validated further if the delta
495 * between the current counter value and the encoded one is less than this,
496 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
497 * the counter advances immediately after a cookie is generated).
498 */
499#define MAX_SYNCOOKIE_AGE 2
500#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
501#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
502
503/* syncookies: remember the time of the last synqueue overflow,
504 * but do not dirty this field too often (once per second is enough).
505 * It is racy as we do not hold a lock, but the race is very minor.
506 */
507static inline void tcp_synq_overflow(const struct sock *sk)
508{
509 unsigned int last_overflow;
510 unsigned int now = jiffies;
511
512 if (sk->sk_reuseport) {
513 struct sock_reuseport *reuse;
514
515 reuse = rcu_dereference(sk->sk_reuseport_cb);
516 if (likely(reuse)) {
517 last_overflow = READ_ONCE(reuse->synq_overflow_ts);
518 if (!time_between32(now, last_overflow,
519 last_overflow + HZ))
520 WRITE_ONCE(reuse->synq_overflow_ts, now);
521 return;
522 }
523 }
524
525 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
526 if (!time_between32(now, last_overflow, last_overflow + HZ))
527 WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
528}
529
530/* syncookies: no recent synqueue overflow on this listening socket? */
531static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
532{
533 unsigned int last_overflow;
534 unsigned int now = jiffies;
535
536 if (sk->sk_reuseport) {
537 struct sock_reuseport *reuse;
538
539 reuse = rcu_dereference(sk->sk_reuseport_cb);
540 if (likely(reuse)) {
541 last_overflow = READ_ONCE(reuse->synq_overflow_ts);
542 return !time_between32(now, last_overflow - HZ,
543 last_overflow +
544 TCP_SYNCOOKIE_VALID);
545 }
546 }
547
548 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
549
550 /* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
551 * then we're under synflood. However, we have to use
552 * 'last_overflow - HZ' as lower bound. That's because a concurrent
553 * tcp_synq_overflow() could update .ts_recent_stamp after we read
554 * jiffies but before we store .ts_recent_stamp into last_overflow,
555 * which could lead to rejecting a valid syncookie.
556 */
557 return !time_between32(now, last_overflow - HZ,
558 last_overflow + TCP_SYNCOOKIE_VALID);
559}
560
561static inline u32 tcp_cookie_time(void)
562{
563 u64 val = get_jiffies_64();
564
565 do_div(val, TCP_SYNCOOKIE_PERIOD);
566 return val;
567}
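
/* Worked example (assuming HZ=1000): tcp_cookie_time() is just jiffies
 * expressed in TCP_SYNCOOKIE_PERIOD (60 s) units.  Two hours of uptime is
 * 7,200,000 jiffies, and 7,200,000 / (60 * 1000) = 120.  Per the comment
 * above, a cookie minted with counter value N is accepted while the current
 * counter is N or N+1, i.e. for at most TCP_SYNCOOKIE_VALID (2 * 60 s).
 */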
568
569u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
570 u16 *mssp);
571__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
572u64 cookie_init_timestamp(struct request_sock *req, u64 now);
573bool cookie_timestamp_decode(const struct net *net,
574 struct tcp_options_received *opt);
575bool cookie_ecn_ok(const struct tcp_options_received *opt,
576 const struct net *net, const struct dst_entry *dst);
577
578/* From net/ipv6/syncookies.c */
579int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
580 u32 cookie);
581struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
582
583u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
584 const struct tcphdr *th, u16 *mssp);
585__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
586#endif
587/* tcp_output.c */
588
589void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
590void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
591void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
592 int nonagle);
593int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
594int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
595void tcp_retransmit_timer(struct sock *sk);
596void tcp_xmit_retransmit_queue(struct sock *);
597void tcp_simple_retransmit(struct sock *);
598void tcp_enter_recovery(struct sock *sk, bool ece_ack);
599int tcp_trim_head(struct sock *, struct sk_buff *, u32);
600enum tcp_queue {
601 TCP_FRAG_IN_WRITE_QUEUE,
602 TCP_FRAG_IN_RTX_QUEUE,
603};
604int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
605 struct sk_buff *skb, u32 len,
606 unsigned int mss_now, gfp_t gfp);
607
608void tcp_send_probe0(struct sock *);
609void tcp_send_partial(struct sock *);
610int tcp_write_wakeup(struct sock *, int mib);
611void tcp_send_fin(struct sock *sk);
612void tcp_send_active_reset(struct sock *sk, gfp_t priority);
613int tcp_send_synack(struct sock *);
614void tcp_push_one(struct sock *, unsigned int mss_now);
615void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
616void tcp_send_ack(struct sock *sk);
617void tcp_send_delayed_ack(struct sock *sk);
618void tcp_send_loss_probe(struct sock *sk);
619bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
620void tcp_skb_collapse_tstamp(struct sk_buff *skb,
621 const struct sk_buff *next_skb);
622
623/* tcp_input.c */
624void tcp_rearm_rto(struct sock *sk);
625void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
626void tcp_reset(struct sock *sk, struct sk_buff *skb);
627void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
628void tcp_fin(struct sock *sk);
629void tcp_check_space(struct sock *sk);
630void tcp_sack_compress_send_ack(struct sock *sk);
631
632/* tcp_timer.c */
633void tcp_init_xmit_timers(struct sock *);
634static inline void tcp_clear_xmit_timers(struct sock *sk)
635{
636 if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
637 __sock_put(sk);
638
639 if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
640 __sock_put(sk);
641
642 inet_csk_clear_xmit_timers(sk);
643}
644
645unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
646unsigned int tcp_current_mss(struct sock *sk);
647u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
648
649/* Bound MSS / TSO packet size with the half of the window */
650static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
651{
652 int cutoff;
653
654 /* When peer uses tiny windows, there is no use in packetizing
655 * to sub-MSS pieces for the sake of SWS or making sure there
656 * are enough packets in the pipe for fast recovery.
657 *
658 * On the other hand, for extremely large MSS devices, handling
659 * smaller than MSS windows in this way does make sense.
660 */
661 if (tp->max_window > TCP_MSS_DEFAULT)
662 cutoff = (tp->max_window >> 1);
663 else
664 cutoff = tp->max_window;
665
666 if (cutoff && pktsize > cutoff)
667 return max_t(int, cutoff, 68U - tp->tcp_header_len);
668 else
669 return pktsize;
670}
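
/* Worked example (typical values): with tp->max_window = 65535, which is
 * above TCP_MSS_DEFAULT (536), the cutoff is 65535 >> 1 = 32767, so a 64KB
 * TSO packet is bounded to 32767 bytes while a 1460-byte MSS packet passes
 * through unchanged.  The "68U - tp->tcp_header_len" floor only matters for
 * pathologically small peer windows and echoes the historic 68-byte minimum
 * IPv4 MTU.
 */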
671
672/* tcp.c */
673void tcp_get_info(struct sock *, struct tcp_info *);
674
675/* Read 'sendfile()'-style from a TCP socket */
676int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
677 sk_read_actor_t recv_actor);
678int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
679struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
680void tcp_read_done(struct sock *sk, size_t len);
681
682void tcp_initialize_rcv_mss(struct sock *sk);
683
684int tcp_mtu_to_mss(struct sock *sk, int pmtu);
685int tcp_mss_to_mtu(struct sock *sk, int mss);
686void tcp_mtup_init(struct sock *sk);
687
688static inline void tcp_bound_rto(const struct sock *sk)
689{
690 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
691 inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
692}
693
694static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
695{
696 return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
697}
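
/* Worked example (illustrative numbers): this is the RFC 6298 style
 * RTO = SRTT + 4*RTTVAR computation.  tp->srtt_us keeps the smoothed RTT
 * scaled by 8, and tp->rttvar_us already carries the 4x variance factor from
 * the RTT estimator, so srtt_us = 800000 (100 ms SRTT) and
 * rttvar_us = 200000 give usecs_to_jiffies(100000 + 200000), i.e. 300 ms
 * worth of jiffies, later clamped by tcp_bound_rto() to at most TCP_RTO_MAX.
 */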
698
699static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
700{
701 /* mptcp hooks are only on the slow path */
702 if (sk_is_mptcp((struct sock *)tp))
703 return;
704
705 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
706 ntohl(TCP_FLAG_ACK) |
707 snd_wnd);
708}
709
710static inline void tcp_fast_path_on(struct tcp_sock *tp)
711{
712 __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
713}
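
/* For illustration: pred_flags mirrors the fourth 32-bit word of the TCP
 * header (data offset, flags, window) in network byte order, so the input
 * path's header prediction can test "plain ACK of the expected shape" with a
 * single compare.  E.g. with timestamps enabled, tcp_header_len = 32, and
 * 32 << 26 == 8 << 28 puts doff = 8 in the top nibble; OR-ing
 * ntohl(TCP_FLAG_ACK) (0x00100000) sets only the ACK bit, and the low 16 bits
 * carry the window as it appears on the wire (snd_wnd >> snd_wscale).
 */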
714
715static inline void tcp_fast_path_check(struct sock *sk)
716{
717 struct tcp_sock *tp = tcp_sk(sk);
718
719 if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
720 tp->rcv_wnd &&
721 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
722 !tp->urg_data)
723 tcp_fast_path_on(tp);
724}
725
726/* Compute the actual rto_min value */
727static inline u32 tcp_rto_min(struct sock *sk)
728{
729 const struct dst_entry *dst = __sk_dst_get(sk);
730 u32 rto_min = inet_csk(sk)->icsk_rto_min;
731
732 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
733 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
734 return rto_min;
735}
736
737static inline u32 tcp_rto_min_us(struct sock *sk)
738{
739 return jiffies_to_usecs(tcp_rto_min(sk));
740}
741
742static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
743{
744 return dst_metric_locked(dst, RTAX_CC_ALGO);
745}
746
747/* Minimum RTT in usec. ~0 means not available. */
748static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
749{
750 return minmax_get(&tp->rtt_min);
751}
752
753/* Compute the actual receive window we are currently advertising.
754 * Rcv_nxt can be after the window if our peer pushes more data
755 * than the offered window.
756 */
757static inline u32 tcp_receive_window(const struct tcp_sock *tp)
758{
759 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
760
761 if (win < 0)
762 win = 0;
763 return (u32) win;
764}
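
/* Worked example (arbitrary values): with rcv_wup = 1000 (rcv_nxt at the time
 * the window was last advertised), rcv_wnd = 64000 and rcv_nxt = 30000, the
 * remaining advertised window is 1000 + 64000 - 30000 = 35000 bytes.  If the
 * peer has already pushed past the advertised right edge, the signed
 * intermediate goes negative and the result is clamped to 0.
 */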
765
766/* Choose a new window, without checks for shrinking, and without
767 * scaling applied to the result. The caller does these things
768 * if necessary. This is a "raw" window selection.
769 */
770u32 __tcp_select_window(struct sock *sk);
771
772void tcp_send_window_probe(struct sock *sk);
773
774/* TCP uses 32bit jiffies to save some space.
775 * Note that this is different from tcp_time_stamp, which
776 * historically was the same value until linux-4.13.
777 */
778#define tcp_jiffies32 ((u32)jiffies)
779
780/*
781 * Deliver a 32bit value for the TCP timestamp option (RFC 7323).
782 * It is no longer tied to jiffies, but to a 1 ms clock.
783 * Note: double check if you want to use tcp_jiffies32 instead of this.
784 */
785#define TCP_TS_HZ 1000
786
787static inline u64 tcp_clock_ns(void)
788{
789 return ktime_get_ns();
790}
791
792static inline u64 tcp_clock_us(void)
793{
794 return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
795}
796
797/* This should only be used in contexts where tp->tcp_mstamp is up to date */
798static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
799{
800 return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
801}
802
803/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
804static inline u32 tcp_ns_to_ts(u64 ns)
805{
806 return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
807}
808
809/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
810static inline u32 tcp_time_stamp_raw(void)
811{
812 return tcp_ns_to_ts(tcp_clock_ns());
813}
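
/* Worked example: with TCP_TS_HZ = 1000 these helpers are plain unit
 * conversions to milliseconds.  tcp_ns_to_ts(2500000000) =
 * 2500000000 / (NSEC_PER_SEC / 1000) = 2500, and tcp_time_stamp() performs
 * the matching division on tp->tcp_mstamp, which is kept in microseconds
 * (2500000 / (USEC_PER_SEC / 1000) = 2500).
 */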
814
815void tcp_mstamp_refresh(struct tcp_sock *tp);
816
817static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
818{
819 return max_t(s64, t1 - t0, 0);
820}
821
822static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
823{
824 return tcp_ns_to_ts(skb->skb_mstamp_ns);
825}
826
827/* provide the departure time in us unit */
828static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
829{
830 return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
831}
832
833
834#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
835
836#define TCPHDR_FIN 0x01
837#define TCPHDR_SYN 0x02
838#define TCPHDR_RST 0x04
839#define TCPHDR_PSH 0x08
840#define TCPHDR_ACK 0x10
841#define TCPHDR_URG 0x20
842#define TCPHDR_ECE 0x40
843#define TCPHDR_CWR 0x80
844
845#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
846
847/* This is what the send packet queuing engine uses to pass
848 * TCP per-packet control information to the transmission code.
849 * We also store the host-order sequence numbers here.
850 * This is 44 bytes if IPV6 is enabled.
851 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
852 */
853struct tcp_skb_cb {
854 __u32 seq; /* Starting sequence number */
855 __u32 end_seq; /* SEQ + FIN + SYN + datalen */
856 union {
857 /* Note : tcp_tw_isn is used in input path only
858 * (isn chosen by tcp_timewait_state_process())
859 *
860 * tcp_gso_segs/size are used in write queue only,
861 * cf tcp_skb_pcount()/tcp_skb_mss()
862 */
863 __u32 tcp_tw_isn;
864 struct {
865 u16 tcp_gso_segs;
866 u16 tcp_gso_size;
867 };
868 };
869 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
870
871 __u8 sacked; /* State flags for SACK. */
872#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
873#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
874#define TCPCB_LOST 0x04 /* SKB is lost */
875#define TCPCB_TAGBITS 0x07 /* All tag bits */
876#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */
877#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
878#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
879 TCPCB_REPAIRED)
880
881 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
882 __u8 txstamp_ack:1, /* Record TX timestamp for ack? */
883 eor:1, /* Is skb MSG_EOR marked? */
884 has_rxtstamp:1, /* SKB has a RX timestamp */
885 unused:5;
886 __u32 ack_seq; /* Sequence number ACK'd */
887 union {
888 struct {
889#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
890 /* There is space for up to 24 bytes */
891 __u32 is_app_limited:1, /* cwnd not fully used? */
892 delivered_ce:20,
893 unused:11;
894 /* pkts S/ACKed so far upon tx of skb, incl retrans: */
895 __u32 delivered;
896 /* start of send pipeline phase */
897 u64 first_tx_mstamp;
898 /* when we reached the "delivered" count */
899 u64 delivered_mstamp;
900 } tx; /* only used for outgoing skbs */
901 union {
902 struct inet_skb_parm h4;
903#if IS_ENABLED(CONFIG_IPV6)
904 struct inet6_skb_parm h6;
905#endif
906 } header; /* For incoming skbs */
907 };
908};
909
910#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
911
912extern const struct inet_connection_sock_af_ops ipv4_specific;
913
914#if IS_ENABLED(CONFIG_IPV6)
915/* This is the variant of inet6_iif() that must be used by TCP,
916 * as TCP moves IP6CB into a different location in skb->cb[]
917 */
918static inline int tcp_v6_iif(const struct sk_buff *skb)
919{
920 return TCP_SKB_CB(skb)->header.h6.iif;
921}
922
923static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
924{
925 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
926
927 return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
928}
929
930/* TCP_SKB_CB reference means this can not be used from early demux */
931static inline int tcp_v6_sdif(const struct sk_buff *skb)
932{
933#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
934 if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
935 return TCP_SKB_CB(skb)->header.h6.iif;
936#endif
937 return 0;
938}
939
940extern const struct inet_connection_sock_af_ops ipv6_specific;
941
942INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
943INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
944void tcp_v6_early_demux(struct sk_buff *skb);
945
946#endif
947
948/* TCP_SKB_CB reference means this can not be used from early demux */
949static inline int tcp_v4_sdif(struct sk_buff *skb)
950{
951#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
952 if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
953 return TCP_SKB_CB(skb)->header.h4.iif;
954#endif
955 return 0;
956}
957
958/* Due to TSO, an SKB can be composed of multiple actual
959 * packets. To keep these tracked properly, we use this.
960 */
961static inline int tcp_skb_pcount(const struct sk_buff *skb)
962{
963 return TCP_SKB_CB(skb)->tcp_gso_segs;
964}
965
966static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
967{
968 TCP_SKB_CB(skb)->tcp_gso_segs = segs;
969}
970
971static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
972{
973 TCP_SKB_CB(skb)->tcp_gso_segs += segs;
974}
975
976/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
977static inline int tcp_skb_mss(const struct sk_buff *skb)
978{
979 return TCP_SKB_CB(skb)->tcp_gso_size;
980}
981
982static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
983{
984 return likely(!TCP_SKB_CB(skb)->eor);
985}
986
987static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
988 const struct sk_buff *from)
989{
990 return likely(tcp_skb_can_collapse_to(to) &&
991 mptcp_skb_can_collapse(to, from) &&
992 skb_pure_zcopy_same(to, from));
993}
994
995/* Events passed to congestion control interface */
996enum tcp_ca_event {
997 CA_EVENT_TX_START, /* first transmit when no packets in flight */
998 CA_EVENT_CWND_RESTART, /* congestion window restart */
999 CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
1000 CA_EVENT_LOSS, /* loss timeout */
1001 CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
1002 CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
1003};
1004
1005/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
1006enum tcp_ca_ack_event_flags {
1007 CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
1008 CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
1009 CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
1010};
1011
1012/*
1013 * Interface for adding new TCP congestion control handlers
1014 */
1015#define TCP_CA_NAME_MAX 16
1016#define TCP_CA_MAX 128
1017#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
1018
1019#define TCP_CA_UNSPEC 0
1020
1021/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
1022#define TCP_CONG_NON_RESTRICTED 0x1
1023/* Requires ECN/ECT set on all packets */
1024#define TCP_CONG_NEEDS_ECN 0x2
1025#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
1026
1027union tcp_cc_info;
1028
1029struct ack_sample {
1030 u32 pkts_acked;
1031 s32 rtt_us;
1032 u32 in_flight;
1033};
1034
1035/* A rate sample measures the number of (original/retransmitted) data
1036 * packets delivered "delivered" over an interval of time "interval_us".
1037 * The tcp_rate.c code fills in the rate sample, and congestion
1038 * control modules that define a cong_control function to run at the end
1039 * of ACK processing can optionally choose to consult this sample when
1040 * setting cwnd and pacing rate.
1041 * A sample is invalid if "delivered" or "interval_us" is negative.
1042 */
1043struct rate_sample {
1044 u64 prior_mstamp; /* starting timestamp for interval */
1045 u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
1046 u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
1047 s32 delivered; /* number of packets delivered over interval */
1048 s32 delivered_ce; /* number of packets delivered w/ CE marks*/
1049 long interval_us; /* time for tp->delivered to incr "delivered" */
1050 u32 snd_interval_us; /* snd interval for delivered packets */
1051 u32 rcv_interval_us; /* rcv interval for delivered packets */
1052 long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
1053 int losses; /* number of packets marked lost upon ACK */
1054 u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
1055 u32 prior_in_flight; /* in flight before this ACK */
1056 u32 last_end_seq; /* end_seq of most recently ACKed packet */
1057 bool is_app_limited; /* is sample from packet with bubble in pipe? */
1058 bool is_retrans; /* is sample from retransmission? */
1059 bool is_ack_delayed; /* is this (likely) a delayed ACK? */
1060};
1061
1062struct tcp_congestion_ops {
1063/* fast path fields are put first to fill one cache line */
1064
1065 /* return slow start threshold (required) */
1066 u32 (*ssthresh)(struct sock *sk);
1067
1068 /* do new cwnd calculation (required) */
1069 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1070
1071 /* call before changing ca_state (optional) */
1072 void (*set_state)(struct sock *sk, u8 new_state);
1073
1074 /* call when cwnd event occurs (optional) */
1075 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1076
1077 /* call when ack arrives (optional) */
1078 void (*in_ack_event)(struct sock *sk, u32 flags);
1079
1080 /* hook for packet ack accounting (optional) */
1081 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1082
1083 /* override sysctl_tcp_min_tso_segs */
1084 u32 (*min_tso_segs)(struct sock *sk);
1085
1086 /* call when packets are delivered to update cwnd and pacing rate,
1087 * after all the ca_state processing. (optional)
1088 */
1089 void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
1090
1091
1092 /* new value of cwnd after loss (required) */
1093 u32 (*undo_cwnd)(struct sock *sk);
1094 /* returns the multiplier used in tcp_sndbuf_expand (optional) */
1095 u32 (*sndbuf_expand)(struct sock *sk);
1096
1097/* control/slow paths put last */
1098 /* get info for inet_diag (optional) */
1099 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1100 union tcp_cc_info *info);
1101
1102 char name[TCP_CA_NAME_MAX];
1103 struct module *owner;
1104 struct list_head list;
1105 u32 key;
1106 u32 flags;
1107
1108 /* initialize private data (optional) */
1109 void (*init)(struct sock *sk);
1110 /* cleanup private data (optional) */
1111 void (*release)(struct sock *sk);
1112} ____cacheline_aligned_in_smp;
1113
1114int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1115void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1116int tcp_update_congestion_control(struct tcp_congestion_ops *type,
1117 struct tcp_congestion_ops *old_type);
1118int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);
1119
1120void tcp_assign_congestion_control(struct sock *sk);
1121void tcp_init_congestion_control(struct sock *sk);
1122void tcp_cleanup_congestion_control(struct sock *sk);
1123int tcp_set_default_congestion_control(struct net *net, const char *name);
1124void tcp_get_default_congestion_control(struct net *net, char *name);
1125void tcp_get_available_congestion_control(char *buf, size_t len);
1126void tcp_get_allowed_congestion_control(char *buf, size_t len);
1127int tcp_set_allowed_congestion_control(char *allowed);
1128int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1129 bool cap_net_admin);
1130u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1131void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1132
1133u32 tcp_reno_ssthresh(struct sock *sk);
1134u32 tcp_reno_undo_cwnd(struct sock *sk);
1135void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1136extern struct tcp_congestion_ops tcp_reno;
1137
1138struct tcp_congestion_ops *tcp_ca_find(const char *name);
1139struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1140u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
1141#ifdef CONFIG_INET
1142char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1143#else
1144static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1145{
1146 return NULL;
1147}
1148#endif
1149
1150static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1151{
1152 const struct inet_connection_sock *icsk = inet_csk(sk);
1153
1154 return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1155}
1156
1157static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1158{
1159 const struct inet_connection_sock *icsk = inet_csk(sk);
1160
1161 if (icsk->icsk_ca_ops->cwnd_event)
1162 icsk->icsk_ca_ops->cwnd_event(sk, event);
1163}
1164
1165/* From tcp_cong.c */
1166void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
1167
1168/* From tcp_rate.c */
1169void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1170void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1171 struct rate_sample *rs);
1172void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1173 bool is_sack_reneg, struct rate_sample *rs);
1174void tcp_rate_check_app_limited(struct sock *sk);
1175
1176static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
1177{
1178 return t1 > t2 || (t1 == t2 && after(seq1, seq2));
1179}
1180
1181/* These functions determine how the current flow behaves in respect of SACK
1182 * handling. SACK is negotiated with the peer, and therefore it can vary
1183 * between different flows.
1184 *
1185 * tcp_is_sack - SACK enabled
1186 * tcp_is_reno - No SACK
1187 */
1188static inline int tcp_is_sack(const struct tcp_sock *tp)
1189{
1190 return likely(tp->rx_opt.sack_ok);
1191}
1192
1193static inline bool tcp_is_reno(const struct tcp_sock *tp)
1194{
1195 return !tcp_is_sack(tp);
1196}
1197
1198static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1199{
1200 return tp->sacked_out + tp->lost_out;
1201}
1202
1203/* This determines how many packets are "in the network" to the best
1204 * of our knowledge. In many cases it is conservative, but where
1205 * detailed information is available from the receiver (via SACK
1206 * blocks etc.) we can make more aggressive calculations.
1207 *
1208 * Use this for decisions involving congestion control, use just
1209 * tp->packets_out to determine if the send queue is empty or not.
1210 *
1211 * Read this equation as:
1212 *
1213 * "Packets sent once on transmission queue" MINUS
1214 * "Packets left network, but not honestly ACKed yet" PLUS
1215 * "Packets fast retransmitted"
1216 */
1217static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1218{
1219 return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1220}
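
/* Worked example (arbitrary values): with packets_out = 10, sacked_out = 2,
 * lost_out = 1 and retrans_out = 1, tcp_left_out() = 3 and
 * tcp_packets_in_flight() = 10 - 3 + 1 = 8: two segments were SACKed, one is
 * presumed lost, and one retransmission is still in the network.
 */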
1221
1222#define TCP_INFINITE_SSTHRESH 0x7fffffff
1223
1224static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
1225{
1226 return tp->snd_cwnd;
1227}
1228
1229static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
1230{
1231 WARN_ON_ONCE((int)val <= 0);
1232 tp->snd_cwnd = val;
1233}
1234
1235static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1236{
1237 return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
1238}
1239
1240static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1241{
1242 return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1243}
1244
1245static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1246{
1247 return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1248 (1 << inet_csk(sk)->icsk_ca_state);
1249}
1250
1251/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1252 * The exception is cwnd reduction phase, when cwnd is decreasing towards
1253 * ssthresh.
1254 */
1255static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1256{
1257 const struct tcp_sock *tp = tcp_sk(sk);
1258
1259 if (tcp_in_cwnd_reduction(sk))
1260 return tp->snd_ssthresh;
1261 else
1262 return max(tp->snd_ssthresh,
1263 ((tcp_snd_cwnd(tp) >> 1) +
1264 (tcp_snd_cwnd(tp) >> 2)));
1265}
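
/* Worked example (arbitrary values): outside of CWR/Recovery, with
 * snd_ssthresh = 8 and snd_cwnd = 20 this returns
 * max(8, (20 >> 1) + (20 >> 2)) = 15, i.e. ssthresh is reported as at least
 * three quarters of the current cwnd.
 */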
1266
1267/* Use define here intentionally to get WARN_ON location shown at the caller */
1268#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
1269
1270void tcp_enter_cwr(struct sock *sk);
1271__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1272
1273/* The maximum number of MSS of available cwnd for which TSO defers
1274 * sending if not using sysctl_tcp_tso_win_divisor.
1275 */
1276static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1277{
1278 return 3;
1279}
1280
1281/* Returns end sequence number of the receiver's advertised window */
1282static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1283{
1284 return tp->snd_una + tp->snd_wnd;
1285}
1286
1287/* We follow the spirit of RFC2861 to validate cwnd but implement a more
1288 * flexible approach. The RFC suggests cwnd should not be raised unless
1289 * it was fully used previously. And that's exactly what we do in
1290 * congestion avoidance mode. But in slow start we allow cwnd to grow
1291 * as long as the application has used half the cwnd.
1292 * Example :
1293 * cwnd is 10 (IW10), but application sends 9 frames.
1294 * We allow cwnd to reach 18 when all frames are ACKed.
1295 * This check is safe because it's as aggressive as slow start, which already
1296 * risks 100% overshoot. The advantage is that we discourage applications from
1297 * sending more filler packets or data just to artificially blow up cwnd
1298 * usage, and allow an application-limited process to probe bandwidth more aggressively.
1299 */
1300static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1301{
1302 const struct tcp_sock *tp = tcp_sk(sk);
1303
1304 if (tp->is_cwnd_limited)
1305 return true;
1306
1307 /* If in slow start, ensure cwnd grows to twice what was ACKed. */
1308 if (tcp_in_slow_start(tp))
1309 return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
1310
1311 return false;
1312}
1313
1314/* BBR congestion control needs pacing.
1315 * Same remark for SO_MAX_PACING_RATE.
1316 * sch_fq packet scheduler is efficiently handling pacing,
1317 * but is not always installed/used.
1318 * Return true if TCP stack should pace packets itself.
1319 */
1320static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1321{
1322 return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1323}
1324
1325/* Estimates in how many jiffies next packet for this flow can be sent.
1326 * Scheduling a retransmit timer too early would be silly.
1327 */
1328static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1329{
1330 s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1331
1332 return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1333}
1334
1335static inline void tcp_reset_xmit_timer(struct sock *sk,
1336 const int what,
1337 unsigned long when,
1338 const unsigned long max_when)
1339{
1340 inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
1341 max_when);
1342}
1343
1344/* Something is really bad: we could not queue an additional packet,
1345 * because the qdisc is full, the receiver sent a zero window, or we are paced.
1346 * We do not want to add fuel to the fire, or abort too early,
1347 * so make sure the timer we arm now is at least 200ms in the future,
1348 * regardless of the current icsk_rto value (as it could be ~2ms).
1349 */
1350static inline unsigned long tcp_probe0_base(const struct sock *sk)
1351{
1352 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1353}
1354
1355/* Variant of inet_csk_rto_backoff() used for zero window probes */
1356static inline unsigned long tcp_probe0_when(const struct sock *sk,
1357 unsigned long max_when)
1358{
1359 u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
1360 inet_csk(sk)->icsk_backoff);
1361 u64 when = (u64)tcp_probe0_base(sk) << backoff;
1362
1363 return (unsigned long)min_t(u64, when, max_when);
1364}
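
/* Worked example (assuming HZ=1000): with icsk_rto = 50 ms and
 * icsk_backoff = 3, tcp_probe0_base() is max(50 ms, TCP_RTO_MIN = 200 ms)
 * = 200 ms, and the probe is scheduled 200 ms << 3 = 1600 ms out, capped both
 * by the ilog2-derived backoff limit and by the max_when argument.
 */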
1365
1366static inline void tcp_check_probe_timer(struct sock *sk)
1367{
1368 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1369 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1370 tcp_probe0_base(sk), TCP_RTO_MAX);
1371}
1372
1373static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1374{
1375 tp->snd_wl1 = seq;
1376}
1377
1378static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1379{
1380 tp->snd_wl1 = seq;
1381}
1382
1383/*
1384 * Calculate(/check) TCP checksum
1385 */
1386static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1387 __be32 daddr, __wsum base)
1388{
1389 return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1390}
1391
1392static inline bool tcp_checksum_complete(struct sk_buff *skb)
1393{
1394 return !skb_csum_unnecessary(skb) &&
1395 __skb_checksum_complete(skb);
1396}
1397
1398bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1399 enum skb_drop_reason *reason);
1400
1401
1402int tcp_filter(struct sock *sk, struct sk_buff *skb);
1403void tcp_set_state(struct sock *sk, int state);
1404void tcp_done(struct sock *sk);
1405int tcp_abort(struct sock *sk, int err);
1406
1407static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1408{
1409 rx_opt->dsack = 0;
1410 rx_opt->num_sacks = 0;
1411}
1412
1413void tcp_cwnd_restart(struct sock *sk, s32 delta);
1414
1415static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1416{
1417 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1418 struct tcp_sock *tp = tcp_sk(sk);
1419 s32 delta;
1420
1421 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1422 tp->packets_out || ca_ops->cong_control)
1423 return;
1424 delta = tcp_jiffies32 - tp->lsndtime;
1425 if (delta > inet_csk(sk)->icsk_rto)
1426 tcp_cwnd_restart(sk, delta);
1427}
1428
1429/* Determine a window scaling and initial window to offer. */
1430void tcp_select_initial_window(const struct sock *sk, int __space,
1431 __u32 mss, __u32 *rcv_wnd,
1432 __u32 *window_clamp, int wscale_ok,
1433 __u8 *rcv_wscale, __u32 init_rcv_wnd);
1434
1435static inline int tcp_win_from_space(const struct sock *sk, int space)
1436{
1437 int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
1438
1439 return tcp_adv_win_scale <= 0 ?
1440 (space>>(-tcp_adv_win_scale)) :
1441 space - (space>>tcp_adv_win_scale);
1442}
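
/* Worked example: with the default sysctl_tcp_adv_win_scale (1) and
 * space = 131072 bytes, the advertised window is space - (space >> 1) =
 * 65536, i.e. half the buffer is set aside for skb overhead (truesize vs.
 * payload).  A value of 2 would advertise 3/4 of space, and a negative value
 * flips the meaning: -2 advertises space >> 2.
 */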
1443
1444/* Note: caller must be prepared to deal with negative returns */
1445static inline int tcp_space(const struct sock *sk)
1446{
1447 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1448 READ_ONCE(sk->sk_backlog.len) -
1449 atomic_read(&sk->sk_rmem_alloc));
1450}
1451
1452static inline int tcp_full_space(const struct sock *sk)
1453{
1454 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1455}
1456
1457static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1458{
1459 int unused_mem = sk_unused_reserved_mem(sk);
1460 struct tcp_sock *tp = tcp_sk(sk);
1461
1462 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
1463 if (unused_mem)
1464 tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1465 tcp_win_from_space(sk, unused_mem));
1466}
1467
1468void tcp_cleanup_rbuf(struct sock *sk, int copied);
1469void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1470
1471
1472/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1473 * If 87.5% (7/8) of the space has been consumed, we want to override
1474 * the SO_RCVLOWAT constraint, since we are receiving skbs with a too
1475 * small len/truesize ratio.
1476 */
1477static inline bool tcp_rmem_pressure(const struct sock *sk)
1478{
1479 int rcvbuf, threshold;
1480
1481 if (tcp_under_memory_pressure(sk))
1482 return true;
1483
1484 rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1485 threshold = rcvbuf - (rcvbuf >> 3);
1486
1487 return atomic_read(&sk->sk_rmem_alloc) > threshold;
1488}
1489
1490static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1491{
1492 const struct tcp_sock *tp = tcp_sk(sk);
1493 int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1494
1495 if (avail <= 0)
1496 return false;
1497
1498 return (avail >= target) || tcp_rmem_pressure(sk) ||
1499 (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1500}
1501
1502extern void tcp_openreq_init_rwin(struct request_sock *req,
1503 const struct sock *sk_listener,
1504 const struct dst_entry *dst);
1505
1506void tcp_enter_memory_pressure(struct sock *sk);
1507void tcp_leave_memory_pressure(struct sock *sk);
1508
1509static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1510{
1511 struct net *net = sock_net((struct sock *)tp);
1512
1513 return tp->keepalive_intvl ? :
1514 READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1515}
1516
1517static inline int keepalive_time_when(const struct tcp_sock *tp)
1518{
1519 struct net *net = sock_net((struct sock *)tp);
1520
1521 return tp->keepalive_time ? :
1522 READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1523}
1524
1525static inline int keepalive_probes(const struct tcp_sock *tp)
1526{
1527 struct net *net = sock_net((struct sock *)tp);
1528
1529 return tp->keepalive_probes ? :
1530 READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1531}
1532
1533static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1534{
1535 const struct inet_connection_sock *icsk = &tp->inet_conn;
1536
1537 return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1538 tcp_jiffies32 - tp->rcv_tstamp);
1539}
1540
1541static inline int tcp_fin_time(const struct sock *sk)
1542{
1543 int fin_timeout = tcp_sk(sk)->linger2 ? :
1544 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1545 const int rto = inet_csk(sk)->icsk_rto;
1546
1547 if (fin_timeout < (rto << 2) - (rto >> 1))
1548 fin_timeout = (rto << 2) - (rto >> 1);
1549
1550 return fin_timeout;
1551}
1552
1553static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1554 int paws_win)
1555{
1556 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1557 return true;
1558 if (unlikely(!time_before32(ktime_get_seconds(),
1559 rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
1560 return true;
1561 /*
1562 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1563 * while the following tcp messages have valid values. Ignore the 0 value,
1564 * or else a 'negative' tsval might forbid us from accepting their packets.
1565 */
1566 if (!rx_opt->ts_recent)
1567 return true;
1568 return false;
1569}
1570
1571static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1572 int rst)
1573{
1574 if (tcp_paws_check(rx_opt, 0))
1575 return false;
1576
1577 /* RST segments are not recommended to carry timestamp,
1578 and, if they do, it is recommended to ignore PAWS because
1579 "their cleanup function should take precedence over timestamps."
1580 Certainly, this is a mistake. It is necessary to understand the reasons
1581 for this constraint before relaxing it: if a peer reboots, its clock may go
1582 out-of-sync and half-open connections will not be reset.
1583 Actually, the problem would not exist if all
1584 the implementations followed the draft about maintaining clocks
1585 across reboots. Linux-2.2 DOES NOT!
1586
1587 However, we can relax time bounds for RST segments to MSL.
1588 */
1589 if (rst && !time_before32(ktime_get_seconds(),
1590 rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1591 return false;
1592 return true;
1593}
1594
1595bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1596 int mib_idx, u32 *last_oow_ack_time);
1597
1598static inline void tcp_mib_init(struct net *net)
1599{
1600 /* See RFC 2012 */
1601 TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1602 TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1603 TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1604 TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1605}
1606
1607/* from STCP */
1608static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1609{
1610 tp->lost_skb_hint = NULL;
1611}
1612
1613static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1614{
1615 tcp_clear_retrans_hints_partial(tp);
1616 tp->retransmit_skb_hint = NULL;
1617}
1618
1619union tcp_md5_addr {
1620 struct in_addr a4;
1621#if IS_ENABLED(CONFIG_IPV6)
1622 struct in6_addr a6;
1623#endif
1624};
1625
1626/* - key database */
1627struct tcp_md5sig_key {
1628 struct hlist_node node;
1629 u8 keylen;
1630 u8 family; /* AF_INET or AF_INET6 */
1631 u8 prefixlen;
1632 u8 flags;
1633 union tcp_md5_addr addr;
1634 int l3index; /* set if key added with L3 scope */
1635 u8 key[TCP_MD5SIG_MAXKEYLEN];
1636 struct rcu_head rcu;
1637};
1638
1639/* - sock block */
1640struct tcp_md5sig_info {
1641 struct hlist_head head;
1642 struct rcu_head rcu;
1643};
1644
1645/* - pseudo header */
1646struct tcp4_pseudohdr {
1647 __be32 saddr;
1648 __be32 daddr;
1649 __u8 pad;
1650 __u8 protocol;
1651 __be16 len;
1652};
1653
1654struct tcp6_pseudohdr {
1655 struct in6_addr saddr;
1656 struct in6_addr daddr;
1657 __be32 len;
1658 __be32 protocol; /* including padding */
1659};
1660
1661union tcp_md5sum_block {
1662 struct tcp4_pseudohdr ip4;
1663#if IS_ENABLED(CONFIG_IPV6)
1664 struct tcp6_pseudohdr ip6;
1665#endif
1666};
1667
1668/* - pool: digest algorithm, hash description and scratch buffer */
1669struct tcp_md5sig_pool {
1670 struct ahash_request *md5_req;
1671 void *scratch;
1672};
1673
1674/* - functions */
1675int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1676 const struct sock *sk, const struct sk_buff *skb);
1677int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1678 int family, u8 prefixlen, int l3index, u8 flags,
1679 const u8 *newkey, u8 newkeylen);
1680int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1681 int family, u8 prefixlen, int l3index,
1682 struct tcp_md5sig_key *key);
1683
1684int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1685 int family, u8 prefixlen, int l3index, u8 flags);
1686struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1687 const struct sock *addr_sk);
1688
1689#ifdef CONFIG_TCP_MD5SIG
1690#include <linux/jump_label.h>
1691extern struct static_key_false_deferred tcp_md5_needed;
1692struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1693 const union tcp_md5_addr *addr,
1694 int family);
1695static inline struct tcp_md5sig_key *
1696tcp_md5_do_lookup(const struct sock *sk, int l3index,
1697 const union tcp_md5_addr *addr, int family)
1698{
1699 if (!static_branch_unlikely(&tcp_md5_needed.key))
1700 return NULL;
1701 return __tcp_md5_do_lookup(sk, l3index, addr, family);
1702}
1703
1704enum skb_drop_reason
1705tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
1706 const void *saddr, const void *daddr,
1707 int family, int dif, int sdif);
1708
1709
1710#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
1711#else
1712static inline struct tcp_md5sig_key *
1713tcp_md5_do_lookup(const struct sock *sk, int l3index,
1714 const union tcp_md5_addr *addr, int family)
1715{
1716 return NULL;
1717}
1718
1719static inline enum skb_drop_reason
1720tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
1721 const void *saddr, const void *daddr,
1722 int family, int dif, int sdif)
1723{
1724 return SKB_NOT_DROPPED_YET;
1725}
1726#define tcp_twsk_md5_key(twsk) NULL
1727#endif
1728
1729bool tcp_alloc_md5sig_pool(void);
1730
1731struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1732static inline void tcp_put_md5sig_pool(void)
1733{
1734 local_bh_enable();
1735}
1736
1737int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1738 unsigned int header_len);
1739int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1740 const struct tcp_md5sig_key *key);
1741
1742/* From tcp_fastopen.c */
1743void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1744 struct tcp_fastopen_cookie *cookie);
1745void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1746 struct tcp_fastopen_cookie *cookie, bool syn_lost,
1747 u16 try_exp);
1748struct tcp_fastopen_request {
1749 /* Fast Open cookie. Size 0 means a cookie request */
1750 struct tcp_fastopen_cookie cookie;
1751 struct msghdr *data; /* data in MSG_FASTOPEN */
1752 size_t size;
1753 int copied; /* queued in tcp_connect() */
1754 struct ubuf_info *uarg;
1755};
1756void tcp_free_fastopen_req(struct tcp_sock *tp);
1757void tcp_fastopen_destroy_cipher(struct sock *sk);
1758void tcp_fastopen_ctx_destroy(struct net *net);
1759int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1760 void *primary_key, void *backup_key);
1761int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
1762 u64 *key);
1763void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1764struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1765 struct request_sock *req,
1766 struct tcp_fastopen_cookie *foc,
1767 const struct dst_entry *dst);
1768void tcp_fastopen_init_key_once(struct net *net);
1769bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1770 struct tcp_fastopen_cookie *cookie);
1771bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1772#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
1773#define TCP_FASTOPEN_KEY_MAX 2
1774#define TCP_FASTOPEN_KEY_BUF_LENGTH \
1775 (TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
1776
1777/* Fastopen key context */
1778struct tcp_fastopen_context {
1779 siphash_key_t key[TCP_FASTOPEN_KEY_MAX];
1780 int num;
1781 struct rcu_head rcu;
1782};
1783
1784void tcp_fastopen_active_disable(struct sock *sk);
1785bool tcp_fastopen_active_should_disable(struct sock *sk);
1786void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1787void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1788
1789/* Caller needs to wrap with rcu_read_(un)lock() */
1790static inline
1791struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
1792{
1793 struct tcp_fastopen_context *ctx;
1794
1795 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
1796 if (!ctx)
1797 ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
1798 return ctx;
1799}
1800
1801static inline
1802bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
1803 const struct tcp_fastopen_cookie *orig)
1804{
1805 if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
1806 orig->len == foc->len &&
1807 !memcmp(orig->val, foc->val, foc->len))
1808 return true;
1809 return false;
1810}
1811
1812static inline
1813int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
1814{
1815 return ctx->num;
1816}
1817
1818/* Latencies incurred by various limits for a sender. They are
1819 * chronograph-like stats that are mutually exclusive.
1820 */
1821enum tcp_chrono {
1822 TCP_CHRONO_UNSPEC,
1823 TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1824 TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1825 TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1826 __TCP_CHRONO_MAX,
1827};
1828
1829void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1830void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
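
/* Typical usage sketch (illustrative): a sender stalled on the receive window
 * calls tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED) and later
 * tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED) once the window reopens, so
 * only one chrono accumulates at any time.
 */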
1831
1832/* This helper is needed, because skb->tcp_tsorted_anchor uses
1833	 * the same memory storage as skb->destructor/_skb_refdst
1834 */
1835static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1836{
1837 skb->destructor = NULL;
1838 skb->_skb_refdst = 0UL;
1839}
1840
1841#define tcp_skb_tsorted_save(skb) { \
1842 unsigned long _save = skb->_skb_refdst; \
1843 skb->_skb_refdst = 0UL;
1844
1845#define tcp_skb_tsorted_restore(skb) \
1846 skb->_skb_refdst = _save; \
1847}
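
/* Illustrative use of the save/restore pair above (a sketch; the callee name
 * is hypothetical):
 *
 *	tcp_skb_tsorted_save(skb) {
 *		err = some_transmit_helper(sk, skb);
 *	} tcp_skb_tsorted_restore(skb);
 *
 * _skb_refdst is parked in the local _save for the body and put back after.
 */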
1848
1849void tcp_write_queue_purge(struct sock *sk);
1850
1851static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1852{
1853 return skb_rb_first(&sk->tcp_rtx_queue);
1854}
1855
1856static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1857{
1858 return skb_rb_last(&sk->tcp_rtx_queue);
1859}
1860
1861static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1862{
1863 return skb_peek_tail(&sk->sk_write_queue);
1864}
1865
1866#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
1867 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1868
1869static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1870{
1871 return skb_peek(&sk->sk_write_queue);
1872}
1873
1874static inline bool tcp_skb_is_last(const struct sock *sk,
1875 const struct sk_buff *skb)
1876{
1877 return skb_queue_is_last(&sk->sk_write_queue, skb);
1878}
1879
1880/**
1881 * tcp_write_queue_empty - test if the write queue holds no unsent payload (or FIN)
1882 * @sk: socket
1883 *
1884 * Since the write queue can have a temporary empty skb in it,
1885 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
1886 */
1887static inline bool tcp_write_queue_empty(const struct sock *sk)
1888{
1889 const struct tcp_sock *tp = tcp_sk(sk);
1890
1891 return tp->write_seq == tp->snd_nxt;
1892}
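
/* write_seq is the tail of data queued by the application and snd_nxt the
 * next sequence to be sent; when they are equal, nothing queued remains
 * unsent, which is what "empty" means here.
 */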
1893
1894static inline bool tcp_rtx_queue_empty(const struct sock *sk)
1895{
1896 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
1897}
1898
1899static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
1900{
1901 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1902}
1903
1904static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1905{
1906 __skb_queue_tail(&sk->sk_write_queue, skb);
1907
1908 /* Queue it, remembering where we must start sending. */
1909 if (sk->sk_write_queue.next == skb)
1910 tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1911}
1912
1913/* Insert new before skb on the write queue of sk. */
1914static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1915 struct sk_buff *skb,
1916 struct sock *sk)
1917{
1918 __skb_queue_before(&sk->sk_write_queue, skb, new);
1919}
1920
1921static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1922{
1923 tcp_skb_tsorted_anchor_cleanup(skb);
1924 __skb_unlink(skb, &sk->sk_write_queue);
1925}
1926
1927void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
1928
1929static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1930{
1931 tcp_skb_tsorted_anchor_cleanup(skb);
1932 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
1933}
1934
1935static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
1936{
1937 list_del(&skb->tcp_tsorted_anchor);
1938 tcp_rtx_queue_unlink(skb, sk);
1939 tcp_wmem_free_skb(sk, skb);
1940}
1941
1942static inline void tcp_push_pending_frames(struct sock *sk)
1943{
1944 if (tcp_send_head(sk)) {
1945 struct tcp_sock *tp = tcp_sk(sk);
1946
1947 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1948 }
1949}
1950
1951/* Start sequence of the skb just after the highest skb with SACKed
1952 * bit; valid only if sacked_out > 0 or when the caller has otherwise
1953 * ensured its validity.
1954 */
1955static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1956{
1957 if (!tp->sacked_out)
1958 return tp->snd_una;
1959
1960 if (tp->highest_sack == NULL)
1961 return tp->snd_nxt;
1962
1963 return TCP_SKB_CB(tp->highest_sack)->seq;
1964}
1965
1966static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1967{
1968 tcp_sk(sk)->highest_sack = skb_rb_next(skb);
1969}
1970
1971static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1972{
1973 return tcp_sk(sk)->highest_sack;
1974}
1975
1976static inline void tcp_highest_sack_reset(struct sock *sk)
1977{
1978 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
1979}
1980
1981/* Called when old skb is about to be deleted and replaced by new skb */
1982static inline void tcp_highest_sack_replace(struct sock *sk,
1983 struct sk_buff *old,
1984 struct sk_buff *new)
1985{
1986 if (old == tcp_highest_sack(sk))
1987 tcp_sk(sk)->highest_sack = new;
1988}
1989
1990/* This helper checks if socket has IP_TRANSPARENT set */
1991static inline bool inet_sk_transparent(const struct sock *sk)
1992{
1993 switch (sk->sk_state) {
1994 case TCP_TIME_WAIT:
1995 return inet_twsk(sk)->tw_transparent;
1996 case TCP_NEW_SYN_RECV:
1997 return inet_rsk(inet_reqsk(sk))->no_srccheck;
1998 }
1999 return inet_sk(sk)->transparent;
2000}
2001
2002/* Determines whether this is a thin stream (which may suffer from
2003 * increased latency). Used to trigger latency-reducing mechanisms.
2004 */
2005static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2006{
2007 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2008}
2009
2010/* /proc */
2011enum tcp_seq_states {
2012 TCP_SEQ_STATE_LISTENING,
2013 TCP_SEQ_STATE_ESTABLISHED,
2014};
2015
2016void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2017void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2018void tcp_seq_stop(struct seq_file *seq, void *v);
2019
2020struct tcp_seq_afinfo {
2021 sa_family_t family;
2022};
2023
2024struct tcp_iter_state {
2025 struct seq_net_private p;
2026 enum tcp_seq_states state;
2027 struct sock *syn_wait_sk;
2028 int bucket, offset, sbucket, num;
2029 loff_t last_pos;
2030};
2031
2032extern struct request_sock_ops tcp_request_sock_ops;
2033extern struct request_sock_ops tcp6_request_sock_ops;
2034
2035void tcp_v4_destroy_sock(struct sock *sk);
2036
2037struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2038 netdev_features_t features);
2039struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
2040INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2041INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2042INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
2043INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
2044void tcp_gro_complete(struct sk_buff *skb);
2045
2046void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
2047
2048static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2049{
2050 struct net *net = sock_net((struct sock *)tp);
2051 return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2052}
2053
2054bool tcp_stream_memory_free(const struct sock *sk, int wake);
2055
2056#ifdef CONFIG_PROC_FS
2057int tcp4_proc_init(void);
2058void tcp4_proc_exit(void);
2059#endif
2060
2061int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2062int tcp_conn_request(struct request_sock_ops *rsk_ops,
2063 const struct tcp_request_sock_ops *af_ops,
2064 struct sock *sk, struct sk_buff *skb);
2065
2066/* TCP af-specific functions */
2067struct tcp_sock_af_ops {
2068#ifdef CONFIG_TCP_MD5SIG
2069 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
2070 const struct sock *addr_sk);
2071 int (*calc_md5_hash)(char *location,
2072 const struct tcp_md5sig_key *md5,
2073 const struct sock *sk,
2074 const struct sk_buff *skb);
2075 int (*md5_parse)(struct sock *sk,
2076 int optname,
2077 sockptr_t optval,
2078 int optlen);
2079#endif
2080};
2081
2082struct tcp_request_sock_ops {
2083 u16 mss_clamp;
2084#ifdef CONFIG_TCP_MD5SIG
2085 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2086 const struct sock *addr_sk);
2087 int (*calc_md5_hash) (char *location,
2088 const struct tcp_md5sig_key *md5,
2089 const struct sock *sk,
2090 const struct sk_buff *skb);
2091#endif
2092#ifdef CONFIG_SYN_COOKIES
2093 __u32 (*cookie_init_seq)(const struct sk_buff *skb,
2094 __u16 *mss);
2095#endif
2096 struct dst_entry *(*route_req)(const struct sock *sk,
2097 struct sk_buff *skb,
2098 struct flowi *fl,
2099 struct request_sock *req);
2100 u32 (*init_seq)(const struct sk_buff *skb);
2101 u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
2102 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2103 struct flowi *fl, struct request_sock *req,
2104 struct tcp_fastopen_cookie *foc,
2105 enum tcp_synack_type synack_type,
2106 struct sk_buff *syn_skb);
2107};
2108
2109extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2110#if IS_ENABLED(CONFIG_IPV6)
2111extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2112#endif
2113
2114#ifdef CONFIG_SYN_COOKIES
2115static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2116 const struct sock *sk, struct sk_buff *skb,
2117 __u16 *mss)
2118{
2119 tcp_synq_overflow(sk);
2120 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2121 return ops->cookie_init_seq(skb, mss);
2122}
2123#else
2124static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2125 const struct sock *sk, struct sk_buff *skb,
2126 __u16 *mss)
2127{
2128 return 0;
2129}
2130#endif
2131
2132int tcpv4_offload_init(void);
2133
2134void tcp_v4_init(void);
2135void tcp_init(void);
2136
2137/* tcp_recovery.c */
2138void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2139void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2140extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2141 u32 reo_wnd);
2142extern bool tcp_rack_mark_lost(struct sock *sk);
2143extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
2144 u64 xmit_time);
2145extern void tcp_rack_reo_timeout(struct sock *sk);
2146extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2147
2148/* tcp_plb.c */
2149
2150/*
2151 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
2152 * expects cong_ratio, the fraction of traffic that experienced
2153 * congestion over a single RTT. To avoid floating point operations,
2154 * this fraction should be scaled by (1 << TCP_PLB_SCALE) before being passed in.
2155 */
2156#define TCP_PLB_SCALE 8
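
/* Example: if one quarter of the traffic in a round experienced congestion,
 * the caller passes cong_ratio = (1 << TCP_PLB_SCALE) / 4 = 64 rather than 0.25.
 */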
2157
2158/* State for PLB (Protective Load Balancing) for a single TCP connection. */
2159struct tcp_plb_state {
2160 u8 consec_cong_rounds:5, /* consecutive congested rounds */
2161 unused:3;
2162 u32 pause_until; /* jiffies32 when PLB can resume rerouting */
2163};
2164
2165static inline void tcp_plb_init(const struct sock *sk,
2166 struct tcp_plb_state *plb)
2167{
2168 plb->consec_cong_rounds = 0;
2169 plb->pause_until = 0;
2170}
2171void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2172 const int cong_ratio);
2173void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2174void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2175
2176/* At how many usecs into the future should the RTO fire? */
2177static inline s64 tcp_rto_delta_us(const struct sock *sk)
2178{
2179 const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2180 u32 rto = inet_csk(sk)->icsk_rto;
2181 u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2182
2183 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2184}
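
/* Sketch of the arithmetic above with made-up numbers: if the head skb was
 * (re)sent at t = 1,000,000 us, icsk_rto converts to 250,000 us and
 * tcp_mstamp is 1,100,000 us, the timer should fire 150,000 us from now;
 * a negative result means the RTO is already due.
 */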
2185
2186/*
2187 * Save and compile IPv4 options, and return a pointer to the copy
2188 */
2189static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2190 struct sk_buff *skb)
2191{
2192 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2193 struct ip_options_rcu *dopt = NULL;
2194
2195 if (opt->optlen) {
2196 int opt_size = sizeof(*dopt) + opt->optlen;
2197
2198 dopt = kmalloc(opt_size, GFP_ATOMIC);
2199 if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2200 kfree(dopt);
2201 dopt = NULL;
2202 }
2203 }
2204 return dopt;
2205}
2206
2207/* locally generated TCP pure ACKs have skb->truesize == 2
2208 * (check tcp_send_ack() in net/ipv4/tcp_output.c )
2209 * Checking truesize is much faster than dissecting the packet headers
2210 * to find out (think of GRE encapsulation, IPv4, IPv6, ...).
2211 */
2212static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2213{
2214 return skb->truesize == 2;
2215}
2216
2217static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2218{
2219 skb->truesize = 2;
2220}
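
/* A truesize of 2 is otherwise impossible for a real skb, so the marker above
 * cannot collide with a genuine size; e.g. __tcp_send_ack() is expected to
 * tag its freshly allocated ACK skb this way.
 */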
2221
2222static inline int tcp_inq(struct sock *sk)
2223{
2224 struct tcp_sock *tp = tcp_sk(sk);
2225 int answ;
2226
2227 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2228 answ = 0;
2229 } else if (sock_flag(sk, SOCK_URGINLINE) ||
2230 !tp->urg_data ||
2231 before(tp->urg_seq, tp->copied_seq) ||
2232 !before(tp->urg_seq, tp->rcv_nxt)) {
2233
2234 answ = tp->rcv_nxt - tp->copied_seq;
2235
2236 /* Subtract 1, if FIN was received */
2237 if (answ && sock_flag(sk, SOCK_DONE))
2238 answ--;
2239 } else {
2240 answ = tp->urg_seq - tp->copied_seq;
2241 }
2242
2243 return answ;
2244}
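
/* Worked example (assumed values): with copied_seq == 1000, rcv_nxt == 1501
 * and a FIN already received (SOCK_DONE), tcp_inq() reports
 * 1501 - 1000 - 1 == 500 readable bytes, since the FIN consumes one
 * sequence number.
 */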
2245
2246int tcp_peek_len(struct socket *sock);
2247
2248static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2249{
2250 u16 segs_in;
2251
2252 segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2253
2254 /* We update these fields while other threads might
2255 * read them from tcp_get_info()
2256 */
2257 WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
2258 if (skb->len > tcp_hdrlen(skb))
2259 WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
2260}
2261
2262/*
2263 * TCP listen path runs lockless.
2264 * We forced "struct sock" to be const qualified to make sure
2265 * we don't modify one of its fields by mistake.
2266 * Here, we increment sk_drops, which is an atomic_t, so it is
2267 * safe to cast the const qualifier away.
2268 */
2269static inline void tcp_listendrop(const struct sock *sk)
2270{
2271 atomic_inc(&((struct sock *)sk)->sk_drops);
2272 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2273}
2274
2275enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2276
2277/*
2278 * Interface for adding Upper Level Protocols over TCP
2279 */
2280
2281#define TCP_ULP_NAME_MAX 16
2282#define TCP_ULP_MAX 128
2283#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2284
2285struct tcp_ulp_ops {
2286 struct list_head list;
2287
2288 /* initialize ulp */
2289 int (*init)(struct sock *sk);
2290 /* update ulp */
2291 void (*update)(struct sock *sk, struct proto *p,
2292 void (*write_space)(struct sock *sk));
2293 /* cleanup ulp */
2294 void (*release)(struct sock *sk);
2295 /* diagnostic */
2296 int (*get_info)(const struct sock *sk, struct sk_buff *skb);
2297 size_t (*get_info_size)(const struct sock *sk);
2298 /* clone ulp */
2299 void (*clone)(const struct request_sock *req, struct sock *newsk,
2300 const gfp_t priority);
2301
2302 char name[TCP_ULP_NAME_MAX];
2303 struct module *owner;
2304};
2305int tcp_register_ulp(struct tcp_ulp_ops *type);
2306void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2307int tcp_set_ulp(struct sock *sk, const char *name);
2308void tcp_get_available_ulp(char *buf, size_t len);
2309void tcp_cleanup_ulp(struct sock *sk);
2310void tcp_update_ulp(struct sock *sk, struct proto *p,
2311 void (*write_space)(struct sock *sk));
2312
2313#define MODULE_ALIAS_TCP_ULP(name) \
2314 __MODULE_INFO(alias, alias_userspace, name); \
2315 __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
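
/* A ULP module typically declares the alias so request_module() can find it,
 * e.g. the kernel TLS module uses MODULE_ALIAS_TCP_ULP("tls").
 */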
2316
2317#ifdef CONFIG_NET_SOCK_MSG
2318struct sk_msg;
2319struct sk_psock;
2320
2321#ifdef CONFIG_BPF_SYSCALL
2322struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
2323int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2324void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2325#endif /* CONFIG_BPF_SYSCALL */
2326
2327#ifdef CONFIG_INET
2328void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2329#else
2330static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
2331{
2332}
2333#endif
2334
2335int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2336 struct sk_msg *msg, u32 bytes, int flags);
2337#endif /* CONFIG_NET_SOCK_MSG */
2338
2339#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
2340static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2341{
2342}
2343#endif
2344
2345#ifdef CONFIG_CGROUP_BPF
2346static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2347 struct sk_buff *skb,
2348 unsigned int end_offset)
2349{
2350 skops->skb = skb;
2351 skops->skb_data_end = skb->data + end_offset;
2352}
2353#else
2354static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2355 struct sk_buff *skb,
2356 unsigned int end_offset)
2357{
2358}
2359#endif
2360
2361 /* Call a BPF_SOCK_OPS program that returns an int. If the return value
2362 * is < 0, then the BPF op failed (for example if the loaded BPF
2363 * program does not support the chosen operation or there is no BPF
2364 * program loaded).
2365 */
2366#ifdef CONFIG_BPF
2367static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2368{
2369 struct bpf_sock_ops_kern sock_ops;
2370 int ret;
2371
2372 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2373 if (sk_fullsock(sk)) {
2374 sock_ops.is_fullsock = 1;
2375 sock_owned_by_me(sk);
2376 }
2377
2378 sock_ops.sk = sk;
2379 sock_ops.op = op;
2380 if (nargs > 0)
2381 memcpy(sock_ops.args, args, nargs * sizeof(*args));
2382
2383 ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2384 if (ret == 0)
2385 ret = sock_ops.reply;
2386 else
2387 ret = -1;
2388 return ret;
2389}
2390
2391static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2392{
2393 u32 args[2] = {arg1, arg2};
2394
2395 return tcp_call_bpf(sk, op, 2, args);
2396}
2397
2398static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2399 u32 arg3)
2400{
2401 u32 args[3] = {arg1, arg2, arg3};
2402
2403 return tcp_call_bpf(sk, op, 3, args);
2404}
2405
2406#else
2407static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2408{
2409 return -EPERM;
2410}
2411
2412static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2413{
2414 return -EPERM;
2415}
2416
2417static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2418 u32 arg3)
2419{
2420 return -EPERM;
2421}
2422
2423#endif
2424
2425static inline u32 tcp_timeout_init(struct sock *sk)
2426{
2427 int timeout;
2428
2429 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2430
2431 if (timeout <= 0)
2432 timeout = TCP_TIMEOUT_INIT;
2433 return min_t(int, timeout, TCP_RTO_MAX);
2434}
2435
2436static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2437{
2438 int rwnd;
2439
2440 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2441
2442 if (rwnd < 0)
2443 rwnd = 0;
2444 return rwnd;
2445}
2446
2447static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2448{
2449 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2450}
2451
2452static inline void tcp_bpf_rtt(struct sock *sk)
2453{
2454 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2455 tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
2456}
2457
2458#if IS_ENABLED(CONFIG_SMC)
2459extern struct static_key_false tcp_have_smc;
2460#endif
2461
2462#if IS_ENABLED(CONFIG_TLS_DEVICE)
2463void clean_acked_data_enable(struct inet_connection_sock *icsk,
2464 void (*cad)(struct sock *sk, u32 ack_seq));
2465void clean_acked_data_disable(struct inet_connection_sock *icsk);
2466void clean_acked_data_flush(void);
2467#endif
2468
2469DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2470static inline void tcp_add_tx_delay(struct sk_buff *skb,
2471 const struct tcp_sock *tp)
2472{
2473 if (static_branch_unlikely(&tcp_tx_delay_enabled))
2474 skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2475}
2476
2477/* Compute Earliest Departure Time for some control packets
2478 * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
2479 */
2480static inline u64 tcp_transmit_time(const struct sock *sk)
2481{
2482 if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2483 u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2484 tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2485
2486 return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2487 }
2488 return 0;
2489}
2490
2491#endif /* _TCP_H */