/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/indirect_call_wrapper.h>

/**
 * struct udp_skb_cb - UDP(-Lite) private variables
 *
 * @header:      private variables used by IPv4/IPv6
 * @cscov:       checksum coverage length (UDP-Lite only)
 * @partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16		cscov;
	__u8		partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))

/**
 * struct udp_hslot - UDP hash slot
 *
 * @head:	head of list of sockets
 * @count:	number of sockets in 'head' list
 * @lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));

/**
 * struct udp_table - UDP table
 *
 * @hash:	hash table, sockets are hashed on (local port)
 * @hash2:	hash table, sockets are hashed on (local port, local address)
 * @mask:	number of slots in hash tables, minus 1
 * @log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net, unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}

/*
 * For the secondary hash, net_hash_mix() is applied before calling
 * udp_hashslot2(); this is what distinguishes it from udp_hashslot().
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}
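/*
 * Illustrative sketch, not part of this header: how a lookup path can take
 * the primary slot for a local port and walk the sockets hashed there, in
 * the style of the lookup code in net/ipv4/udp.c. The udp_ex_walk_port()
 * name is hypothetical.
 */
static inline void udp_ex_walk_port(struct net *net, unsigned int port)
{
	struct udp_hslot *hslot = udp_hashslot(&udp_table, net, port);
	struct sock *sk;

	spin_lock_bh(&hslot->lock);
	sk_for_each(sk, &hslot->head) {
		/* every socket in this slot hashed to the same bucket
		 * via udp_hashfn(net, port, mask)
		 */
	}
	spin_unlock_bh(&hslot->lock);
}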
extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;
DECLARE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 * Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}

/**
 * udp_csum_outgoing - compute UDPv4/v6 checksum over fragments
 * @sk:  socket we are writing to
 * @skb: sk_buff containing the filled-in UDP header
 *       (checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
				     __be16 dport);

void udp_v6_early_demux(struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features, bool is_ipv6);

static inline void udp_lib_init_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	skb_queue_head_init(&up->reader_queue);
	up->forward_threshold = sk->sk_rcvbuf >> 2;
	set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
}

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash, caller has indicated an
			 * Ethernet packet so use that to compute a hash.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet, set
			 * it to some consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit to
	 * minimize the possibility that any useful information is leaked to
	 * an attacker. Only the upper 16 bits are relevant in the
	 * computation for the 16 bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}
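/*
 * Illustrative sketch, not part of this header: deriving the source port of
 * an encapsulating UDP tunnel header from the inner flow hash, the way
 * tunnel drivers such as vxlan use udp_flow_src_port(). Passing 0/0 for
 * min/max falls back to the netns local port range; the
 * udp_ex_tunnel_sport() name is hypothetical.
 */
static inline __be16 udp_ex_tunnel_sport(struct net *net, struct sk_buff *skb)
{
	return udp_flow_src_port(net, skb, 0, 0, true);
}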
static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}

static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

/* net/ipv4/udp.c */
void udp_destruct_common(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off,
			       int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, &off, err);
}

int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif, int sdif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);

/* UDP uses skb->dev_scratch to cache as much information as possible and
 * avoid multiple cache misses on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored in 16 bits since the udp header has
	 * already been validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	int n;

	n = copy_to_iter(skb->data + off, len, to);
	if (n == len)
		return 0;

	iov_iter_revert(to, n);
	return -EFAULT;
}

/*
 * SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
#define __UDP_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)

#define __UDP6_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
#define UDP6_INC_STATS(net, field, __lite) do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
				 sock_net(sk)->mib.udp_statistics) :	\
	       (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
				 sock_net(sk)->mib.udp_stats_in6);	\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)

#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t		family;
	struct udp_table	*udp_table;
};

struct udp_iter_state {
	struct seq_net_private	p;
	int			bucket;
	struct udp_seq_afinfo	*bpf_seq_afinfo;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

extern const struct seq_operations udp_seq_ops;
extern const struct seq_operations udp6_seq_ops;

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
void udp_encap_disable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif

static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	netdev_features_t features = NETIF_F_SG;
	struct sk_buff *segs;

	/* Avoid csum recalculation by skb_segment unless userspace explicitly
	 * asks for the final checksum values
	 */
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
	 * packets in udp_gro_complete_segment. As does UDP GSO, verified by
	 * udp_send_skb. But when those packets are looped in dev_loopback_xmit
	 * their ip_summed CHECKSUM_NONE is changed to CHECKSUM_UNNECESSARY.
	 * Reset in this specific case, where PARTIAL is both correct and
	 * required.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* the GSO CB lies after the UDP one, no need to save and restore any
	 * CB fragment
	 */
	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR_OR_NULL(segs)) {
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}
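/*
 * Illustrative sketch, not part of this header: consuming udp_rcv_segment()
 * by splitting a GSO skb into MSS-sized segments and handing each one to a
 * per-segment receive callback, in the style of udp_queue_rcv_skb(). On
 * error the original skb has already been freed and accounted for above, so
 * there is nothing left to clean up. The udp_ex_rcv_gso() name is
 * hypothetical.
 */
static inline void udp_ex_rcv_gso(struct sock *sk, struct sk_buff *skb,
				  void (*one)(struct sock *, struct sk_buff *))
{
	struct sk_buff *next, *segs = udp_rcv_segment(sk, skb, true);

	if (!segs)
		return;

	skb_list_walk_safe(segs, skb, next) {
		skb_mark_not_on_list(skb);
		one(sk, skb);	/* the callback consumes its segment */
	}
}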
static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
{
	/* UDP-lite can't land here - no GRO */
	WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);

	/* UDP packets generated with UDP_SEGMENT and traversing:
	 *
	 * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)
	 *
	 * can reach a UDP socket with CHECKSUM_NONE, because
	 * __iptunnel_pull_header() converts CHECKSUM_PARTIAL into NONE.
	 * SKB_GSO_UDP_L4 or SKB_GSO_FRAGLIST packets with no UDP tunnel will
	 * have a valid checksum, as the GRO engine validates the UDP csum
	 * before the aggregation and nobody strips such info in between.
	 * Instead of adding another check in the tunnel fastpath, we can force
	 * a valid csum after the segmentation.
	 * Additionally, fix up the UDP CB.
	 */
	UDP_SKB_CB(skb)->cscov = skb->len;
	if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
		skb->csum_valid = 1;
}

#ifdef CONFIG_BPF_SYSCALL
struct sk_psock;
struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
#endif

#endif /* _UDP_H */