/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>
#include <linux/static_key.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>

#define IPV4_MAX_PMTU	65535U	/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU	68	/* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
	int iif;
	struct ip_options opt;	/* Compiled IP options */
	u16 flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)
#define IPSKB_NOPOLICY		BIT(8)

	u16 frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}
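
/* Worked example: iph->ihl counts 32-bit words, so a header without
 * options (ihl == 5) gives ip_hdrlen() == 20 bytes, and the largest
 * legal header (ihl == 15) gives 60 bytes.
 */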

struct ipcm_cookie {
	struct sockcm_cookie sockc;
	__be32 addr;
	int oif;
	struct ip_options_rcu *opt;
	__u8 ttl;
	__s16 tos;
	char priority;
	__u16 gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.mark = inet->sk.sk_mark;
	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
	ipcm->addr = inet->inet_saddr;
}
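
/* Usage sketch (simplified from the UDP sendmsg path): seed the cookie
 * from socket state, then let any control messages override it.
 *
 *	struct ipcm_cookie ipc;
 *
 *	ipcm_init_sk(&ipc, inet);
 *	if (msg->msg_controllen) {
 *		err = ip_cmsg_send(sk, msg, &ipc, false);
 *		if (unlikely(err < 0))
 *			return err;
 *	}
 */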

#define IPCB(skb) ((struct inet_skb_parm *)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

/* Special input handler for packets caught by the router alert option.
   They are selected only by the protocol field and are then processed like
   local ones, but only if someone wants them!  Otherwise, a router that is
   not running rsvpd would kill RSVP.

   What to do with them is a user-level problem.  I have no idea how it
   will masquerade or NAT them (it is a joke, joke :-)), but the receiver
   should be clever enough, e.g., to forward mtrace requests sent to a
   multicast group so they reach the destination's designated router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock *sk;
	union {
		void (*destructor)(struct sock *);
		struct sock *saved_sk;
	};
	struct rcu_head rcu;
};

/* IP flags. */
#define IP_CE		0x8000	/* Flag: "Congestion" */
#define IP_DF		0x4000	/* Flag: "Don't Fragment" */
#define IP_MF		0x2000	/* Flag: "More Fragments" */
#define IP_OFFSET	0x1FFF	/* "Fragment Offset" part */

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime */
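
/* The flag bits and the 13-bit fragment offset share iph->frag_off, and
 * the offset is stored in 8-byte units.  Worked example: a network-order
 * frag_off of 0x20b9 has IP_MF set and an offset field of 185, i.e. a
 * byte offset of 185 * 8 == 1480:
 *
 *	unsigned int off = (ntohs(iph->frag_off) & IP_OFFSET) * 8;
 */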

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
	struct sk_buff *frag;
	struct iphdr *iph;
	int offset;
	unsigned int hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}
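
/* Usage sketch, modelled loosely on the fast path of ip_do_fragment():
 * prepare the next fragment's header before the current one is handed
 * to output(), and stop once the list is drained or output() fails.
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */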

struct ip_frag_state {
	bool DF;
	unsigned int hlen;
	unsigned int ll_rs;
	unsigned int mtu;
	unsigned int left;
	int offset;
	int ptr;
	__be16 not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie *ipc, struct sock *sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int flags;
	__wsum csum;
	int csumoffset;		/* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int bound_dev_if;
	u8 tos;
	kuid_t uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val)	__SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val)	SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val)	__SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd)	__SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
{
	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
}

unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG == 32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offt);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}
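
/* Usage sketch, patterned on the /proc/net/snmp code: stats_list is an
 * array terminated by an entry with a NULL name, and the output buffer
 * must be zeroed beforehand since the macro only accumulates.
 *
 *	u64 buff64[IPSTATS_MIB_MAX] = { 0 };
 *
 *	snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
 *				   net->mib.ip_statistics,
 *				   offsetof(struct ipstats_mib, syncp));
 */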

void inet_get_local_port_range(const struct net *net, int *low, int *high);
void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return false;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
}

#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	(READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing a wrong result for
 * check=0xFEFF.  It was noticed by Arthur Skawina a _year_ ago. --ANK(000625)
 */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;

	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	return --iph->ttl;
}
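
/* Why the arithmetic above works: ttl and protocol share one big-endian
 * 16-bit word of the header, so decrementing ttl lowers that word by
 * 0x0100.  The checksum field holds the one's complement of the header
 * sum, so it must rise by the same amount; adding htons(0x0100) and then
 * folding the end-around carry (the "check >= 0xFFFF" term) keeps the
 * checksum valid without resumming the header (cf. RFC 1141/1624).
 */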

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net *net = dev_net(dst->dev);
	unsigned int mtu;

	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    ip_mtu_locked(dst) ||
	    !forwarding) {
		mtu = rt->rt_pmtu;
		if (mtu && time_before(jiffies, rt->dst.expires))
			goto out;
	}

	/* 'forwarding = true' case should always honour route mtu */
	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

out:
	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	unsigned int mtu;

	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	/* We had many attacks based on IPID, use the private
	 * generator as much as we can.
	 */
	if (sk && inet_sk(sk)->inet_daddr) {
		iph->id = htons(inet_sk(sk)->inet_id);
		inet_sk(sk)->inet_id += segs;
		return;
	}
	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		iph->id = 0;
	} else {
		/* Unfortunately we need the big hammer to get a suitable IPID */
		__ip_select_ident(net, iph, segs);
	}
}
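
/* The three cases above, in order: connected sockets take IDs from a
 * cheap per-socket counter; atomic datagrams (DF set and honoured) may
 * use a constant zero ID, which RFC 6864 permits; everything else falls
 * back to the hash-based generator.
 */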

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}
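
/* This is the TCP/UDP pseudo-header sum of RFC 793/768 (addresses,
 * length and protocol), left unfolded so the caller can feed it into
 * further checksumming, e.g. inet_compute_pseudo(skb, IPPROTO_UDP)
 * when validating a received UDP checksum.
 */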

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */
static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
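
/* Worked example: 224.1.2.3 (0xe0010203) keeps only its low 23 bits
 * under the fixed 01:00:5e prefix, giving 01:00:5e:01:02:03.  Five
 * group-id bits are discarded, so 32 IPv4 groups share each MAC.
 */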

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */
static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0] = 0;		/* Reserved */
	buf[1] = 0xff;		/* Multicast QPN */
	buf[2] = 0xff;
	buf[3] = 0xff;
	addr = ntohl(naddr);
	buf[4] = 0xff;
	buf[5] = 0x10 | scope;	/* scope from broadcast address */
	buf[6] = 0x40;		/* IPv4 signature */
	buf[7] = 0x1b;
	buf[8] = broadcast[8];	/* P_Key */
	buf[9] = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr >>= 8;
	buf[18] = addr & 0xff;
	addr >>= 8;
	buf[17] = addr & 0xff;
	addr >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static inline void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
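
/* Usage sketch: both hashes key socket lookup tables, with
 * net_hash_mix() perturbing bucket placement per network namespace,
 * e.g.:
 *
 *	u32 hash = ipv4_portaddr_hash(net, inet->inet_rcv_saddr, port);
 */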

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bound'
 * and 'upper_bound' inclusive.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bound,
					     enum ip_defrag_users upper_bound)
{
	return user >= lower_bound && user <= upper_bound;
}
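
/* Example: the conntrack defrag users encode a 16-bit zone id as an
 * offset from their base value, so "is this any conntrack-input user?"
 * becomes:
 *
 *	ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
 *				  __IP_DEFRAG_CONNTRACK_IN_END)
 */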

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		  unsigned int optlen);
int do_ip_getsockopt(struct sock *sk, int level, int optname,
		     sockptr_t optval, sockptr_t optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);
void __ip_sock_set_tos(struct sock *sk, int val);

#endif	/* _IP_H */