/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			tx_flags;
	__u8			ttl;
	__s16			tos;
	char			priority;
};

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};

extern struct ip_ra_chain __rcu *ip_ra_chain;

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime		*/

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
					size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);

}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return 0;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return net->ipv4.sysctl_ip_prot_sock;
}

#else
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	return 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check>=0xFFFF));
	return --iph->ttl;
}

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);

	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
	    ip_mtu_locked(dst) ||
	    !forwarding)
		return dst_mtu(dst);

	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* This is only to work around buggy Windows95/2000
		 * VJ compression implementations.  If the ID field
		 * does not change, they drop every other packet in
		 * a TCP stream using header compression.
		 */
		if (sk && inet_sk(sk)->inet_daddr) {
			iph->id = htons(inet_sk(sk)->inet_id);
			inet_sk(sk)->inet_id += segs;
		} else {
			iph->id = 0;
		}
	} else {
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr=ntohl(naddr);
	buf[0]=0x01;
	buf[1]=0x00;
	buf[2]=0x5e;
	buf[5]=addr&0xFF;
	addr>>=8;
	buf[4]=addr&0xFF;
	addr>>=8;
	buf[3]=addr&0x7F;
}

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];	/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr  >>= 8;
	buf[18] = addr & 0xff;
	addr  >>= 8;
	buf[17] = addr & 0xff;
	addr  >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif
int ip_frag_mem(struct net *net);

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   unsigned char *data, int optlen);
int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
			     unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen);
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

#endif	/* _IP_H */