/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			tx_flags;
	__u8			ttl;
	__s16			tos;
	char			priority;
};

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};

extern struct ip_ra_chain __rcu *ip_ra_chain;
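/* Editor's note (illustrative sketch, not part of the original header):
 * IPCB() defined above simply reinterprets the skb->cb[] scratch area as a
 * struct inet_skb_parm, so it is only meaningful while the packet is owned
 * by the IPv4 layer. A hypothetical receive-path helper might use it as:
 *
 *	static void example_rx_peek(struct sk_buff *skb)
 *	{
 *		struct inet_skb_parm *parm = IPCB(skb);
 *
 *		if (parm->flags & IPSKB_FORWARDED)
 *			pr_debug("forwarded: iif %d sdif %d\n",
 *				 parm->iif, inet_sdif(skb));
 *	}
 */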
/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime		*/

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}
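/* Editor's sketch (paraphrased flow, not verbatim kernel code): get_rttos()
 * above relies on the -1 sentinel that senders store in ipcm_cookie.tos
 * (a __s16) when no per-call IP_TOS cmsg was supplied; RT_TOS() masks the
 * chosen value down to the RFC 1349 TOS bits. Roughly:
 *
 *	struct ipcm_cookie ipc;
 *
 *	ipc.tos = -1;				// no cmsg override yet
 *	...ip_cmsg_send() may set ipc.tos...
 *	tos = get_rttos(&ipc, inet_sk(sk));	// cmsg value or socket default
 */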
static inline __u8 get_rtconn_flags(struct ipcm_cookie *ipc, struct sock *sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val)	__SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return 0;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}
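/* Editor's sketch: how the reserved-port test above is typically combined
 * with inet_get_local_port_range() when hunting for an ephemeral port
 * (paraphrasing the pattern used by the port allocators, e.g.
 * inet_csk_get_port()):
 *
 *	int low, high, port;
 *
 *	inet_get_local_port_range(net, &low, &high);
 *	for (port = low; port <= high; port++)
 *		if (!inet_is_local_reserved_port(net, port))
 *			break;		// candidate port, still racy
 */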
static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return net->ipv4.sysctl_ip_prot_sock;
}

#else
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	return 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK(000625)
 *
 * Decrementing TTL lowers the 16-bit TTL/protocol word by 0x0100, so the
 * header checksum can be patched incrementally (RFC 1624) instead of being
 * recomputed; the (check >= 0xFFFF) test folds the end-around carry back
 * in, e.g. 0xFF00 + 0x0100 = 0x10000, which folds to 0x0001.
 */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;

	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	return --iph->ttl;
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!(dst_metric_locked(dst, RTAX_MTU)));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);

	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
	    dst_metric_locked(dst, RTAX_MTU) ||
	    !forwarding)
		return dst_mtu(dst);

	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}
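/* Editor's sketch: the usual consumer of ip_skb_dst_mtu() above is the
 * output path, which fragments once the skb outgrows the route MTU
 * (compare ip_finish_output() in net/ipv4/ip_output.c):
 *
 *	unsigned int mtu = ip_skb_dst_mtu(skb->sk, skb);
 *
 *	if (skb->len > mtu && !skb_is_gso(skb))
 *		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
 */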
u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* This is only to work around buggy Windows95/2000
		 * VJ compression implementations.  If the ID field
		 * does not change, they drop every other packet in
		 * a TCP stream using header compression.
		 */
		if (sk && inet_sk(sk)->inet_daddr) {
			iph->id = htons(inet_sk(sk)->inet_id);
			inet_sk(sk)->inet_id += segs;
		} else {
			iph->id = 0;
		}
	} else {
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
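/* Editor's example: ip_eth_mc_map() above implements the RFC 1112 mapping,
 * placing the 23 low-order group bits under the 01:00:5e IANA OUI:
 *
 *	char buf[6];
 *
 *	ip_eth_mc_map(htonl(0xe0010203), buf);	// 224.1.2.3
 *	// buf = 01:00:5e:01:02:03
 *
 * Because only 23 of the 28 variable group bits survive (note the
 * "addr & 0x7F"), 32 distinct groups (e.g. 224.1.2.3 and 225.1.2.3) share
 * each MAC address, and receivers must filter again at the IP layer.
 */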
/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0] = 0;		/* Reserved */
	buf[1] = 0xff;		/* Multicast QPN */
	buf[2] = 0xff;
	buf[3] = 0xff;
	addr = ntohl(naddr);
	buf[4] = 0xff;
	buf[5] = 0x10 | scope;	/* scope from broadcast address */
	buf[6] = 0x40;		/* IPv4 signature */
	buf[7] = 0x1b;
	buf[8] = broadcast[8];	/* P_Key */
	buf[9] = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr >>= 8;
	buf[18] = addr & 0xff;
	addr >>= 8;
	buf[17] = addr & 0xff;
	addr >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static inline void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif /* CONFIG_INET */

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};
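/* Editor's note: the paired *_CONNTRACK_* / __*_END entries above reserve
 * USHRT_MAX consecutive values so that a 16-bit conntrack zone id can be
 * folded into 'user' (compare nf_defrag_ipv4.c), e.g.:
 *
 *	u32 user = IP_DEFRAG_CONNTRACK_IN + zone_id;
 *
 * ip_defrag_user_in_between() below then classifies such values by range.
 */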
/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusive.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif
int ip_frag_mem(struct net *net);

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   unsigned char *data, int optlen);
int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
			     unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen);
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

#endif	/* _IP_H */