Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

sock.h at v2.6.19-rc4 (1389 lines, 40 kB)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for their socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

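/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): SOCK_DEBUG() compiles to a printk() only when the socket has
 * SOCK_DBG set (via setsockopt(SO_DEBUG)), so protocol code can carry
 * diagnostics with no cost in the common case. A hypothetical call site:
 */
#if 0	/* example only */
static void example_rcv(struct sock *sk, struct sk_buff *skb)
{
	SOCK_DEBUG(sk, "example_rcv: sk=%p len=%u\n", sk, skb->len);
}
#endif
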
/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
struct sock_iocb;
typedef struct {
	spinlock_t		slock;
	struct sock_iocb	*owner;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;

/**
 * struct sock_common - minimal network layer representation of sockets
 * @skc_family: network address family
 * @skc_state: Connection state
 * @skc_reuse: %SO_REUSEADDR setting
 * @skc_bound_dev_if: bound device index if != 0
 * @skc_node: main hash linkage for various protocol lookup tables
 * @skc_bind_node: bind hash linkage for various protocol lookup tables
 * @skc_refcnt: reference count
 * @skc_hash: hash value used with various protocol lookup tables
 * @skc_prot: protocol handlers inside a network family
 *
 * This is the minimal network layer representation of sockets, the header
 * for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
	unsigned int		skc_hash;
	struct proto		*skc_prot;
};

/**
 * struct sock - network layer representation of sockets
 * @__sk_common: shared layout with inet_timewait_sock
 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 * @sk_lock: synchronizer
 * @sk_rcvbuf: size of receive buffer in bytes
 * @sk_sleep: sock wait queue
 * @sk_dst_cache: destination cache
 * @sk_dst_lock: destination cache lock
 * @sk_policy: flow policy
 * @sk_rmem_alloc: receive queue bytes committed
 * @sk_receive_queue: incoming packets
 * @sk_wmem_alloc: transmit queue bytes committed
 * @sk_write_queue: Packet sending queue
 * @sk_async_wait_queue: DMA copied packets
 * @sk_omem_alloc: "o" is "option" or "other"
 * @sk_wmem_queued: persistent queue size
 * @sk_forward_alloc: space allocated forward
 * @sk_allocation: allocation mode
 * @sk_sndbuf: size of send buffer in bytes
 * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
 * @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 * @sk_lingertime: %SO_LINGER l_linger setting
 * @sk_backlog: always used with the per-socket spinlock held
 * @sk_callback_lock: used with the callbacks in the end of this struct
 * @sk_error_queue: rarely used
 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
 * @sk_err: last error
 * @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
 * @sk_ack_backlog: current listen backlog
 * @sk_max_ack_backlog: listen backlog set in listen()
 * @sk_priority: %SO_PRIORITY setting
 * @sk_type: socket type (%SOCK_STREAM, etc)
 * @sk_protocol: which protocol this socket belongs to in this network family
 * @sk_peercred: %SO_PEERCRED setting
 * @sk_rcvlowat: %SO_RCVLOWAT setting
 * @sk_rcvtimeo: %SO_RCVTIMEO setting
 * @sk_sndtimeo: %SO_SNDTIMEO setting
 * @sk_filter: socket filtering instructions
 * @sk_protinfo: private area, net family specific, when not using slab
 * @sk_timer: sock cleanup timer
 * @sk_stamp: time stamp of last packet received
 * @sk_socket: Identd and reporting IO signals
 * @sk_user_data: RPC layer private data
 * @sk_sndmsg_page: cached page for sendmsg
 * @sk_sndmsg_off: cached offset for sendmsg
 * @sk_send_head: front of stuff to transmit
 * @sk_security: used by security modules
 * @sk_write_pending: a write to stream socket waits to start
 * @sk_state_change: callback to indicate change in the state of the sock
 * @sk_data_ready: callback to indicate there is data to be processed
 * @sk_write_space: callback to indicate there is buffer space available for sending
 * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 * @sk_backlog_rcv: callback to process the backlog
 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_hash			__sk_common.skc_hash
#define sk_prot			__sk_common.skc_prot
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	struct sk_buff_head	sk_async_wait_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_sndbuf;
	int			sk_route_caps;
	int			sk_gso_type;
	int			sk_rcvlowat;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	struct timeval		sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

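/*
 * Illustrative sketch (editor's addition): kernel users such as the RPC
 * layer customize a socket by saving and replacing the callbacks at the
 * end of struct sock under sk_callback_lock. A hypothetical override
 * (real code would keep the saved pointer per socket, not in a global):
 */
#if 0	/* example only */
static void (*example_old_data_ready)(struct sock *sk, int bytes);

static void example_data_ready(struct sock *sk, int bytes)
{
	/* ... inspect sk->sk_receive_queue, wake a worker, etc. ... */
	example_old_data_ready(sk, bytes);
}

static void example_install(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	example_old_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = example_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}
#endif
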
/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, f.e. it is found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

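/*
 * Illustrative sketch (editor's addition): a lookup finds the sock while
 * the hash chain lock pins it, takes a reference with sock_hold() before
 * dropping the lock, and the caller later balances it with sock_put()
 * (defined further below). Hypothetical caller:
 */
#if 0	/* example only */
static void example_use(struct sock *sk)
{
	sock_hold(sk);		/* sk already pinned by the lookup lock */
	/* ... work with sk outside the lock ... */
	sock_put(sk);		/* may free the sock on last reference */
}
#endif
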
/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

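/*
 * Illustrative sketch (editor's addition): the canonical lookup walks a
 * hash chain with sk_for_each() under the chain lock and grabs a
 * reference while the lock still pins the entry. All names here are
 * hypothetical:
 */
#if 0	/* example only */
static struct sock *example_lookup(struct hlist_head *chain,
				   rwlock_t *chain_lock,
				   unsigned int hash)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock(chain_lock);
	sk_for_each(sk, node, chain)
		if (sk->sk_hash == hash && !sock_flag(sk, SOCK_DEAD)) {
			sock_hold(sk);	/* still pinned by chain_lock */
			read_unlock(chain_lock);
			return sk;
		}
	read_unlock(chain_lock);
	return NULL;
}
#endif
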
/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_truesize_check(skb);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}

#define sk_wait_event(__sk, __timeo, __condition)		\
({	int rc;							\
	release_sock(__sk);					\
	rc = __condition;					\
	if (!rc) {						\
		*(__timeo) = schedule_timeout(*(__timeo));	\
	}							\
	lock_sock(__sk);					\
	rc = __condition;					\
	rc;							\
})

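/*
 * Illustrative sketch (editor's addition): sk_wait_event() drops the
 * socket lock, sleeps until the condition holds or the timeout expires,
 * and retakes the lock; callers prepare the wait on sk->sk_sleep
 * themselves, in the style of sk_stream_wait_memory(). Hypothetical loop:
 */
#if 0	/* example only */
static int example_wait_for_space(struct sock *sk, long *timeo)
{
	DEFINE_WAIT(wait);
	int err = 0;

	while (!sk_stream_memory_free(sk)) {
		if (!*timeo) {
			err = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			err = -EINTR;	/* see sock_intr_errno() below */
			break;
		}
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		sk_wait_event(sk, timeo, sk_stream_memory_free(sk));
		finish_wait(sk->sk_sleep, &wait);
	}
	return err;
}
#endif
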
extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int __user *option);
	int			(*compat_setsockopt)(struct sock *sk,
						     int level,
						     int optname, char __user *optval,
						     int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
						     int level,
						     int optname, char __user *optval,
						     int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					    int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	kmem_cache_t		*slab;
	unsigned int		obj_size;

	atomic_t		*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

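/*
 * Illustrative sketch (editor's addition): a transport protocol embeds
 * struct sock at the start of its private sock structure, describes both
 * in a struct proto, and registers it at module init. With alloc_slab
 * set, proto_register() creates the kmem cache that sk_alloc() draws
 * from. All example_* names are hypothetical:
 */
#if 0	/* example only */
struct example_sock {
	struct sock	sk;		/* must come first */
	int		ex_state;	/* protocol-private state */
};

static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
	/* .close, .sendmsg, .recvmsg, ... protocol methods here */
};

static int __init example_init(void)
{
	return proto_register(&example_proto, 1);
}

static void __exit example_exit(void)
{
	proto_unregister(&example_proto);
}
#endif
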
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* With per-bucket locks this operation is not atomic, so this
 * version is not worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

static inline int sk_stream_pages(int amt)
{
	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

static inline void sk_stream_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, skb->truesize, 1);
}

static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, size, 0);
}

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owner)

extern void FASTCALL(lock_sock(struct sock *sk));
extern void FASTCALL(release_sock(struct sock *sk));

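/*
 * Illustrative sketch (editor's addition): process context takes the
 * socket lock with lock_sock(), which also makes sock_owned_by_user()
 * true so softirq code diverts packets to the backlog; release_sock()
 * runs sk_backlog_rcv() on anything queued meanwhile. BH context uses
 * bh_lock_sock() (below) on the raw spinlock instead. Hypothetical user:
 */
#if 0	/* example only */
static int example_set_rcvlowat(struct sock *sk, int val)
{
	lock_sock(sk);			/* may sleep; excludes other users */
	sk->sk_rcvlowat = val ? : 1;	/* socket state is stable while held */
	release_sock(sk);		/* processes the backlog, wakes waiters */
	return 0;
}
#endif
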
/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

extern struct sock		*sk_alloc(int family,
					  gfp_t priority,
					  struct proto *prot, int zero_it);
extern void			sk_free(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int, int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);

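/*
 * Illustrative sketch (editor's addition): the sock_no_*() stubs let a
 * protocol's struct proto_ops decline operations it does not support,
 * returning the conventional errno. A hypothetical datagram-only
 * protocol might fill in its ops like this:
 */
#if 0	/* example only */
static const struct proto_ops example_ops = {
	.family		= PF_INET,		/* whichever family applies */
	.owner		= THIS_MODULE,
	/* real handlers for the operations the protocol supports ... */
	.listen		= sock_no_listen,	/* ... and stubs for the rest */
	.accept		= sock_no_accept,
	.socketpair	= sock_no_socketpair,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
#endif
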
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
					 int optname, char __user *optval,
					 int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
					 int optname, char __user *optval,
					 int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 * sk_filter - run a packet through a socket filter
 * @sk: sock associated with &sk_buff
 * @skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = sk->sk_filter;
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
						     filter->len);
		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}

/**
 * sk_filter_release - release a socket filter
 * @rcu: rcu_head that contains the sk_filter info to remove
 *
 * Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_rcu_free(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
	kfree(fp);
}

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu_bh(&fp->rcu, sk_filter_rcu_free);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

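/*
 * Illustrative sketch (editor's addition): protocols run sk_filter() on
 * each incoming skb before queueing it, dropping the packet when the
 * attached filter program (see SO_ATTACH_FILTER) returns 0. A
 * hypothetical receive path (sock_queue_rcv_skb() is declared further
 * below):
 */
#if 0	/* example only */
static int example_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (sk_filter(sk, skb)) {
		kfree_skb(skb);		/* rejected, or trim failed */
		return NET_RX_DROP;
	}
	return sock_queue_rcv_skb(sk, skb);
}
#endif
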
/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   while they sit in queue. Otherwise, packets will leak into a hole when
 *   a socket is looked up by one CPU and unhashing is done by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are protected as well.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb);

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif

	memcpy(nsk, osk, osk->sk_prot->obj_size);
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

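/*
 * Illustrative sketch (editor's addition): a transmit path revalidates
 * the cached route with sk_dst_check(), which returns a held dst_entry
 * or NULL if the cookie says it went stale; the caller then looks the
 * route up again and re-caches it. example_route_lookup() is
 * hypothetical and assumed to return a dst with one reference held:
 */
#if 0	/* example only */
static struct dst_entry *example_route(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_check(sk, cookie);

	if (!dst) {
		dst = example_route_lookup(sk);	/* hypothetical */
		if (dst) {
			dst_hold(dst);		/* one ref for the cache... */
			sk_dst_set(sk, dst);	/* ...consumed here */
		}
	}
	return dst;				/* caller's reference */
}
#endif
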
static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_MASK;
	if (sk_can_gso(sk)) {
		if (dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		else
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
	}
}

static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
						page_address(page) + off,
							    copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}

/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem,
						   gfp_t gfp)
{
	struct sk_buff *skb;
	int hdr_len;

	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
	skb = alloc_skb_fclone(size + hdr_len, gfp);
	if (skb) {
		skb->truesize += mem;
		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, hdr_len);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size,
						  gfp_t gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

#define sk_stream_for_retrans_queue(skb, sk)				\
		for (skb = (sk)->sk_write_queue.next;			\
		     (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/* from STCP, for fast SACK processing */
#define sk_stream_for_retrans_queue_from(skb, sk)			\
		for (; (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

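/*
 * Illustrative sketch (editor's addition): a blocking recvmsg
 * implementation typically combines these helpers: pick the timeout with
 * sock_rcvtimeo(), sleep in sk_wait_data(), surface pending errors via
 * sock_error(), and translate signals with sock_intr_errno() (defined
 * just below). Hypothetical skeleton:
 */
#if 0	/* example only */
static int example_recv_wait(struct sock *sk, int noblock)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	int err;

	lock_sock(sk);
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		err = sock_error(sk);
		if (err)
			goto out;
		err = -EAGAIN;
		if (!timeo)
			goto out;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sk_wait_data(sk, &timeo);	/* sleeps, drops/retakes lock */
	}
	err = 0;
out:
	release_sock(sk);
	return err;
}
#endif
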
static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	struct timeval stamp;

	skb_get_timestamp(skb, &stamp);
	if (sock_flag(sk, SOCK_RCVTSTAMP)) {
		/* Race occurred between timestamp enabling and packet
		   receiving.  Fill in the current time for now. */
		if (stamp.tv_sec == 0)
			do_gettimeofday(&stamp);
		skb_set_timestamp(skb, &stamp);
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
			 &stamp);
	} else
		sk->sk_stamp = stamp;
}

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif

extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);

/*
 *	Enable debug/info messages
 */

#ifdef CONFIG_NETDEBUG
#define NETDEBUG(fmt, args...)	printk(fmt,##args)
#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
#else
#define NETDEBUG(fmt, args...)	do { } while (0)
#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
#endif

/*
 * Macros for sleeping on a socket. Use them like this:
 *
 *      SOCK_SLEEP_PRE(sk)
 *      if (condition)
 *              schedule();
 *      SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, afaik, only ever used in DECnet
 * and when the last use of them in DECnet has gone, I'm intending to
 * remove them.
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#ifdef CONFIG_NET
int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
#else
static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return -ENODEV;
}
#endif

extern void sk_init(void);

#ifdef CONFIG_SYSCTL
extern struct ctl_table core_table[];
#endif

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif /* _SOCK_H */