Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
sock.h at commit 71e1f55ad4bc4c8bcfe696400a950a34263a750e
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/config.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
struct sock_iocb;
typedef struct {
	spinlock_t		slock;
	struct sock_iocb	*owner;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
	(__sk)->sk_lock.owner = NULL; \
	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)

struct sock;
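/*
 * Illustrative example (editor's addition, not part of the original
 * header): SOCK_DEBUG() takes a printf-style format, so a protocol can
 * emit per-socket traces; sk and seq here are hypothetical locals in
 * some receive routine:
 *
 *	SOCK_DEBUG(sk, "packet accepted: seq=%u\n", seq);
 *
 * The message is printed only if SOCK_DEBUGGING is defined above and
 * the socket has the SOCK_DBG flag set (see SO_DEBUG).
 */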
/**
 * struct sock_common - minimal network layer representation of sockets
 * @skc_family: network address family
 * @skc_state: Connection state
 * @skc_reuse: %SO_REUSEADDR setting
 * @skc_bound_dev_if: bound device index if != 0
 * @skc_node: main hash linkage for various protocol lookup tables
 * @skc_bind_node: bind hash linkage for various protocol lookup tables
 * @skc_refcnt: reference count
 *
 * This is the minimal network layer representation of sockets, the header
 * for struct sock and struct tcp_tw_bucket.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
};
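/*
 * Illustrative note (editor's addition): struct sock and struct
 * tcp_tw_bucket both begin with a struct sock_common, so lookup code
 * that may encounter either object can inspect the shared fields
 * before the full type is known, e.g. (sketch):
 *
 *	struct sock_common *skc = (struct sock_common *)entry;
 *
 *	if (skc->skc_family == AF_INET && skc->skc_state == TCP_TIME_WAIT)
 *		... entry is really a struct tcp_tw_bucket ...
 */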
/**
 * struct sock - network layer representation of sockets
 * @__sk_common: shared layout with tcp_tw_bucket
 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 * @sk_lock: synchronizer
 * @sk_rcvbuf: size of receive buffer in bytes
 * @sk_sleep: sock wait queue
 * @sk_dst_cache: destination cache
 * @sk_dst_lock: destination cache lock
 * @sk_policy: flow policy
 * @sk_rmem_alloc: receive queue bytes committed
 * @sk_receive_queue: incoming packets
 * @sk_wmem_alloc: transmit queue bytes committed
 * @sk_write_queue: Packet sending queue
 * @sk_omem_alloc: "o" is "option" or "other"
 * @sk_wmem_queued: persistent queue size
 * @sk_forward_alloc: space allocated forward
 * @sk_allocation: allocation mode
 * @sk_sndbuf: size of send buffer in bytes
 * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
 * @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 * @sk_lingertime: %SO_LINGER l_linger setting
 * @sk_hashent: hash entry in several tables (e.g. tcp_ehash)
 * @sk_backlog: always used with the per-socket spinlock held
 * @sk_callback_lock: used with the callbacks in the end of this struct
 * @sk_error_queue: rarely used
 * @sk_prot: protocol handlers inside a network family
 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
 * @sk_err: last error
 * @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure, not just 'timed out'
 * @sk_ack_backlog: current listen backlog
 * @sk_max_ack_backlog: listen backlog set in listen()
 * @sk_priority: %SO_PRIORITY setting
 * @sk_type: socket type (%SOCK_STREAM, etc)
 * @sk_protocol: which protocol this socket belongs to in this network family
 * @sk_peercred: %SO_PEERCRED setting
 * @sk_rcvlowat: %SO_RCVLOWAT setting
 * @sk_rcvtimeo: %SO_RCVTIMEO setting
 * @sk_sndtimeo: %SO_SNDTIMEO setting
 * @sk_filter: socket filtering instructions
 * @sk_protinfo: private area, net family specific, when not using slab
 * @sk_timer: sock cleanup timer
 * @sk_stamp: time stamp of last packet received
 * @sk_socket: Identd and reporting IO signals
 * @sk_user_data: RPC layer private data
 * @sk_sndmsg_page: cached page for sendmsg
 * @sk_sndmsg_off: cached offset for sendmsg
 * @sk_send_head: front of stuff to transmit
 * @sk_security: used by security modules
 * @sk_write_pending: a write to stream socket waits to start
 * @sk_state_change: callback to indicate change in the state of the sock
 * @sk_data_ready: callback to indicate there is data to be processed
 * @sk_write_space: callback to indicate there is buffer sending space available
 * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 * @sk_backlog_rcv: callback to process the backlog
 * @sk_destruct: called at sock freeing time, i.e. when refcnt == 0
 */
struct sock {
	/*
	 * Now struct tcp_tw_bucket also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	unsigned int		sk_allocation;
	int			sk_sndbuf;
	int			sk_route_caps;
	int			sk_hashent;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	int			sk_rcvlowat;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	struct timeval		sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}
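/*
 * Illustrative example (editor's addition): walking a chain with the
 * helpers above ("head" is a hypothetical hlist_head from a protocol
 * hash table; the caller holds the lock protecting it):
 *
 *	struct sock *sk;
 *
 *	for (sk = sk_head(head); sk; sk = sk_next(sk))
 *		if (sk->sk_family == AF_INET)
 *			break;
 */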
static inline int sk_unhashed(struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(struct sock *sk)
{
	return sk->sk_node.pprev != NULL;
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab a socket reference count.  This operation is valid only
   when sk is ALREADY grabbed, e.g. it was found in a hash table
   or a list and the lookup was made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab a socket in a context which assumes that the socket refcnt
   cannot hit zero, e.g. this is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
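/*
 * Illustrative sketch (editor's addition): a typical lookup built on
 * sk_for_each(); "chain" is a hypothetical hlist_head from a protocol
 * lookup table, protected by a lock the caller already holds:
 *
 *	struct sock *sk;
 *	struct hlist_node *node;
 *
 *	sk_for_each(sk, node, chain)
 *		if (sk->sk_bound_dev_if == dif) {
 *			sock_hold(sk);	(take a reference while the
 *					 chain lock still protects sk)
 *			break;
 *		}
 */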
/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_NO_LARGESEND, /* whether to send large segments or not */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute the minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb)				\
do {	if (!(__sk)->sk_backlog.tail) {				\
		(__sk)->sk_backlog.head =			\
		     (__sk)->sk_backlog.tail = (__skb);		\
	} else {						\
		((__sk)->sk_backlog.tail)->next = (__skb);	\
		(__sk)->sk_backlog.tail = (__skb);		\
	}							\
	(__skb)->next = NULL;					\
} while(0)

#define sk_wait_event(__sk, __timeo, __condition)		\
({	int rc;							\
	release_sock(__sk);					\
	rc = __condition;					\
	if (!rc) {						\
		*(__timeo) = schedule_timeout(*(__timeo));	\
		rc = __condition;				\
	}							\
	lock_sock(__sk);					\
	rc;							\
})

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
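/*
 * Illustrative sketch (editor's addition): sk_wait_event() drops the
 * socket lock, evaluates __condition, sleeps for up to *__timeo if it
 * does not yet hold, and re-takes the lock.  A typical caller pairs it
 * with prepare_to_wait()/finish_wait(), roughly:
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *	rc = sk_wait_event(sk, &timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk->sk_sleep, &wait);
 *
 * (cf. sk_wait_data(), declared above, which does essentially this.)
 */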
/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					    int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non-atomically.
	 * All of sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	kmem_cache_t		*slab;
	unsigned int		obj_size;

	struct request_sock_ops	*rsk_prot;

	struct module		*owner;

	char			name[32];

	struct list_head	node;

	/* Per-CPU "in use" statistics, padded to one cache line per CPU. */
	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};
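/*
 * Illustrative sketch (editor's addition): a protocol fills in a
 * struct proto and registers it at init time.  The fields below are
 * the minimum a hypothetical protocol might set when asking
 * proto_register() (declared just below) to create a slab cache
 * for its sockets:
 *
 *	static struct proto example_prot = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct example_sock),
 *	};
 *
 *	if (proto_register(&example_prot, 1) < 0)	second arg 1 => alloc slab
 *		... fail module init ...
 */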
extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct iovec		async_iov;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

static inline int sk_stream_pages(int amt)
{
	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

static inline void sk_stream_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
	       sk_stream_mem_schedule(sk, skb->truesize, 1);
}
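/*
 * Worked example (editor's addition): forward allocation is accounted
 * in SK_STREAM_MEM_QUANTUM (one page) units, rounding up.  With
 * 4096-byte pages, sk_stream_pages(6000) == 2, so a 6000-byte charge
 * reserves 8192 bytes; sk_stream_mem_reclaim() only bothers to return
 * memory once at least one full quantum is free again.
 */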
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us.  It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owner)

extern void FASTCALL(lock_sock(struct sock *sk));
extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

extern struct sock		*sk_alloc(int family,
					  unsigned int __nocast priority,
					  struct proto *prot, int zero_it);
extern void			sk_free(struct sock *sk);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      unsigned int __nocast priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      unsigned int __nocast priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  unsigned int __nocast priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int, int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);
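/*
 * Illustrative sketch (editor's addition): process context brackets
 * socket state changes with lock_sock()/release_sock(), while softirq
 * code uses bh_lock_sock() and parks packets on sk_backlog when the
 * owner field shows a user currently holds the lock:
 *
 *	lock_sock(sk);
 *	... modify socket state; concurrently arriving packets are
 *	    queued on sk->sk_backlog via sk_add_backlog() ...
 *	release_sock(sk);	the backlog is drained here, through
 *				sk->sk_backlog_rcv()
 */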
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@needlock: set to 1 if the sock is not locked by the caller.
 *
 * Run the filter code and then trim skb->data to the size returned by
 * sk_run_filter.  If pkt_len is 0 we toss the packet.  If skb->len is
 * smaller than pkt_len we keep the whole skb->data.  This is the socket
 * level wrapper for sk_run_filter.  It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
{
	int err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	if (sk->sk_filter) {
		struct sk_filter *filter;

		if (needlock)
			bh_lock_sock(sk);

		filter = sk->sk_filter;
		if (filter) {
			int pkt_len = sk_run_filter(skb, filter->insns,
						    filter->len);
			if (!pkt_len)
				err = -EPERM;
			else
				skb_trim(skb, pkt_len);
		}

		if (needlock)
			bh_unlock_sock(sk);
	}
	return err;
}

/**
 *	sk_filter_release - release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}
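/*
 * Illustrative sketch (editor's addition): receive paths run the
 * socket filter before queueing; sock_queue_rcv_skb() below does
 * exactly this.  A hypothetical caller that does not hold the socket
 * lock passes needlock=1:
 *
 *	if (sk_filter(sk, skb, 1)) {
 *		kfree_skb(skb);		rejected by the filter or LSM
 *		return;
 *	}
 */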
/*
 * Socket reference counting postulates.
 *
 * * Each user of a socket SHOULD hold a reference count.
 * * Each access point to a socket (a hash table bucket, a reference from a
 *   list, a running timer, an skb in flight) MUST hold a reference count.
 * * When the reference count hits 0, it will never increase again.
 * * When the reference count hits 0, no references from outside exist to
 *   this socket; the current process on the current CPU is the last user
 *   and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ.  When it is
 *   called, the socket has no references from outside, so sk_free may
 *   release descendant resources allocated by the socket; by the time it
 *   is called the socket is NOT referenced by any hash tables, lists, etc.
 * * Packets, delivered from outside (from the network or from another
 *   process) and enqueued on receive/error queues SHOULD NOT grab a
 *   reference count while they sit in a queue.  Otherwise packets would
 *   leak when a socket is looked up by one CPU while unhashing is done by
 *   another CPU.  This holds for udp/raw, netlink (which would leak to the
 *   receive and error queues) and tcp (which would leak to the backlog).
 *   The packet socket does all its processing inside BR_NETPROTO_LOCK, so
 *   it does not have this race; UNIX sockets use a separate SMP lock, so
 *   they are not subject to it either.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock;
 * we do not release it in this function, because the protocol
 * probably wants some additional cleanups or even to continue
 * working with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->sk_dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->sk_dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}

static inline struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}

static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued   += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}
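/*
 * Illustrative sketch (editor's addition): an output path revalidates
 * its cached route with sk_dst_check() and installs a fresh one when
 * the old entry has become obsolete (route_output() stands in for a
 * hypothetical per-protocol route lookup):
 *
 *	struct dst_entry *dst = sk_dst_check(sk, cookie);
 *
 *	if (dst == NULL) {
 *		dst = route_output(sk);
 *		if (dst)
 *			sk_dst_set(sk, dst);
 *	}
 */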
static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
						page_address(page) + off,
							    copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}

/*
 *	Queue a received datagram if it will fit.  Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	/* It would deadlock if sock_queue_rcv_skb were used with the
	   socket lock held!  We assume that users of this function
	   are lock free.
	 */
	err = sk_filter(sk, skb, 1);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}
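/*
 * Illustrative sketch (editor's addition): a datagram protocol's
 * delivery routine (hypothetical) hands packets to the socket with
 * sock_queue_rcv_skb() and frees them when the socket cannot take
 * any more:
 *
 *	static int example_deliver(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sock_queue_rcv_skb(sk, skb);
 *
 *		if (err < 0)
 *			kfree_skb(skb);
 *		return err;
 *	}
 */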
static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear it atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem,
						   unsigned int __nocast gfp)
{
	struct sk_buff *skb;
	int hdr_len;

	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
	skb = alloc_skb(size + hdr_len, gfp);
	if (skb) {
		skb->truesize += mem;
		if (sk->sk_forward_alloc >= (int)skb->truesize ||
		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
			skb_reserve(skb, hdr_len);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size,
						  unsigned int __nocast gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
		page = alloc_pages(sk->sk_allocation, 0);
	else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

#define sk_stream_for_retrans_queue(skb, sk)				\
		for (skb = (sk)->sk_write_queue.next;			\
		     (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

static inline unsigned int __nocast gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}
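/*
 * Illustrative sketch (editor's addition): a blocking recvmsg
 * implementation typically derives its timeout from sock_rcvtimeo()
 * and loops like this (the names are schematic; sock_intr_errno()
 * is defined just below):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */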
/* Alas, with timeouts socket operations are not restartable.
 * Compare this with poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	struct timeval *stamp = &skb->stamp;
	if (sock_flag(sk, SOCK_RCVTSTAMP)) {
		/* A race occurred between timestamp enabling and packet
		   receiving.  Fill in the current time for now. */
		if (stamp->tv_sec == 0)
			do_gettimeofday(stamp);
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
			 stamp);
	} else
		sk->sk_stamp = *stamp;
}

/**
 * sk_eat_skb - release an skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);

/*
 *	Enable debug/info messages
 */

#if 0
#define NETDEBUG(x)	do { } while (0)
#define LIMIT_NETDEBUG(x) do {} while(0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#define LIMIT_NETDEBUG(x) do { if (net_ratelimit()) { x; } } while(0)
#endif

/*
 * Macros for sleeping on a socket.  Use them like this:
 *
 *	SOCK_SLEEP_PRE(sk)
 *	if (condition)
 *		schedule();
 *	SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, afaik, only ever used in DECnet;
 * once the last use of them in DECnet is gone, I intend to remove them.
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#ifdef CONFIG_NET
int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
#else
static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return -ENODEV;
}
#endif

#endif	/* _SOCK_H */