Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.38-rc6 2934 lines 70 kB view raw
1/* 2 * Generic PPP layer for Linux. 3 * 4 * Copyright 1999-2002 Paul Mackerras. 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 9 * 2 of the License, or (at your option) any later version. 10 * 11 * The generic PPP layer handles the PPP network interfaces, the 12 * /dev/ppp device, packet and VJ compression, and multilink. 13 * It talks to PPP `channels' via the interface defined in 14 * include/linux/ppp_channel.h. Channels provide the basic means for 15 * sending and receiving PPP frames on some kind of communications 16 * channel. 17 * 18 * Part of the code in this driver was inspired by the old async-only 19 * PPP driver, written by Michael Callahan and Al Longyear, and 20 * subsequently hacked by Paul Mackerras. 21 * 22 * ==FILEVERSION 20041108== 23 */ 24 25#include <linux/module.h> 26#include <linux/kernel.h> 27#include <linux/kmod.h> 28#include <linux/init.h> 29#include <linux/list.h> 30#include <linux/idr.h> 31#include <linux/netdevice.h> 32#include <linux/poll.h> 33#include <linux/ppp_defs.h> 34#include <linux/filter.h> 35#include <linux/if_ppp.h> 36#include <linux/ppp_channel.h> 37#include <linux/ppp-comp.h> 38#include <linux/skbuff.h> 39#include <linux/rtnetlink.h> 40#include <linux/if_arp.h> 41#include <linux/ip.h> 42#include <linux/tcp.h> 43#include <linux/spinlock.h> 44#include <linux/rwsem.h> 45#include <linux/stddef.h> 46#include <linux/device.h> 47#include <linux/mutex.h> 48#include <linux/slab.h> 49#include <asm/unaligned.h> 50#include <net/slhc_vj.h> 51#include <asm/atomic.h> 52 53#include <linux/nsproxy.h> 54#include <net/net_namespace.h> 55#include <net/netns/generic.h> 56 57#define PPP_VERSION "2.4.2" 58 59/* 60 * Network protocols we support. 
61 */ 62#define NP_IP 0 /* Internet Protocol V4 */ 63#define NP_IPV6 1 /* Internet Protocol V6 */ 64#define NP_IPX 2 /* IPX protocol */ 65#define NP_AT 3 /* Appletalk protocol */ 66#define NP_MPLS_UC 4 /* MPLS unicast */ 67#define NP_MPLS_MC 5 /* MPLS multicast */ 68#define NUM_NP 6 /* Number of NPs. */ 69 70#define MPHDRLEN 6 /* multilink protocol header length */ 71#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ 72 73/* 74 * An instance of /dev/ppp can be associated with either a ppp 75 * interface unit or a ppp channel. In both cases, file->private_data 76 * points to one of these. 77 */ 78struct ppp_file { 79 enum { 80 INTERFACE=1, CHANNEL 81 } kind; 82 struct sk_buff_head xq; /* pppd transmit queue */ 83 struct sk_buff_head rq; /* receive queue for pppd */ 84 wait_queue_head_t rwait; /* for poll on reading /dev/ppp */ 85 atomic_t refcnt; /* # refs (incl /dev/ppp attached) */ 86 int hdrlen; /* space to leave for headers */ 87 int index; /* interface unit / channel number */ 88 int dead; /* unit/channel has been shut down */ 89}; 90 91#define PF_TO_X(pf, X) container_of(pf, X, file) 92 93#define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp) 94#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) 95 96/* 97 * Data structure describing one ppp unit. 98 * A ppp unit corresponds to a ppp network interface device 99 * and represents a multilink bundle. 100 * It can have 0 or more ppp channels connected to it. 
101 */ 102struct ppp { 103 struct ppp_file file; /* stuff for read/write/poll 0 */ 104 struct file *owner; /* file that owns this unit 48 */ 105 struct list_head channels; /* list of attached channels 4c */ 106 int n_channels; /* how many channels are attached 54 */ 107 spinlock_t rlock; /* lock for receive side 58 */ 108 spinlock_t wlock; /* lock for transmit side 5c */ 109 int mru; /* max receive unit 60 */ 110 unsigned int flags; /* control bits 64 */ 111 unsigned int xstate; /* transmit state bits 68 */ 112 unsigned int rstate; /* receive state bits 6c */ 113 int debug; /* debug flags 70 */ 114 struct slcompress *vj; /* state for VJ header compression */ 115 enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */ 116 struct sk_buff *xmit_pending; /* a packet ready to go out 88 */ 117 struct compressor *xcomp; /* transmit packet compressor 8c */ 118 void *xc_state; /* its internal state 90 */ 119 struct compressor *rcomp; /* receive decompressor 94 */ 120 void *rc_state; /* its internal state 98 */ 121 unsigned long last_xmit; /* jiffies when last pkt sent 9c */ 122 unsigned long last_recv; /* jiffies when last pkt rcvd a0 */ 123 struct net_device *dev; /* network interface device a4 */ 124 int closing; /* is device closing down? a8 */ 125#ifdef CONFIG_PPP_MULTILINK 126 int nxchan; /* next channel to send something on */ 127 u32 nxseq; /* next sequence number to send */ 128 int mrru; /* MP: max reconst. 
receive unit */ 129 u32 nextseq; /* MP: seq no of next packet */ 130 u32 minseq; /* MP: min of most recent seqnos */ 131 struct sk_buff_head mrq; /* MP: receive reconstruction queue */ 132#endif /* CONFIG_PPP_MULTILINK */ 133#ifdef CONFIG_PPP_FILTER 134 struct sock_filter *pass_filter; /* filter for packets to pass */ 135 struct sock_filter *active_filter;/* filter for pkts to reset idle */ 136 unsigned pass_len, active_len; 137#endif /* CONFIG_PPP_FILTER */ 138 struct net *ppp_net; /* the net we belong to */ 139}; 140 141/* 142 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC, 143 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP, 144 * SC_MUST_COMP 145 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR. 146 * Bits in xstate: SC_COMP_RUN 147 */ 148#define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \ 149 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \ 150 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP) 151 152/* 153 * Private data structure for each channel. 154 * This includes the data structure used for multilink. 
155 */ 156struct channel { 157 struct ppp_file file; /* stuff for read/write/poll */ 158 struct list_head list; /* link in all/new_channels list */ 159 struct ppp_channel *chan; /* public channel data structure */ 160 struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */ 161 spinlock_t downl; /* protects `chan', file.xq dequeue */ 162 struct ppp *ppp; /* ppp unit we're connected to */ 163 struct net *chan_net; /* the net channel belongs to */ 164 struct list_head clist; /* link in list of channels per unit */ 165 rwlock_t upl; /* protects `ppp' */ 166#ifdef CONFIG_PPP_MULTILINK 167 u8 avail; /* flag used in multilink stuff */ 168 u8 had_frag; /* >= 1 fragments have been sent */ 169 u32 lastseq; /* MP: last sequence # received */ 170 int speed; /* speed of the corresponding ppp channel*/ 171#endif /* CONFIG_PPP_MULTILINK */ 172}; 173 174/* 175 * SMP locking issues: 176 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels 177 * list and the ppp.n_channels field, you need to take both locks 178 * before you modify them. 179 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock -> 180 * channel.downl. 181 */ 182 183static DEFINE_MUTEX(ppp_mutex); 184static atomic_t ppp_unit_count = ATOMIC_INIT(0); 185static atomic_t channel_count = ATOMIC_INIT(0); 186 187/* per-net private data for this module */ 188static int ppp_net_id __read_mostly; 189struct ppp_net { 190 /* units to ppp mapping */ 191 struct idr units_idr; 192 193 /* 194 * all_ppp_mutex protects the units_idr mapping. 195 * It also ensures that finding a ppp unit in the units_idr 196 * map and updating its file.refcnt field is atomic. 197 */ 198 struct mutex all_ppp_mutex; 199 200 /* channels */ 201 struct list_head all_channels; 202 struct list_head new_channels; 203 int last_channel_index; 204 205 /* 206 * all_channels_lock protects all_channels and 207 * last_channel_index, and the atomicity of find 208 * a channel and updating its file.refcnt field. 
209 */ 210 spinlock_t all_channels_lock; 211}; 212 213/* Get the PPP protocol number from a skb */ 214#define PPP_PROTO(skb) get_unaligned_be16((skb)->data) 215 216/* We limit the length of ppp->file.rq to this (arbitrary) value */ 217#define PPP_MAX_RQLEN 32 218 219/* 220 * Maximum number of multilink fragments queued up. 221 * This has to be large enough to cope with the maximum latency of 222 * the slowest channel relative to the others. Strictly it should 223 * depend on the number of channels and their characteristics. 224 */ 225#define PPP_MP_MAX_QLEN 128 226 227/* Multilink header bits. */ 228#define B 0x80 /* this fragment begins a packet */ 229#define E 0x40 /* this fragment ends a packet */ 230 231/* Compare multilink sequence numbers (assumed to be 32 bits wide) */ 232#define seq_before(a, b) ((s32)((a) - (b)) < 0) 233#define seq_after(a, b) ((s32)((a) - (b)) > 0) 234 235/* Prototypes. */ 236static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 237 struct file *file, unsigned int cmd, unsigned long arg); 238static void ppp_xmit_process(struct ppp *ppp); 239static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 240static void ppp_push(struct ppp *ppp); 241static void ppp_channel_push(struct channel *pch); 242static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, 243 struct channel *pch); 244static void ppp_receive_error(struct ppp *ppp); 245static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb); 246static struct sk_buff *ppp_decompress_frame(struct ppp *ppp, 247 struct sk_buff *skb); 248#ifdef CONFIG_PPP_MULTILINK 249static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, 250 struct channel *pch); 251static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb); 252static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp); 253static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb); 254#endif /* CONFIG_PPP_MULTILINK */ 255static int ppp_set_compress(struct ppp *ppp, 
unsigned long arg); 256static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound); 257static void ppp_ccp_closed(struct ppp *ppp); 258static struct compressor *find_compressor(int type); 259static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); 260static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp); 261static void init_ppp_file(struct ppp_file *pf, int kind); 262static void ppp_shutdown_interface(struct ppp *ppp); 263static void ppp_destroy_interface(struct ppp *ppp); 264static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); 265static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); 266static int ppp_connect_channel(struct channel *pch, int unit); 267static int ppp_disconnect_channel(struct channel *pch); 268static void ppp_destroy_channel(struct channel *pch); 269static int unit_get(struct idr *p, void *ptr); 270static int unit_set(struct idr *p, void *ptr, int n); 271static void unit_put(struct idr *p, int n); 272static void *unit_find(struct idr *p, int n); 273 274static struct class *ppp_class; 275 276/* per net-namespace data */ 277static inline struct ppp_net *ppp_pernet(struct net *net) 278{ 279 BUG_ON(!net); 280 281 return net_generic(net, ppp_net_id); 282} 283 284/* Translates a PPP protocol number to a NP index (NP == network protocol) */ 285static inline int proto_to_npindex(int proto) 286{ 287 switch (proto) { 288 case PPP_IP: 289 return NP_IP; 290 case PPP_IPV6: 291 return NP_IPV6; 292 case PPP_IPX: 293 return NP_IPX; 294 case PPP_AT: 295 return NP_AT; 296 case PPP_MPLS_UC: 297 return NP_MPLS_UC; 298 case PPP_MPLS_MC: 299 return NP_MPLS_MC; 300 } 301 return -EINVAL; 302} 303 304/* Translates an NP index into a PPP protocol number */ 305static const int npindex_to_proto[NUM_NP] = { 306 PPP_IP, 307 PPP_IPV6, 308 PPP_IPX, 309 PPP_AT, 310 PPP_MPLS_UC, 311 PPP_MPLS_MC, 312}; 313 314/* Translates an ethertype into an NP index */ 315static inline int ethertype_to_npindex(int 
ethertype) 316{ 317 switch (ethertype) { 318 case ETH_P_IP: 319 return NP_IP; 320 case ETH_P_IPV6: 321 return NP_IPV6; 322 case ETH_P_IPX: 323 return NP_IPX; 324 case ETH_P_PPPTALK: 325 case ETH_P_ATALK: 326 return NP_AT; 327 case ETH_P_MPLS_UC: 328 return NP_MPLS_UC; 329 case ETH_P_MPLS_MC: 330 return NP_MPLS_MC; 331 } 332 return -1; 333} 334 335/* Translates an NP index into an ethertype */ 336static const int npindex_to_ethertype[NUM_NP] = { 337 ETH_P_IP, 338 ETH_P_IPV6, 339 ETH_P_IPX, 340 ETH_P_PPPTALK, 341 ETH_P_MPLS_UC, 342 ETH_P_MPLS_MC, 343}; 344 345/* 346 * Locking shorthand. 347 */ 348#define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock) 349#define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock) 350#define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock) 351#define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock) 352#define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \ 353 ppp_recv_lock(ppp); } while (0) 354#define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \ 355 ppp_xmit_unlock(ppp); } while (0) 356 357/* 358 * /dev/ppp device routines. 359 * The /dev/ppp device is used by pppd to control the ppp unit. 360 * It supports the read, write, ioctl and poll functions. 361 * Open instances of /dev/ppp can be in one of three states: 362 * unattached, attached to a ppp unit, or attached to a ppp channel. 363 */ 364static int ppp_open(struct inode *inode, struct file *file) 365{ 366 /* 367 * This could (should?) be enforced by the permissions on /dev/ppp. 
368 */ 369 if (!capable(CAP_NET_ADMIN)) 370 return -EPERM; 371 return 0; 372} 373 374static int ppp_release(struct inode *unused, struct file *file) 375{ 376 struct ppp_file *pf = file->private_data; 377 struct ppp *ppp; 378 379 if (pf) { 380 file->private_data = NULL; 381 if (pf->kind == INTERFACE) { 382 ppp = PF_TO_PPP(pf); 383 if (file == ppp->owner) 384 ppp_shutdown_interface(ppp); 385 } 386 if (atomic_dec_and_test(&pf->refcnt)) { 387 switch (pf->kind) { 388 case INTERFACE: 389 ppp_destroy_interface(PF_TO_PPP(pf)); 390 break; 391 case CHANNEL: 392 ppp_destroy_channel(PF_TO_CHANNEL(pf)); 393 break; 394 } 395 } 396 } 397 return 0; 398} 399 400static ssize_t ppp_read(struct file *file, char __user *buf, 401 size_t count, loff_t *ppos) 402{ 403 struct ppp_file *pf = file->private_data; 404 DECLARE_WAITQUEUE(wait, current); 405 ssize_t ret; 406 struct sk_buff *skb = NULL; 407 struct iovec iov; 408 409 ret = count; 410 411 if (!pf) 412 return -ENXIO; 413 add_wait_queue(&pf->rwait, &wait); 414 for (;;) { 415 set_current_state(TASK_INTERRUPTIBLE); 416 skb = skb_dequeue(&pf->rq); 417 if (skb) 418 break; 419 ret = 0; 420 if (pf->dead) 421 break; 422 if (pf->kind == INTERFACE) { 423 /* 424 * Return 0 (EOF) on an interface that has no 425 * channels connected, unless it is looping 426 * network traffic (demand mode). 
427 */ 428 struct ppp *ppp = PF_TO_PPP(pf); 429 if (ppp->n_channels == 0 && 430 (ppp->flags & SC_LOOP_TRAFFIC) == 0) 431 break; 432 } 433 ret = -EAGAIN; 434 if (file->f_flags & O_NONBLOCK) 435 break; 436 ret = -ERESTARTSYS; 437 if (signal_pending(current)) 438 break; 439 schedule(); 440 } 441 set_current_state(TASK_RUNNING); 442 remove_wait_queue(&pf->rwait, &wait); 443 444 if (!skb) 445 goto out; 446 447 ret = -EOVERFLOW; 448 if (skb->len > count) 449 goto outf; 450 ret = -EFAULT; 451 iov.iov_base = buf; 452 iov.iov_len = count; 453 if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len)) 454 goto outf; 455 ret = skb->len; 456 457 outf: 458 kfree_skb(skb); 459 out: 460 return ret; 461} 462 463static ssize_t ppp_write(struct file *file, const char __user *buf, 464 size_t count, loff_t *ppos) 465{ 466 struct ppp_file *pf = file->private_data; 467 struct sk_buff *skb; 468 ssize_t ret; 469 470 if (!pf) 471 return -ENXIO; 472 ret = -ENOMEM; 473 skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); 474 if (!skb) 475 goto out; 476 skb_reserve(skb, pf->hdrlen); 477 ret = -EFAULT; 478 if (copy_from_user(skb_put(skb, count), buf, count)) { 479 kfree_skb(skb); 480 goto out; 481 } 482 483 skb_queue_tail(&pf->xq, skb); 484 485 switch (pf->kind) { 486 case INTERFACE: 487 ppp_xmit_process(PF_TO_PPP(pf)); 488 break; 489 case CHANNEL: 490 ppp_channel_push(PF_TO_CHANNEL(pf)); 491 break; 492 } 493 494 ret = count; 495 496 out: 497 return ret; 498} 499 500/* No kernel lock - fine */ 501static unsigned int ppp_poll(struct file *file, poll_table *wait) 502{ 503 struct ppp_file *pf = file->private_data; 504 unsigned int mask; 505 506 if (!pf) 507 return 0; 508 poll_wait(file, &pf->rwait, wait); 509 mask = POLLOUT | POLLWRNORM; 510 if (skb_peek(&pf->rq)) 511 mask |= POLLIN | POLLRDNORM; 512 if (pf->dead) 513 mask |= POLLHUP; 514 else if (pf->kind == INTERFACE) { 515 /* see comment in ppp_read */ 516 struct ppp *ppp = PF_TO_PPP(pf); 517 if (ppp->n_channels == 0 && 518 (ppp->flags & 
SC_LOOP_TRAFFIC) == 0) 519 mask |= POLLIN | POLLRDNORM; 520 } 521 522 return mask; 523} 524 525#ifdef CONFIG_PPP_FILTER 526static int get_filter(void __user *arg, struct sock_filter **p) 527{ 528 struct sock_fprog uprog; 529 struct sock_filter *code = NULL; 530 int len, err; 531 532 if (copy_from_user(&uprog, arg, sizeof(uprog))) 533 return -EFAULT; 534 535 if (!uprog.len) { 536 *p = NULL; 537 return 0; 538 } 539 540 len = uprog.len * sizeof(struct sock_filter); 541 code = memdup_user(uprog.filter, len); 542 if (IS_ERR(code)) 543 return PTR_ERR(code); 544 545 err = sk_chk_filter(code, uprog.len); 546 if (err) { 547 kfree(code); 548 return err; 549 } 550 551 *p = code; 552 return uprog.len; 553} 554#endif /* CONFIG_PPP_FILTER */ 555 556static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 557{ 558 struct ppp_file *pf = file->private_data; 559 struct ppp *ppp; 560 int err = -EFAULT, val, val2, i; 561 struct ppp_idle idle; 562 struct npioctl npi; 563 int unit, cflags; 564 struct slcompress *vj; 565 void __user *argp = (void __user *)arg; 566 int __user *p = argp; 567 568 if (!pf) 569 return ppp_unattached_ioctl(current->nsproxy->net_ns, 570 pf, file, cmd, arg); 571 572 if (cmd == PPPIOCDETACH) { 573 /* 574 * We have to be careful here... if the file descriptor 575 * has been dup'd, we could have another process in the 576 * middle of a poll using the same file *, so we had 577 * better not free the interface data structures - 578 * instead we fail the ioctl. Even in this case, we 579 * shut down the interface if we are the owner of it. 580 * Actually, we should get rid of PPPIOCDETACH, userland 581 * (i.e. pppd) could achieve the same effect by closing 582 * this fd and reopening /dev/ppp. 
583 */ 584 err = -EINVAL; 585 mutex_lock(&ppp_mutex); 586 if (pf->kind == INTERFACE) { 587 ppp = PF_TO_PPP(pf); 588 if (file == ppp->owner) 589 ppp_shutdown_interface(ppp); 590 } 591 if (atomic_long_read(&file->f_count) <= 2) { 592 ppp_release(NULL, file); 593 err = 0; 594 } else 595 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", 596 atomic_long_read(&file->f_count)); 597 mutex_unlock(&ppp_mutex); 598 return err; 599 } 600 601 if (pf->kind == CHANNEL) { 602 struct channel *pch; 603 struct ppp_channel *chan; 604 605 mutex_lock(&ppp_mutex); 606 pch = PF_TO_CHANNEL(pf); 607 608 switch (cmd) { 609 case PPPIOCCONNECT: 610 if (get_user(unit, p)) 611 break; 612 err = ppp_connect_channel(pch, unit); 613 break; 614 615 case PPPIOCDISCONN: 616 err = ppp_disconnect_channel(pch); 617 break; 618 619 default: 620 down_read(&pch->chan_sem); 621 chan = pch->chan; 622 err = -ENOTTY; 623 if (chan && chan->ops->ioctl) 624 err = chan->ops->ioctl(chan, cmd, arg); 625 up_read(&pch->chan_sem); 626 } 627 mutex_unlock(&ppp_mutex); 628 return err; 629 } 630 631 if (pf->kind != INTERFACE) { 632 /* can't happen */ 633 printk(KERN_ERR "PPP: not interface or channel??\n"); 634 return -EINVAL; 635 } 636 637 mutex_lock(&ppp_mutex); 638 ppp = PF_TO_PPP(pf); 639 switch (cmd) { 640 case PPPIOCSMRU: 641 if (get_user(val, p)) 642 break; 643 ppp->mru = val; 644 err = 0; 645 break; 646 647 case PPPIOCSFLAGS: 648 if (get_user(val, p)) 649 break; 650 ppp_lock(ppp); 651 cflags = ppp->flags & ~val; 652 ppp->flags = val & SC_FLAG_BITS; 653 ppp_unlock(ppp); 654 if (cflags & SC_CCP_OPEN) 655 ppp_ccp_closed(ppp); 656 err = 0; 657 break; 658 659 case PPPIOCGFLAGS: 660 val = ppp->flags | ppp->xstate | ppp->rstate; 661 if (put_user(val, p)) 662 break; 663 err = 0; 664 break; 665 666 case PPPIOCSCOMPRESS: 667 err = ppp_set_compress(ppp, arg); 668 break; 669 670 case PPPIOCGUNIT: 671 if (put_user(ppp->file.index, p)) 672 break; 673 err = 0; 674 break; 675 676 case PPPIOCSDEBUG: 677 if (get_user(val, p)) 678 
break; 679 ppp->debug = val; 680 err = 0; 681 break; 682 683 case PPPIOCGDEBUG: 684 if (put_user(ppp->debug, p)) 685 break; 686 err = 0; 687 break; 688 689 case PPPIOCGIDLE: 690 idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ; 691 idle.recv_idle = (jiffies - ppp->last_recv) / HZ; 692 if (copy_to_user(argp, &idle, sizeof(idle))) 693 break; 694 err = 0; 695 break; 696 697 case PPPIOCSMAXCID: 698 if (get_user(val, p)) 699 break; 700 val2 = 15; 701 if ((val >> 16) != 0) { 702 val2 = val >> 16; 703 val &= 0xffff; 704 } 705 vj = slhc_init(val2+1, val+1); 706 if (!vj) { 707 printk(KERN_ERR "PPP: no memory (VJ compressor)\n"); 708 err = -ENOMEM; 709 break; 710 } 711 ppp_lock(ppp); 712 if (ppp->vj) 713 slhc_free(ppp->vj); 714 ppp->vj = vj; 715 ppp_unlock(ppp); 716 err = 0; 717 break; 718 719 case PPPIOCGNPMODE: 720 case PPPIOCSNPMODE: 721 if (copy_from_user(&npi, argp, sizeof(npi))) 722 break; 723 err = proto_to_npindex(npi.protocol); 724 if (err < 0) 725 break; 726 i = err; 727 if (cmd == PPPIOCGNPMODE) { 728 err = -EFAULT; 729 npi.mode = ppp->npmode[i]; 730 if (copy_to_user(argp, &npi, sizeof(npi))) 731 break; 732 } else { 733 ppp->npmode[i] = npi.mode; 734 /* we may be able to transmit more packets now (??) 
*/ 735 netif_wake_queue(ppp->dev); 736 } 737 err = 0; 738 break; 739 740#ifdef CONFIG_PPP_FILTER 741 case PPPIOCSPASS: 742 { 743 struct sock_filter *code; 744 err = get_filter(argp, &code); 745 if (err >= 0) { 746 ppp_lock(ppp); 747 kfree(ppp->pass_filter); 748 ppp->pass_filter = code; 749 ppp->pass_len = err; 750 ppp_unlock(ppp); 751 err = 0; 752 } 753 break; 754 } 755 case PPPIOCSACTIVE: 756 { 757 struct sock_filter *code; 758 err = get_filter(argp, &code); 759 if (err >= 0) { 760 ppp_lock(ppp); 761 kfree(ppp->active_filter); 762 ppp->active_filter = code; 763 ppp->active_len = err; 764 ppp_unlock(ppp); 765 err = 0; 766 } 767 break; 768 } 769#endif /* CONFIG_PPP_FILTER */ 770 771#ifdef CONFIG_PPP_MULTILINK 772 case PPPIOCSMRRU: 773 if (get_user(val, p)) 774 break; 775 ppp_recv_lock(ppp); 776 ppp->mrru = val; 777 ppp_recv_unlock(ppp); 778 err = 0; 779 break; 780#endif /* CONFIG_PPP_MULTILINK */ 781 782 default: 783 err = -ENOTTY; 784 } 785 mutex_unlock(&ppp_mutex); 786 return err; 787} 788 789static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 790 struct file *file, unsigned int cmd, unsigned long arg) 791{ 792 int unit, err = -EFAULT; 793 struct ppp *ppp; 794 struct channel *chan; 795 struct ppp_net *pn; 796 int __user *p = (int __user *)arg; 797 798 mutex_lock(&ppp_mutex); 799 switch (cmd) { 800 case PPPIOCNEWUNIT: 801 /* Create a new ppp unit */ 802 if (get_user(unit, p)) 803 break; 804 ppp = ppp_create_interface(net, unit, &err); 805 if (!ppp) 806 break; 807 file->private_data = &ppp->file; 808 ppp->owner = file; 809 err = -EFAULT; 810 if (put_user(ppp->file.index, p)) 811 break; 812 err = 0; 813 break; 814 815 case PPPIOCATTACH: 816 /* Attach to an existing ppp unit */ 817 if (get_user(unit, p)) 818 break; 819 err = -ENXIO; 820 pn = ppp_pernet(net); 821 mutex_lock(&pn->all_ppp_mutex); 822 ppp = ppp_find_unit(pn, unit); 823 if (ppp) { 824 atomic_inc(&ppp->file.refcnt); 825 file->private_data = &ppp->file; 826 err = 0; 827 } 828 
mutex_unlock(&pn->all_ppp_mutex); 829 break; 830 831 case PPPIOCATTCHAN: 832 if (get_user(unit, p)) 833 break; 834 err = -ENXIO; 835 pn = ppp_pernet(net); 836 spin_lock_bh(&pn->all_channels_lock); 837 chan = ppp_find_channel(pn, unit); 838 if (chan) { 839 atomic_inc(&chan->file.refcnt); 840 file->private_data = &chan->file; 841 err = 0; 842 } 843 spin_unlock_bh(&pn->all_channels_lock); 844 break; 845 846 default: 847 err = -ENOTTY; 848 } 849 mutex_unlock(&ppp_mutex); 850 return err; 851} 852 853static const struct file_operations ppp_device_fops = { 854 .owner = THIS_MODULE, 855 .read = ppp_read, 856 .write = ppp_write, 857 .poll = ppp_poll, 858 .unlocked_ioctl = ppp_ioctl, 859 .open = ppp_open, 860 .release = ppp_release, 861 .llseek = noop_llseek, 862}; 863 864static __net_init int ppp_init_net(struct net *net) 865{ 866 struct ppp_net *pn = net_generic(net, ppp_net_id); 867 868 idr_init(&pn->units_idr); 869 mutex_init(&pn->all_ppp_mutex); 870 871 INIT_LIST_HEAD(&pn->all_channels); 872 INIT_LIST_HEAD(&pn->new_channels); 873 874 spin_lock_init(&pn->all_channels_lock); 875 876 return 0; 877} 878 879static __net_exit void ppp_exit_net(struct net *net) 880{ 881 struct ppp_net *pn = net_generic(net, ppp_net_id); 882 883 idr_destroy(&pn->units_idr); 884} 885 886static struct pernet_operations ppp_net_ops = { 887 .init = ppp_init_net, 888 .exit = ppp_exit_net, 889 .id = &ppp_net_id, 890 .size = sizeof(struct ppp_net), 891}; 892 893#define PPP_MAJOR 108 894 895/* Called at boot time if ppp is compiled into the kernel, 896 or at module load time (from init_module) if compiled as a module. 
*/ 897static int __init ppp_init(void) 898{ 899 int err; 900 901 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); 902 903 err = register_pernet_device(&ppp_net_ops); 904 if (err) { 905 printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err); 906 goto out; 907 } 908 909 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); 910 if (err) { 911 printk(KERN_ERR "failed to register PPP device (%d)\n", err); 912 goto out_net; 913 } 914 915 ppp_class = class_create(THIS_MODULE, "ppp"); 916 if (IS_ERR(ppp_class)) { 917 err = PTR_ERR(ppp_class); 918 goto out_chrdev; 919 } 920 921 /* not a big deal if we fail here :-) */ 922 device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp"); 923 924 return 0; 925 926out_chrdev: 927 unregister_chrdev(PPP_MAJOR, "ppp"); 928out_net: 929 unregister_pernet_device(&ppp_net_ops); 930out: 931 return err; 932} 933 934/* 935 * Network interface unit routines. 936 */ 937static netdev_tx_t 938ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) 939{ 940 struct ppp *ppp = netdev_priv(dev); 941 int npi, proto; 942 unsigned char *pp; 943 944 npi = ethertype_to_npindex(ntohs(skb->protocol)); 945 if (npi < 0) 946 goto outf; 947 948 /* Drop, accept or reject the packet */ 949 switch (ppp->npmode[npi]) { 950 case NPMODE_PASS: 951 break; 952 case NPMODE_QUEUE: 953 /* it would be nice to have a way to tell the network 954 system to queue this one up for later. */ 955 goto outf; 956 case NPMODE_DROP: 957 case NPMODE_ERROR: 958 goto outf; 959 } 960 961 /* Put the 2-byte PPP protocol number on the front, 962 making sure there is room for the address and control fields. 
*/ 963 if (skb_cow_head(skb, PPP_HDRLEN)) 964 goto outf; 965 966 pp = skb_push(skb, 2); 967 proto = npindex_to_proto[npi]; 968 put_unaligned_be16(proto, pp); 969 970 netif_stop_queue(dev); 971 skb_queue_tail(&ppp->file.xq, skb); 972 ppp_xmit_process(ppp); 973 return NETDEV_TX_OK; 974 975 outf: 976 kfree_skb(skb); 977 ++dev->stats.tx_dropped; 978 return NETDEV_TX_OK; 979} 980 981static int 982ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 983{ 984 struct ppp *ppp = netdev_priv(dev); 985 int err = -EFAULT; 986 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; 987 struct ppp_stats stats; 988 struct ppp_comp_stats cstats; 989 char *vers; 990 991 switch (cmd) { 992 case SIOCGPPPSTATS: 993 ppp_get_stats(ppp, &stats); 994 if (copy_to_user(addr, &stats, sizeof(stats))) 995 break; 996 err = 0; 997 break; 998 999 case SIOCGPPPCSTATS: 1000 memset(&cstats, 0, sizeof(cstats)); 1001 if (ppp->xc_state) 1002 ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c); 1003 if (ppp->rc_state) 1004 ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d); 1005 if (copy_to_user(addr, &cstats, sizeof(cstats))) 1006 break; 1007 err = 0; 1008 break; 1009 1010 case SIOCGPPPVER: 1011 vers = PPP_VERSION; 1012 if (copy_to_user(addr, vers, strlen(vers) + 1)) 1013 break; 1014 err = 0; 1015 break; 1016 1017 default: 1018 err = -EINVAL; 1019 } 1020 1021 return err; 1022} 1023 1024static const struct net_device_ops ppp_netdev_ops = { 1025 .ndo_start_xmit = ppp_start_xmit, 1026 .ndo_do_ioctl = ppp_net_ioctl, 1027}; 1028 1029static void ppp_setup(struct net_device *dev) 1030{ 1031 dev->netdev_ops = &ppp_netdev_ops; 1032 dev->hard_header_len = PPP_HDRLEN; 1033 dev->mtu = PPP_MTU; 1034 dev->addr_len = 0; 1035 dev->tx_queue_len = 3; 1036 dev->type = ARPHRD_PPP; 1037 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1038 dev->features |= NETIF_F_NETNS_LOCAL; 1039 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1040} 1041 1042/* 1043 * Transmit-side routines. 
 */

/*
 * Called to do any work queued up on the transmit side
 * that can now be done.
 *
 * Drains the unit's transmit queue (ppp->file.xq) through
 * ppp_send_frame() until a frame gets stuck in xmit_pending (no
 * channel could take it yet).  Runs under the xmit lock; does nothing
 * once the unit is closing.
 */
static void
ppp_xmit_process(struct ppp *ppp)
{
	struct sk_buff *skb;

	ppp_xmit_lock(ppp);
	if (!ppp->closing) {
		/* first retry the frame left over from last time, if any */
		ppp_push(ppp);
		while (!ppp->xmit_pending &&
		       (skb = skb_dequeue(&ppp->file.xq)))
			ppp_send_frame(ppp, skb);
		/* If there's no work left to do, tell the core net
		   code that we can accept some more. */
		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
			netif_wake_queue(ppp->dev);
	}
	ppp_xmit_unlock(ppp);
}

/*
 * Run the transmit compressor on @skb.  Returns the skb to transmit:
 * the freshly allocated compressed copy on success, the original skb
 * if the data did not compress (or CCP is not up yet), or NULL - with
 * both skbs freed - if the compressor says the frame must be dropped.
 * Caller holds the xmit path lock.
 */
static inline struct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	int len;
	int new_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
	int compressor_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + PPP_HDRLEN;
	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
	if (!new_skb) {
		if (net_ratelimit())
			printk(KERN_ERR "PPP: no memory (comp pkt)\n");
		return NULL;
	}
	if (ppp->dev->hard_header_len > PPP_HDRLEN)
		skb_reserve(new_skb,
			    ppp->dev->hard_header_len - PPP_HDRLEN);

	/* compressor still expects A/C bytes in hdr */
	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
				   new_skb->data, skb->len + 2,
				   compressor_skb_size);
	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
		kfree_skb(skb);
		skb = new_skb;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off A/C bytes */
	} else if (len == 0) {
		/* didn't compress, or CCP not up yet */
		kfree_skb(new_skb);
		new_skb = skb;
	} else {
		/*
		 * (len < 0)
		 * MPPE requires that we do not send unencrypted
		 * frames.  The compressor will return -1 if we
		 * should drop the frame.  We cannot simply test
		 * the compress_proto because MPPE and MPPC share
		 * the same number.
		 */
		if (net_ratelimit())
			printk(KERN_ERR "ppp: compressor dropped pkt\n");
		kfree_skb(skb);
		kfree_skb(new_skb);
		new_skb = NULL;
	}
	return new_skb;
}

/*
 * Compress and send a frame.
 * The caller should have locked the xmit path,
 * and xmit_pending should be 0.
 *
 * Consumes @skb in all paths: it is sent, queued for pppd, stored in
 * xmit_pending, or freed on drop/filter rejection.
 */
static void
ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *new_skb;
	int len;
	unsigned char *cp;

	/* protocol numbers below 0x8000 are network-layer data packets */
	if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
		/* check if we should pass this packet */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		*skb_push(skb, 2) = 1;
		if (ppp->pass_filter &&
		    sk_run_filter(skb, ppp->pass_filter) == 0) {
			if (ppp->debug & 1)
				printk(KERN_DEBUG "PPP: outbound frame not passed\n");
			kfree_skb(skb);
			return;
		}
		/* if this packet passes the active filter, record the time */
		if (!(ppp->active_filter &&
		      sk_run_filter(skb, ppp->active_filter) == 0))
			ppp->last_xmit = jiffies;
		skb_pull(skb, 2);
#else
		/* for data packets, record the time */
		ppp->last_xmit = jiffies;
#endif /* CONFIG_PPP_FILTER */
	}

	++ppp->dev->stats.tx_packets;
	ppp->dev->stats.tx_bytes += skb->len - 2;

	switch (proto) {
	case PPP_IP:
		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
			break;
		/* try to do VJ TCP header compression */
		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
				    GFP_ATOMIC);
		if (!new_skb) {
			printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
			goto drop;
		}
		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
		cp = skb->data + 2;
		len = slhc_compress(ppp->vj, cp, skb->len - 2,
				    new_skb->data + 2, &cp,
				    !(ppp->flags & SC_NO_TCP_CCID));
		if (cp == skb->data + 2) {
			/* didn't compress */
			kfree_skb(new_skb);
		} else {
			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
				proto = PPP_VJC_COMP;
				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
			} else {
				proto = PPP_VJC_UNCOMP;
				cp[0] = skb->data[2];
			}
			kfree_skb(skb);
			skb = new_skb;
			cp = skb_put(skb, len + 2);
			cp[0] = 0;
			cp[1] = proto;
		}
		break;

	case PPP_CCP:
		/* peek at outbound CCP frames */
		ppp_ccp_peek(ppp, skb, 0);
		break;
	}

	/* try to do packet compression */
	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
	    proto != PPP_LCP && proto != PPP_CCP) {
		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
			if (net_ratelimit())
				printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
			goto drop;
		}
		skb = pad_compress_skb(ppp, skb);
		if (!skb)
			goto drop;
	}

	/*
	 * If we are waiting for traffic (demand dialling),
	 * queue it up for pppd to receive.
	 */
	if (ppp->flags & SC_LOOP_TRAFFIC) {
		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
			goto drop;
		skb_queue_tail(&ppp->file.rq, skb);
		wake_up_interruptible(&ppp->file.rwait);
		return;
	}

	ppp->xmit_pending = skb;
	ppp_push(ppp);
	return;

 drop:
	kfree_skb(skb);
	++ppp->dev->stats.tx_errors;
}

/*
 * Try to send the frame in xmit_pending.
 * The caller should have the xmit path locked.
 */
static void
ppp_push(struct ppp *ppp)
{
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *skb = ppp->xmit_pending;

	if (!skb)
		return;

	list = &ppp->channels;
	if (list_empty(list)) {
		/* nowhere to send the packet, just drop it */
		ppp->xmit_pending = NULL;
		kfree_skb(skb);
		return;
	}

	if ((ppp->flags & SC_MULTILINK) == 0) {
		/* not doing multilink: send it down the first channel */
		list = list->next;
		pch = list_entry(list, struct channel, clist);

		spin_lock_bh(&pch->downl);
		if (pch->chan) {
			/* start_xmit returning nonzero means the channel
			   consumed the skb; zero leaves it in xmit_pending
			   for a later retry via ppp_xmit_process(). */
			if (pch->chan->ops->start_xmit(pch->chan, skb))
				ppp->xmit_pending = NULL;
		} else {
			/* channel got unregistered */
			kfree_skb(skb);
			ppp->xmit_pending = NULL;
		}
		spin_unlock_bh(&pch->downl);
		return;
	}

#ifdef CONFIG_PPP_MULTILINK
	/* Multilink: fragment the packet over as many links
	   as can take the packet at the moment. */
	if (!ppp_mp_explode(ppp, skb))
		return;
#endif /* CONFIG_PPP_MULTILINK */

	/* ppp_mp_explode consumed the frame (or we have no multilink
	   support): clear xmit_pending and release our reference */
	ppp->xmit_pending = NULL;
	kfree_skb(skb);
}

#ifdef CONFIG_PPP_MULTILINK
/* sysfs-tweakable: whether to PFC-compress the protocol field before
   splitting a frame into multilink fragments */
static bool mp_protocol_compress __read_mostly = true;
module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mp_protocol_compress,
		 "compress protocol id in multilink fragments");

/*
 * Divide a packet to be transmitted into fragments and
 * send them out the individual links.
 */
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
	int len, totlen;
	int i, bits, hdrlen, mtu;
	int flen;
	int navail, nfree, nzero;
	int nbigger;
	int totspeed;
	int totfree;
	unsigned char *p, *q;
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *frag;
	struct ppp_channel *chan;

	totspeed = 0;	/* total bitrate of the bundle */
	nfree = 0;	/* # channels which have no packet already queued */
	navail = 0;	/* total # of usable channels (not deregistered) */
	nzero = 0;	/* number of channels with zero speed associated */
	totfree = 0;	/* total # of channels available and
			 * having no queued packets before
			 * starting the fragmentation */

	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
	i = 0;
	/* first pass: classify every channel and gather the totals used
	   by the speed-proportional split below */
	list_for_each_entry(pch, &ppp->channels, clist) {
		if (pch->chan) {
			pch->avail = 1;
			navail++;
			pch->speed = pch->chan->speed;
		} else {
			pch->avail = 0;
		}
		if (pch->avail) {
			if (skb_queue_empty(&pch->file.xq) ||
			    !pch->had_frag) {
				if (pch->speed == 0)
					nzero++;
				else
					totspeed += pch->speed;

				/* avail == 2 marks a "free" channel */
				pch->avail = 2;
				++nfree;
				++totfree;
			}
			if (!pch->had_frag && i < ppp->nxchan)
				ppp->nxchan = i;
		}
		++i;
	}
	/*
	 * Don't start sending this packet unless at least half of
	 * the channels are free.  This gives much better TCP
	 * performance if we have a lot of channels.
	 */
	if (nfree == 0 || nfree < navail / 2)
		return 0;	/* can't take now, leave it in xmit_pending */

	/* Do protocol field compression */
	p = skb->data;
	len = skb->len;
	if (*p == 0 && mp_protocol_compress) {
		++p;
		--len;
	}

	totlen = len;
	/* when splitting evenly, the first len % nfree fragments carry
	   one extra byte */
	nbigger = len % nfree;

	/* skip to the channel after the one we last used
	   and start at that one */
	list = &ppp->channels;
	for (i = 0; i < ppp->nxchan; ++i) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			break;
		}
	}

	/* create a fragment for each channel */
	bits = B;	/* first fragment carries the Begin bit */
	while (len > 0) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			continue;
		}
		pch = list_entry(list, struct channel, clist);
		++i;
		if (!pch->avail)
			continue;

		/*
		 * Skip this channel if it has a fragment pending already and
		 * we haven't given a fragment to all of the free channels.
		 */
		if (pch->avail == 1) {
			if (nfree > 0)
				continue;
		} else {
			pch->avail = 1;
		}

		/* check the channel's mtu and whether it is still attached. */
		spin_lock_bh(&pch->downl);
		if (pch->chan == NULL) {
			/* can't use this channel, it's being deregistered */
			if (pch->speed == 0)
				nzero--;
			else
				totspeed -= pch->speed;

			spin_unlock_bh(&pch->downl);
			pch->avail = 0;
			totlen = len;
			totfree--;
			nfree--;
			if (--navail == 0)
				break;
			continue;
		}

		/*
		 * if the channel speed is not set divide
		 * the packet evenly among the free channels;
		 * otherwise divide it according to the speed
		 * of the channel we are going to transmit on
		 */
		flen = len;
		if (nfree > 0) {
			if (pch->speed == 0) {
				flen = len/nfree;
				if (nbigger > 0) {
					flen++;
					nbigger--;
				}
			} else {
				flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
					((totspeed*totfree)/pch->speed)) - hdrlen;
				if (nbigger > 0) {
					flen += ((totfree - nzero)*pch->speed)/totspeed;
					nbigger -= ((totfree - nzero)*pch->speed)/
						   totspeed;
				}
			}
			nfree--;
		}

		/*
		 * check if we are on the last channel or
		 * we exceeded the length of the data to
		 * fragment
		 */
		if ((nfree <= 0) || (flen > len))
			flen = len;
		/*
		 * it is not worth to tx on slow channels:
		 * in that case from the resulting flen according to the
		 * above formula will be equal or less than zero.
		 * Skip the channel in this case
		 */
		if (flen <= 0) {
			pch->avail = 2;
			spin_unlock_bh(&pch->downl);
			continue;
		}

		mtu = pch->chan->mtu - hdrlen;
		if (mtu < 4)
			mtu = 4;
		if (flen > mtu)
			flen = mtu;
		if (flen == len)
			bits |= E;	/* last fragment: set End bit */
		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
		if (!frag)
			goto noskb;
		q = skb_put(frag, flen + hdrlen);

		/* make the MP header */
		put_unaligned_be16(PPP_MP, q);
		if (ppp->flags & SC_MP_XSHORTSEQ) {
			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
			q[3] = ppp->nxseq;
		} else {
			q[2] = bits;
			q[3] = ppp->nxseq >> 16;
			q[4] = ppp->nxseq >> 8;
			q[5] = ppp->nxseq;
		}

		memcpy(q + hdrlen, p, flen);

		/* try to send it down the channel */
		chan = pch->chan;
		if (!skb_queue_empty(&pch->file.xq) ||
		    !chan->ops->start_xmit(chan, frag))
			skb_queue_tail(&pch->file.xq, frag);
		pch->had_frag = 1;
		p += flen;
		len -= flen;
		++ppp->nxseq;
		bits = 0;
		spin_unlock_bh(&pch->downl);
	}
	ppp->nxchan = i;

	return 1;

 noskb:
	spin_unlock_bh(&pch->downl);
	if (ppp->debug & 1)
		printk(KERN_ERR "PPP: no memory (fragment)\n");
	++ppp->dev->stats.tx_errors;
	++ppp->nxseq;
	return 1;	/* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */

/*
 * Try to send data out on a channel.
 */
static void
ppp_channel_push(struct channel *pch)
{
	struct sk_buff *skb;
	struct ppp *ppp;

	spin_lock_bh(&pch->downl);
	if (pch->chan) {
		while (!skb_queue_empty(&pch->file.xq)) {
			skb = skb_dequeue(&pch->file.xq);
			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
				/* put the packet back and try again later */
				skb_queue_head(&pch->file.xq, skb);
				break;
			}
		}
	} else {
		/* channel got deregistered */
		skb_queue_purge(&pch->file.xq);
	}
	spin_unlock_bh(&pch->downl);
	/* see if there is anything from the attached unit to be sent */
	if (skb_queue_empty(&pch->file.xq)) {
		read_lock_bh(&pch->upl);
		ppp = pch->ppp;
		if (ppp)
			ppp_xmit_process(ppp);
		read_unlock_bh(&pch->upl);
	}
}

/*
 * Receive-side routines.
 */

/* Per-skb multilink reassembly state, stored in skb->cb */
struct ppp_mp_skb_parm {
	u32		sequence;	/* MP sequence number */
	u8		BEbits;		/* B and E (begin/end) bits */
};
#define PPP_MP_CB(skb)	((struct ppp_mp_skb_parm *)((skb)->cb))

/* Hand a received frame to the unit, under the recv lock; frames
   arriving while the unit is closing are dropped. */
static inline void
ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	ppp_recv_lock(ppp);
	if (!ppp->closing)
		ppp_receive_frame(ppp, skb, pch);
	else
		kfree_skb(skb);
	ppp_recv_unlock(ppp);
}

/*
 * Entry point for channel drivers delivering a received frame.
 * Control frames (protocol >= 0xc000, or CCP fragments), or any frame
 * when no unit is attached, are queued for pppd on the channel's own
 * read queue; everything else goes to the attached unit.
 */
void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct channel *pch = chan->ppp;
	int proto;

	if (!pch) {
		kfree_skb(skb);
		return;
	}

	read_lock_bh(&pch->upl);
	if (!pskb_may_pull(skb, 2)) {
		/* too short to carry a protocol field */
		kfree_skb(skb);
		if (pch->ppp) {
			++pch->ppp->dev->stats.rx_length_errors;
			ppp_receive_error(pch->ppp);
		}
		goto done;
	}

	proto = PPP_PROTO(skb);
	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
		/* put it on the channel queue */
		skb_queue_tail(&pch->file.rq, skb);
		/* drop old frames if queue too long */
		while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
		       (skb = skb_dequeue(&pch->file.rq)))
			kfree_skb(skb);
		wake_up_interruptible(&pch->file.rwait);
	} else {
		ppp_do_recv(pch->ppp, skb, pch);
	}

done:
	read_unlock_bh(&pch->upl);
}

/* Put a 0-length skb in the receive queue as an error indication */
void
ppp_input_error(struct ppp_channel *chan, int code)
{
	struct channel *pch = chan->ppp;
	struct sk_buff *skb;

	if (!pch)
		return;

	read_lock_bh(&pch->upl);
	if (pch->ppp) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (skb) {
			skb->len = 0;		/* probably unnecessary */
			skb->cb[0] = code;	/* error code for pppd */
			ppp_do_recv(pch->ppp, skb, pch);
		}
	}
	read_unlock_bh(&pch->upl);
}

/*
 * We come in here to process a received frame.
 * The receive side of the ppp unit is locked.
 */
static void
ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	/* note: a 0-length skb is used as an error indication */
	if (skb->len > 0) {
#ifdef CONFIG_PPP_MULTILINK
		/* XXX do channel-level decompression here */
		if (PPP_PROTO(skb) == PPP_MP)
			ppp_receive_mp_frame(ppp, skb, pch);
		else
#endif /* CONFIG_PPP_MULTILINK */
			ppp_receive_nonmp_frame(ppp, skb);
	} else {
		kfree_skb(skb);
		ppp_receive_error(ppp);
	}
}

/* Account a receive error and tell the VJ decompressor to toss its
   state for the current packet. */
static void
ppp_receive_error(struct ppp *ppp)
{
	++ppp->dev->stats.rx_errors;
	if (ppp->vj)
		slhc_toss(ppp->vj);
}

/*
 * Process a complete (non-multilink) received frame: decompress,
 * undo VJ header compression, run the receive filters, and either
 * hand the packet to the network stack or queue it for pppd.
 * Consumes @skb in all paths.
 */
static void
ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *ns;
	int proto, len, npi;

	/*
	 * Decompress the frame, if compressed.
	 * Note that some decompressors need to see uncompressed frames
	 * that come in as well as compressed frames.
	 */
	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
	    (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
		skb = ppp_decompress_frame(ppp, skb);

	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
		goto err;

	proto = PPP_PROTO(skb);
	switch (proto) {
	case PPP_VJC_COMP:
		/* decompress VJ compressed packets */
		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
			goto err;

		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
			/* copy to a new sk_buff with more tailroom */
			ns = dev_alloc_skb(skb->len + 128);
			if (!ns) {
				printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
				goto err;
			}
			skb_reserve(ns, 2);
			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
			kfree_skb(skb);
			skb = ns;
		}
		else
			skb->ip_summed = CHECKSUM_NONE;

		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
		if (len <= 0) {
			printk(KERN_DEBUG "PPP: VJ decompression error\n");
			goto err;
		}
		len += 2;
		if (len > skb->len)
			skb_put(skb, len - skb->len);
		else if (len < skb->len)
			skb_trim(skb, len);
		proto = PPP_IP;
		break;

	case PPP_VJC_UNCOMP:
		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
			goto err;

		/* Until we fix the decompressor need to make sure
		 * data portion is linear.
		 */
		if (!pskb_may_pull(skb, skb->len))
			goto err;

		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
			printk(KERN_ERR "PPP: VJ uncompressed error\n");
			goto err;
		}
		proto = PPP_IP;
		break;

	case PPP_CCP:
		ppp_ccp_peek(ppp, skb, 1);
		break;
	}

	++ppp->dev->stats.rx_packets;
	ppp->dev->stats.rx_bytes += skb->len - 2;

	npi = proto_to_npindex(proto);
	if (npi < 0) {
		/* control or unknown frame - pass it to pppd */
		skb_queue_tail(&ppp->file.rq, skb);
		/* limit queue length by dropping old frames */
		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
		       (skb = skb_dequeue(&ppp->file.rq)))
			kfree_skb(skb);
		/* wake up any process polling or blocking on read */
		wake_up_interruptible(&ppp->file.rwait);

	} else {
		/* network protocol frame - give it to the kernel */

#ifdef CONFIG_PPP_FILTER
		/* check if the packet passes the pass and active filters */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		if (ppp->pass_filter || ppp->active_filter) {
			if (skb_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
				goto err;

			*skb_push(skb, 2) = 0;
			if (ppp->pass_filter &&
			    sk_run_filter(skb, ppp->pass_filter) == 0) {
				if (ppp->debug & 1)
					printk(KERN_DEBUG "PPP: inbound frame "
					       "not passed\n");
				kfree_skb(skb);
				return;
			}
			if (!(ppp->active_filter &&
			      sk_run_filter(skb, ppp->active_filter) == 0))
				ppp->last_recv = jiffies;
			__skb_pull(skb, 2);
		} else
#endif /* CONFIG_PPP_FILTER */
			ppp->last_recv = jiffies;

		if ((ppp->dev->flags & IFF_UP) == 0 ||
		    ppp->npmode[npi] != NPMODE_PASS) {
			kfree_skb(skb);
		} else {
			/* chop off protocol */
			skb_pull_rcsum(skb, 2);
			skb->dev = ppp->dev;
			skb->protocol = htons(npindex_to_ethertype[npi]);
			skb_reset_mac_header(skb);
			netif_rx(skb);
		}
	}
	return;

 err:
	kfree_skb(skb);
	ppp_receive_error(ppp);
}

/*
 * Run the receive decompressor on @skb.  Returns the decompressed skb
 * (original freed) on success, or the original skb - with SC_DC_ERROR
 * set so it is passed to pppd as an error indication - on failure.
 */
static struct sk_buff *
ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *ns;
	int len;

	/* Until we fix all the decompressor's need to make sure
	 * data portion is linear.
	 */
	if (!pskb_may_pull(skb, skb->len))
		goto err;

	if (proto == PPP_COMP) {
		int obuff_size;

		switch(ppp->rcomp->compress_proto) {
		case CI_MPPE:
			obuff_size = ppp->mru + PPP_HDRLEN + 1;
			break;
		default:
			obuff_size = ppp->mru + PPP_HDRLEN;
			break;
		}

		ns = dev_alloc_skb(obuff_size);
		if (!ns) {
			printk(KERN_ERR "ppp_decompress_frame: no memory\n");
			goto err;
		}
		/* the decompressor still expects the A/C bytes in the hdr */
		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
				skb->len + 2, ns->data, obuff_size);
		if (len < 0) {
			/* Pass the compressed frame to pppd as an
			   error indication. */
			if (len == DECOMP_FATALERROR)
				ppp->rstate |= SC_DC_FERROR;
			kfree_skb(ns);
			goto err;
		}

		kfree_skb(skb);
		skb = ns;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off the A/C bytes */

	} else {
		/* Uncompressed frame - pass to decompressor so it
		   can update its dictionary if necessary. */
		if (ppp->rcomp->incomp)
			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
					   skb->len + 2);
	}

	return skb;

 err:
	ppp->rstate |= SC_DC_ERROR;
	ppp_receive_error(ppp);
	return skb;
}

#ifdef CONFIG_PPP_MULTILINK
/*
 * Receive a multilink frame.
 * We put it on the reconstruction queue and then pull off
 * as many completed frames as we can.
 */
static void
ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	u32 mask, seq;
	struct channel *ch;
	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;

	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
		goto err;		/* no good, throw it away */

	/* Decode sequence number and begin/end bits */
	if (ppp->flags & SC_MP_SHORTSEQ) {
		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
		mask = 0xfff;
	} else {
		seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
		mask = 0xffffff;
	}
	PPP_MP_CB(skb)->BEbits = skb->data[2];
	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */

	/*
	 * Do protocol ID decompression on the first fragment of each packet.
	 */
	if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
		*skb_push(skb, 1) = 0;

	/*
	 * Expand sequence number to 32 bits, making it as close
	 * as possible to ppp->minseq.
	 */
	seq |= ppp->minseq & ~mask;
	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
		seq += mask + 1;
	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
		seq -= mask + 1;	/* should never happen */
	PPP_MP_CB(skb)->sequence = seq;
	pch->lastseq = seq;

	/*
	 * If this packet comes before the next one we were expecting,
	 * drop it.
	 */
	if (seq_before(seq, ppp->nextseq)) {
		kfree_skb(skb);
		++ppp->dev->stats.rx_dropped;
		ppp_receive_error(ppp);
		return;
	}

	/*
	 * Reevaluate minseq, the minimum over all channels of the
	 * last sequence number received on each channel.  Because of
	 * the increasing sequence number rule, we know that any fragment
	 * before `minseq' which hasn't arrived is never going to arrive.
	 * The list of channels can't change because we have the receive
	 * side of the ppp unit locked.
	 */
	list_for_each_entry(ch, &ppp->channels, clist) {
		if (seq_before(ch->lastseq, seq))
			seq = ch->lastseq;
	}
	if (seq_before(ppp->minseq, seq))
		ppp->minseq = seq;

	/* Put the fragment on the reconstruction queue */
	ppp_mp_insert(ppp, skb);

	/* If the queue is getting long, don't wait any longer for packets
	   before the start of the queue. */
	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
		struct sk_buff *mskb = skb_peek(&ppp->mrq);
		if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
			ppp->minseq = PPP_MP_CB(mskb)->sequence;
	}

	/* Pull completed packets off the queue and receive them. */
	while ((skb = ppp_mp_reconstruct(ppp))) {
		if (pskb_may_pull(skb, 2))
			ppp_receive_nonmp_frame(ppp, skb);
		else {
			++ppp->dev->stats.rx_length_errors;
			kfree_skb(skb);
			ppp_receive_error(ppp);
		}
	}

	return;

 err:
	kfree_skb(skb);
	ppp_receive_error(ppp);
}

/*
 * Insert a fragment on the MP reconstruction queue.
 * The queue is ordered by increasing sequence number.
 */
static void
ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *p;
	struct sk_buff_head *list = &ppp->mrq;
	u32 seq = PPP_MP_CB(skb)->sequence;

	/* N.B. we don't need to lock the list lock because we have the
	   ppp unit receive-side lock. */
	skb_queue_walk(list, p) {
		if (seq_before(seq, PPP_MP_CB(p)->sequence))
			break;
	}
	__skb_queue_before(list, p, skb);
}

/*
 * Reconstruct a packet from the MP fragment queue.
 * We go through increasing sequence numbers until we find a
 * complete packet, or we get to the sequence number for a fragment
 * which hasn't arrived but might still do so.
 */
static struct sk_buff *
ppp_mp_reconstruct(struct ppp *ppp)
{
	u32 seq = ppp->nextseq;
	u32 minseq = ppp->minseq;
	struct sk_buff_head *list = &ppp->mrq;
	struct sk_buff *p, *next;
	struct sk_buff *head, *tail;	/* first/last fragment of a
					   candidate complete packet */
	struct sk_buff *skb = NULL;
	int lost = 0, len = 0;

	if (ppp->mrru == 0)	/* do nothing until mrru is set */
		return NULL;
	head = list->next;
	tail = NULL;
	for (p = head; p != (struct sk_buff *) list; p = next) {
		next = p->next;
		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
			/* this can't happen, anyway ignore the skb */
			printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
			       PPP_MP_CB(p)->sequence, seq);
			head = next;
			continue;
		}
		if (PPP_MP_CB(p)->sequence != seq) {
			/* Fragment `seq' is missing.  If it is after
			   minseq, it might arrive later, so stop here. */
			if (seq_after(seq, minseq))
				break;
			/* Fragment `seq' is lost, keep going. */
			lost = 1;
			seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
				minseq + 1: PPP_MP_CB(p)->sequence;
			next = p;	/* re-examine p at the new seq */
			continue;
		}

		/*
		 * At this point we know that all the fragments from
		 * ppp->nextseq to seq are either present or lost.
		 * Also, there are no complete packets in the queue
		 * that have no missing fragments and end before this
		 * fragment.
		 */

		/* B bit set indicates this fragment starts a packet */
		if (PPP_MP_CB(p)->BEbits & B) {
			head = p;
			lost = 0;
			len = 0;
		}

		len += p->len;

		/* Got a complete packet yet? */
		if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
		    (PPP_MP_CB(head)->BEbits & B)) {
			if (len > ppp->mrru + 2) {
				++ppp->dev->stats.rx_length_errors;
				printk(KERN_DEBUG "PPP: reconstructed packet"
				       " is too long (%d)\n", len);
			} else if (p == head) {
				/* fragment is complete packet - reuse skb */
				tail = p;
				skb = skb_get(p);
				break;
			} else if ((skb = dev_alloc_skb(len)) == NULL) {
				++ppp->dev->stats.rx_missed_errors;
				printk(KERN_DEBUG "PPP: no memory for "
				       "reconstructed packet");
			} else {
				tail = p;
				break;
			}
			/* oversize or alloc failure: skip this packet */
			ppp->nextseq = seq + 1;
		}

		/*
		 * If this is the ending fragment of a packet,
		 * and we haven't found a complete valid packet yet,
		 * we can discard up to and including this fragment.
		 */
		if (PPP_MP_CB(p)->BEbits & E)
			head = next;

		++seq;
	}

	/* If we have a complete packet, copy it all into one skb. */
	if (tail != NULL) {
		/* If we have discarded any fragments,
		   signal a receive error. */
		if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
			if (ppp->debug & 1)
				printk(KERN_DEBUG " missed pkts %u..%u\n",
				       ppp->nextseq,
				       PPP_MP_CB(head)->sequence-1);
			++ppp->dev->stats.rx_dropped;
			ppp_receive_error(ppp);
		}

		if (head != tail)
			/* copy to a single skb */
			for (p = head; p != tail->next; p = p->next)
				skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
		ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
		head = tail->next;
	}

	/* Discard all the skbuffs that we have copied the data out of
	   or that we can't use. */
	while ((p = list->next) != head) {
		__skb_unlink(p, list);
		kfree_skb(p);
	}

	return skb;
}
#endif /* CONFIG_PPP_MULTILINK */

/*
 * Channel interface.
 */

/* Create a new, unattached ppp channel.
*/ 2111int ppp_register_channel(struct ppp_channel *chan) 2112{ 2113 return ppp_register_net_channel(current->nsproxy->net_ns, chan); 2114} 2115 2116/* Create a new, unattached ppp channel for specified net. */ 2117int ppp_register_net_channel(struct net *net, struct ppp_channel *chan) 2118{ 2119 struct channel *pch; 2120 struct ppp_net *pn; 2121 2122 pch = kzalloc(sizeof(struct channel), GFP_KERNEL); 2123 if (!pch) 2124 return -ENOMEM; 2125 2126 pn = ppp_pernet(net); 2127 2128 pch->ppp = NULL; 2129 pch->chan = chan; 2130 pch->chan_net = net; 2131 chan->ppp = pch; 2132 init_ppp_file(&pch->file, CHANNEL); 2133 pch->file.hdrlen = chan->hdrlen; 2134#ifdef CONFIG_PPP_MULTILINK 2135 pch->lastseq = -1; 2136#endif /* CONFIG_PPP_MULTILINK */ 2137 init_rwsem(&pch->chan_sem); 2138 spin_lock_init(&pch->downl); 2139 rwlock_init(&pch->upl); 2140 2141 spin_lock_bh(&pn->all_channels_lock); 2142 pch->file.index = ++pn->last_channel_index; 2143 list_add(&pch->list, &pn->new_channels); 2144 atomic_inc(&channel_count); 2145 spin_unlock_bh(&pn->all_channels_lock); 2146 2147 return 0; 2148} 2149 2150/* 2151 * Return the index of a channel. 2152 */ 2153int ppp_channel_index(struct ppp_channel *chan) 2154{ 2155 struct channel *pch = chan->ppp; 2156 2157 if (pch) 2158 return pch->file.index; 2159 return -1; 2160} 2161 2162/* 2163 * Return the PPP unit number to which a channel is connected. 2164 */ 2165int ppp_unit_number(struct ppp_channel *chan) 2166{ 2167 struct channel *pch = chan->ppp; 2168 int unit = -1; 2169 2170 if (pch) { 2171 read_lock_bh(&pch->upl); 2172 if (pch->ppp) 2173 unit = pch->ppp->file.index; 2174 read_unlock_bh(&pch->upl); 2175 } 2176 return unit; 2177} 2178 2179/* 2180 * Return the PPP device interface name of a channel. 
 */
char *ppp_dev_name(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;
	char *name = NULL;

	if (pch) {
		read_lock_bh(&pch->upl);
		if (pch->ppp && pch->ppp->dev)
			name = pch->ppp->dev->name;
		read_unlock_bh(&pch->upl);
	}
	return name;
}


/*
 * Disconnect a channel from the generic layer.
 * This must be called in process context.
 */
void
ppp_unregister_channel(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;
	struct ppp_net *pn;

	if (!pch)
		return;		/* should never happen */

	chan->ppp = NULL;

	/*
	 * This ensures that we have returned from any calls into
	 * the channel's start_xmit or ioctl routine before we proceed.
	 */
	down_write(&pch->chan_sem);
	spin_lock_bh(&pch->downl);
	pch->chan = NULL;
	spin_unlock_bh(&pch->downl);
	up_write(&pch->chan_sem);
	ppp_disconnect_channel(pch);

	/* remove it from the per-net channel list */
	pn = ppp_pernet(pch->chan_net);
	spin_lock_bh(&pn->all_channels_lock);
	list_del(&pch->list);
	spin_unlock_bh(&pn->all_channels_lock);

	/* mark dead, wake readers, and free once the last ref drops */
	pch->file.dead = 1;
	wake_up_interruptible(&pch->file.rwait);
	if (atomic_dec_and_test(&pch->file.refcnt))
		ppp_destroy_channel(pch);
}

/*
 * Callback from a channel when it can accept more to transmit.
 * This should be called at BH/softirq level, not interrupt level.
 */
void
ppp_output_wakeup(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;

	if (!pch)
		return;
	ppp_channel_push(pch);
}

/*
 * Compression control.
 */

/* Process the PPPIOCSCOMPRESS ioctl.
*/ 2253static int 2254ppp_set_compress(struct ppp *ppp, unsigned long arg) 2255{ 2256 int err; 2257 struct compressor *cp, *ocomp; 2258 struct ppp_option_data data; 2259 void *state, *ostate; 2260 unsigned char ccp_option[CCP_MAX_OPTION_LENGTH]; 2261 2262 err = -EFAULT; 2263 if (copy_from_user(&data, (void __user *) arg, sizeof(data)) || 2264 (data.length <= CCP_MAX_OPTION_LENGTH && 2265 copy_from_user(ccp_option, (void __user *) data.ptr, data.length))) 2266 goto out; 2267 err = -EINVAL; 2268 if (data.length > CCP_MAX_OPTION_LENGTH || 2269 ccp_option[1] < 2 || ccp_option[1] > data.length) 2270 goto out; 2271 2272 cp = try_then_request_module( 2273 find_compressor(ccp_option[0]), 2274 "ppp-compress-%d", ccp_option[0]); 2275 if (!cp) 2276 goto out; 2277 2278 err = -ENOBUFS; 2279 if (data.transmit) { 2280 state = cp->comp_alloc(ccp_option, data.length); 2281 if (state) { 2282 ppp_xmit_lock(ppp); 2283 ppp->xstate &= ~SC_COMP_RUN; 2284 ocomp = ppp->xcomp; 2285 ostate = ppp->xc_state; 2286 ppp->xcomp = cp; 2287 ppp->xc_state = state; 2288 ppp_xmit_unlock(ppp); 2289 if (ostate) { 2290 ocomp->comp_free(ostate); 2291 module_put(ocomp->owner); 2292 } 2293 err = 0; 2294 } else 2295 module_put(cp->owner); 2296 2297 } else { 2298 state = cp->decomp_alloc(ccp_option, data.length); 2299 if (state) { 2300 ppp_recv_lock(ppp); 2301 ppp->rstate &= ~SC_DECOMP_RUN; 2302 ocomp = ppp->rcomp; 2303 ostate = ppp->rc_state; 2304 ppp->rcomp = cp; 2305 ppp->rc_state = state; 2306 ppp_recv_unlock(ppp); 2307 if (ostate) { 2308 ocomp->decomp_free(ostate); 2309 module_put(ocomp->owner); 2310 } 2311 err = 0; 2312 } else 2313 module_put(cp->owner); 2314 } 2315 2316 out: 2317 return err; 2318} 2319 2320/* 2321 * Look at a CCP packet and update our state accordingly. 2322 * We assume the caller has the xmit or recv path locked. 
 */
static void
ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
{
	unsigned char *dp;
	int len;

	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
		return;	/* no header */
	dp = skb->data + 2;	/* skip the PPP protocol field */

	switch (CCP_CODE(dp)) {
	case CCP_CONFREQ:

		/* A ConfReq starts negotiation of compression
		 * in one direction of transmission,
		 * and hence brings it down...but which way?
		 *
		 * Remember:
		 * A ConfReq indicates what the sender would like to receive
		 */
		if(inbound)
			/* He is proposing what I should send */
			ppp->xstate &= ~SC_COMP_RUN;
		else
			/* I am proposing to what he should send */
			ppp->rstate &= ~SC_DECOMP_RUN;

		break;

	case CCP_TERMREQ:
	case CCP_TERMACK:
		/*
		 * CCP is going down, both directions of transmission
		 */
		ppp->rstate &= ~SC_DECOMP_RUN;
		ppp->xstate &= ~SC_COMP_RUN;
		break;

	case CCP_CONFACK:
		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
			break;
		len = CCP_LENGTH(dp);
		if (!pskb_may_pull(skb, len + 2))
			return;	/* too short */
		dp += CCP_HDRLEN;
		len -= CCP_HDRLEN;
		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
			break;
		if (inbound) {
			/* we will start receiving compressed packets */
			if (!ppp->rc_state)
				break;
			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
					ppp->file.index, 0, ppp->mru, ppp->debug)) {
				ppp->rstate |= SC_DECOMP_RUN;
				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
			}
		} else {
			/* we will soon start sending compressed packets */
			if (!ppp->xc_state)
				break;
			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
					ppp->file.index, 0, ppp->debug))
				ppp->xstate |= SC_COMP_RUN;
		}
		break;

	case CCP_RESETACK:
		/* reset the [de]compressor */
		if ((ppp->flags & SC_CCP_UP) == 0)
			break;
		if (inbound) {
			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
				ppp->rcomp->decomp_reset(ppp->rc_state);
				ppp->rstate &= ~SC_DC_ERROR;
			}
		} else {
			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
				ppp->xcomp->comp_reset(ppp->xc_state);
		}
		break;
	}
}

/* Free up compression resources. */
static void
ppp_ccp_closed(struct ppp *ppp)
{
	void *xstate, *rstate;
	struct compressor *xcomp, *rcomp;

	/* detach the state under the unit lock, free it outside */
	ppp_lock(ppp);
	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
	ppp->xstate = 0;
	xcomp = ppp->xcomp;
	xstate = ppp->xc_state;
	ppp->xc_state = NULL;
	ppp->rstate = 0;
	rcomp = ppp->rcomp;
	rstate = ppp->rc_state;
	ppp->rc_state = NULL;
	ppp_unlock(ppp);

	if (xstate) {
		xcomp->comp_free(xstate);
		module_put(xcomp->owner);
	}
	if (rstate) {
		rcomp->decomp_free(rstate);
		module_put(rcomp->owner);
	}
}

/* List of compressors. */
static LIST_HEAD(compressor_list);
static DEFINE_SPINLOCK(compressor_list_lock);

struct compressor_entry {
	struct list_head list;
	struct compressor *comp;
};

/* Find the list entry for a compression protocol number.
   Caller must hold compressor_list_lock. */
static struct compressor_entry *
find_comp_entry(int proto)
{
	struct compressor_entry *ce;

	list_for_each_entry(ce, &compressor_list, list) {
		if (ce->comp->compress_proto == proto)
			return ce;
	}
	return NULL;
}

/* Register a compressor */
int
ppp_register_compressor(struct compressor *cp)
{
	struct compressor_entry *ce;
	int ret;
	spin_lock(&compressor_list_lock);
	ret = -EEXIST;
	if (find_comp_entry(cp->compress_proto))
		goto out;
	ret = -ENOMEM;
	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
	if (!ce)
		goto out;
	ret = 0;
	ce->comp = cp;
	list_add(&ce->list, &compressor_list);
 out:
	spin_unlock(&compressor_list_lock);
	return ret;
}

/* Unregister a
compressor */ 2481void 2482ppp_unregister_compressor(struct compressor *cp) 2483{ 2484 struct compressor_entry *ce; 2485 2486 spin_lock(&compressor_list_lock); 2487 ce = find_comp_entry(cp->compress_proto); 2488 if (ce && ce->comp == cp) { 2489 list_del(&ce->list); 2490 kfree(ce); 2491 } 2492 spin_unlock(&compressor_list_lock); 2493} 2494 2495/* Find a compressor. */ 2496static struct compressor * 2497find_compressor(int type) 2498{ 2499 struct compressor_entry *ce; 2500 struct compressor *cp = NULL; 2501 2502 spin_lock(&compressor_list_lock); 2503 ce = find_comp_entry(type); 2504 if (ce) { 2505 cp = ce->comp; 2506 if (!try_module_get(cp->owner)) 2507 cp = NULL; 2508 } 2509 spin_unlock(&compressor_list_lock); 2510 return cp; 2511} 2512 2513/* 2514 * Miscelleneous stuff. 2515 */ 2516 2517static void 2518ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) 2519{ 2520 struct slcompress *vj = ppp->vj; 2521 2522 memset(st, 0, sizeof(*st)); 2523 st->p.ppp_ipackets = ppp->dev->stats.rx_packets; 2524 st->p.ppp_ierrors = ppp->dev->stats.rx_errors; 2525 st->p.ppp_ibytes = ppp->dev->stats.rx_bytes; 2526 st->p.ppp_opackets = ppp->dev->stats.tx_packets; 2527 st->p.ppp_oerrors = ppp->dev->stats.tx_errors; 2528 st->p.ppp_obytes = ppp->dev->stats.tx_bytes; 2529 if (!vj) 2530 return; 2531 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; 2532 st->vj.vjs_compressed = vj->sls_o_compressed; 2533 st->vj.vjs_searches = vj->sls_o_searches; 2534 st->vj.vjs_misses = vj->sls_o_misses; 2535 st->vj.vjs_errorin = vj->sls_i_error; 2536 st->vj.vjs_tossed = vj->sls_i_tossed; 2537 st->vj.vjs_uncompressedin = vj->sls_i_uncompressed; 2538 st->vj.vjs_compressedin = vj->sls_i_compressed; 2539} 2540 2541/* 2542 * Stuff for handling the lists of ppp units and channels 2543 * and for initialization. 2544 */ 2545 2546/* 2547 * Create a new ppp interface unit. Fails if it can't allocate memory 2548 * or if there is already a unit with the requested number. 
 * unit == -1 means allocate a new number.
 */
static struct ppp *
ppp_create_interface(struct net *net, int unit, int *retp)
{
	struct ppp *ppp;
	struct ppp_net *pn;
	struct net_device *dev = NULL;
	int ret = -ENOMEM;
	int i;

	/* The struct ppp lives in the netdev's private area. */
	dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
	if (!dev)
		goto out1;

	pn = ppp_pernet(net);

	ppp = netdev_priv(dev);
	ppp->dev = dev;
	ppp->mru = PPP_MRU;
	init_ppp_file(&ppp->file, INTERFACE);
	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
	for (i = 0; i < NUM_NP; ++i)
		ppp->npmode[i] = NPMODE_PASS;
	INIT_LIST_HEAD(&ppp->channels);
	spin_lock_init(&ppp->rlock);
	spin_lock_init(&ppp->wlock);
#ifdef CONFIG_PPP_MULTILINK
	ppp->minseq = -1;	/* no multilink fragments seen yet */
	skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */

	/* Record which network namespace the device belongs to. */
	dev_net_set(dev, net);

	mutex_lock(&pn->all_ppp_mutex);

	if (unit < 0) {
		/* Caller wants any free unit number. */
		unit = unit_get(&pn->units_idr, ppp);
		if (unit < 0) {
			ret = unit;
			goto out2;
		}
	} else {
		ret = -EEXIST;
		if (unit_find(&pn->units_idr, unit))
			goto out2;	/* unit already exists */
		/*
		 * if caller need a specified unit number
		 * lets try to satisfy him, otherwise --
		 * he should better ask us for new unit number
		 *
		 * NOTE: yes I know that returning EEXIST it's not
		 * fair but at least pppd will ask us to allocate
		 * new unit in this case so user is happy :)
		 */
		unit = unit_set(&pn->units_idr, ppp, unit);
		if (unit < 0)
			goto out2;
	}

	/* Initialize the new ppp unit */
	ppp->file.index = unit;
	sprintf(dev->name, "ppp%d", unit);

	ret = register_netdev(dev);
	if (ret != 0) {
		/* Give the unit number back before bailing out. */
		unit_put(&pn->units_idr, unit);
		printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
		       dev->name, ret);
		goto out2;
	}

	ppp->ppp_net = net;

	atomic_inc(&ppp_unit_count);
	mutex_unlock(&pn->all_ppp_mutex);

	*retp = 0;
	return ppp;

out2:
	mutex_unlock(&pn->all_ppp_mutex);
	free_netdev(dev);
out1:
	*retp = ret;
	return NULL;
}

/*
 * Initialize a ppp_file structure.
 */
static void
init_ppp_file(struct ppp_file *pf, int kind)
{
	pf->kind = kind;	/* INTERFACE or CHANNEL */
	skb_queue_head_init(&pf->xq);
	skb_queue_head_init(&pf->rq);
	atomic_set(&pf->refcnt, 1);
	init_waitqueue_head(&pf->rwait);
}

/*
 * Take down a ppp interface unit - called when the owning file
 * (the one that created the unit) is closed or detached.
 */
static void ppp_shutdown_interface(struct ppp *ppp)
{
	struct ppp_net *pn;

	pn = ppp_pernet(ppp->ppp_net);
	mutex_lock(&pn->all_ppp_mutex);

	/* This will call dev_close() for us. */
	ppp_lock(ppp);
	if (!ppp->closing) {
		/* First closer unregisters the netdev and releases the
		   unit number; later callers find ppp->closing set. */
		ppp->closing = 1;
		ppp_unlock(ppp);
		unregister_netdev(ppp->dev);
		unit_put(&pn->units_idr, ppp->file.index);
	} else
		ppp_unlock(ppp);

	/* Mark the unit dead and wake any readers blocked on it. */
	ppp->file.dead = 1;
	ppp->owner = NULL;
	wake_up_interruptible(&ppp->file.rwait);

	mutex_unlock(&pn->all_ppp_mutex);
}

/*
 * Free the memory used by a ppp unit.  This is only called once
 * there are no channels connected to the unit and no file structs
 * that reference the unit.
 */
static void ppp_destroy_interface(struct ppp *ppp)
{
	atomic_dec(&ppp_unit_count);

	if (!ppp->file.dead || ppp->n_channels) {
		/* "can't happen" */
		printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
		       "n_channels=%d !\n", ppp, ppp->file.dead,
		       ppp->n_channels);
		return;
	}

	/* Tear down compression state and drop compressor module refs. */
	ppp_ccp_closed(ppp);
	if (ppp->vj) {
		slhc_free(ppp->vj);
		ppp->vj = NULL;
	}
	/* Discard any packets still queued on the unit. */
	skb_queue_purge(&ppp->file.xq);
	skb_queue_purge(&ppp->file.rq);
#ifdef CONFIG_PPP_MULTILINK
	skb_queue_purge(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	kfree(ppp->pass_filter);
	ppp->pass_filter = NULL;
	kfree(ppp->active_filter);
	ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */

	kfree_skb(ppp->xmit_pending);	/* kfree_skb(NULL) is a no-op */

	/* ppp itself lives in the netdev's private area; this frees both. */
	free_netdev(ppp->dev);
}

/*
 * Locate an existing ppp unit.
 * The caller should have locked the all_ppp_mutex.
 */
static struct ppp *
ppp_find_unit(struct ppp_net *pn, int unit)
{
	return unit_find(&pn->units_idr, unit);
}

/*
 * Locate an existing ppp channel.
 * The caller should have locked the all_channels_lock.
 * First we look in the new_channels list, then in the
 * all_channels list.  If found in the new_channels list,
 * we move it to the all_channels list.  This is for speed
 * when we have a lot of channels in use.
 */
static struct channel *
ppp_find_channel(struct ppp_net *pn, int unit)
{
	struct channel *pch;

	list_for_each_entry(pch, &pn->new_channels, list) {
		if (pch->file.index == unit) {
			list_move(&pch->list, &pn->all_channels);
			return pch;
		}
	}

	list_for_each_entry(pch, &pn->all_channels, list) {
		if (pch->file.index == unit)
			return pch;
	}

	return NULL;
}

/*
 * Connect a PPP channel to a PPP interface unit.
 */
static int
ppp_connect_channel(struct channel *pch, int unit)
{
	struct ppp *ppp;
	struct ppp_net *pn;
	int ret = -ENXIO;
	int hdrlen;

	pn = ppp_pernet(pch->chan_net);

	/* Lock order: all_ppp_mutex, then the channel's upl, then the
	   unit's locks. */
	mutex_lock(&pn->all_ppp_mutex);
	ppp = ppp_find_unit(pn, unit);
	if (!ppp)
		goto out;
	write_lock_bh(&pch->upl);
	ret = -EINVAL;
	if (pch->ppp)
		goto outl;	/* channel is already attached to a unit */

	ppp_lock(ppp);
	/* Grow the unit's (and netdev's) header room to fit this channel. */
	if (pch->file.hdrlen > ppp->file.hdrlen)
		ppp->file.hdrlen = pch->file.hdrlen;
	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
	if (hdrlen > ppp->dev->hard_header_len)
		ppp->dev->hard_header_len = hdrlen;
	list_add_tail(&pch->clist, &ppp->channels);
	++ppp->n_channels;
	pch->ppp = ppp;
	/* The channel now holds a reference on the unit. */
	atomic_inc(&ppp->file.refcnt);
	ppp_unlock(ppp);
	ret = 0;

 outl:
	write_unlock_bh(&pch->upl);
 out:
	mutex_unlock(&pn->all_ppp_mutex);
	return ret;
}

/*
 * Disconnect a channel from its ppp unit.
 */
static int
ppp_disconnect_channel(struct channel *pch)
{
	struct ppp *ppp;
	int err = -EINVAL;

	/* Detach under the channel's upl, then update the unit. */
	write_lock_bh(&pch->upl);
	ppp = pch->ppp;
	pch->ppp = NULL;
	write_unlock_bh(&pch->upl);
	if (ppp) {
		/* remove it from the ppp unit's list */
		ppp_lock(ppp);
		list_del(&pch->clist);
		if (--ppp->n_channels == 0)
			wake_up_interruptible(&ppp->file.rwait);
		ppp_unlock(ppp);
		/* Drop the reference taken in ppp_connect_channel; the last
		   reference frees the unit. */
		if (atomic_dec_and_test(&ppp->file.refcnt))
			ppp_destroy_interface(ppp);
		err = 0;
	}
	return err;
}

/*
 * Free up the resources used by a ppp channel.
2830 */ 2831static void ppp_destroy_channel(struct channel *pch) 2832{ 2833 atomic_dec(&channel_count); 2834 2835 if (!pch->file.dead) { 2836 /* "can't happen" */ 2837 printk(KERN_ERR "ppp: destroying undead channel %p !\n", 2838 pch); 2839 return; 2840 } 2841 skb_queue_purge(&pch->file.xq); 2842 skb_queue_purge(&pch->file.rq); 2843 kfree(pch); 2844} 2845 2846static void __exit ppp_cleanup(void) 2847{ 2848 /* should never happen */ 2849 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) 2850 printk(KERN_ERR "PPP: removing module but units remain!\n"); 2851 unregister_chrdev(PPP_MAJOR, "ppp"); 2852 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); 2853 class_destroy(ppp_class); 2854 unregister_pernet_device(&ppp_net_ops); 2855} 2856 2857/* 2858 * Units handling. Caller must protect concurrent access 2859 * by holding all_ppp_mutex 2860 */ 2861 2862static int __unit_alloc(struct idr *p, void *ptr, int n) 2863{ 2864 int unit, err; 2865 2866again: 2867 if (!idr_pre_get(p, GFP_KERNEL)) { 2868 printk(KERN_ERR "PPP: No free memory for idr\n"); 2869 return -ENOMEM; 2870 } 2871 2872 err = idr_get_new_above(p, ptr, n, &unit); 2873 if (err < 0) { 2874 if (err == -EAGAIN) 2875 goto again; 2876 return err; 2877 } 2878 2879 return unit; 2880} 2881 2882/* associate pointer with specified number */ 2883static int unit_set(struct idr *p, void *ptr, int n) 2884{ 2885 int unit; 2886 2887 unit = __unit_alloc(p, ptr, n); 2888 if (unit < 0) 2889 return unit; 2890 else if (unit != n) { 2891 idr_remove(p, unit); 2892 return -EINVAL; 2893 } 2894 2895 return unit; 2896} 2897 2898/* get new free unit number and associate pointer with it */ 2899static int unit_get(struct idr *p, void *ptr) 2900{ 2901 return __unit_alloc(p, ptr, 0); 2902} 2903 2904/* put unit number back to a pool */ 2905static void unit_put(struct idr *p, int n) 2906{ 2907 idr_remove(p, n); 2908} 2909 2910/* get pointer associated with the number */ 2911static void *unit_find(struct idr *p, int n) 2912{ 2913 
return idr_find(p, n); 2914} 2915 2916/* Module/initialization stuff */ 2917 2918module_init(ppp_init); 2919module_exit(ppp_cleanup); 2920 2921EXPORT_SYMBOL(ppp_register_net_channel); 2922EXPORT_SYMBOL(ppp_register_channel); 2923EXPORT_SYMBOL(ppp_unregister_channel); 2924EXPORT_SYMBOL(ppp_channel_index); 2925EXPORT_SYMBOL(ppp_unit_number); 2926EXPORT_SYMBOL(ppp_dev_name); 2927EXPORT_SYMBOL(ppp_input); 2928EXPORT_SYMBOL(ppp_input_error); 2929EXPORT_SYMBOL(ppp_output_wakeup); 2930EXPORT_SYMBOL(ppp_register_compressor); 2931EXPORT_SYMBOL(ppp_unregister_compressor); 2932MODULE_LICENSE("GPL"); 2933MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); 2934MODULE_ALIAS("devname:ppp");