/* drivers/net/ppp_generic.c at v2.6.31-rc2 */
/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>

#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NP_MPLS_UC	4	/* MPLS unicast */
#define NP_MPLS_MC	5	/* MPLS multicast */
#define NUM_NP	6		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
#define MIN_FRAG_SIZE	64

/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel.  In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE=1, CHANNEL
	} kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	atomic_t refcnt;		/* # refs (incl /dev/ppp attached) */
	int	hdrlen;			/* space to leave for headers */
	int	index;			/* interface unit / channel number */
	int	dead;			/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		container_of(pf, X, file)

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)
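/*
 * Illustrative note (editorial, not in the original source):
 * PF_TO_PPP() uses container_of() to map the embedded struct ppp_file
 * back to its enclosing object.  Given pf == &some_ppp->file,
 * PF_TO_PPP(pf) == some_ppp; the macro simply subtracts
 * offsetof(struct ppp, file) from pf.
 */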
/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int		mru;		/* max receive unit 60 */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
	int		closing;	/* is device closing down? a8 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	struct sock_filter *pass_filter;	/* filter for packets to pass */
	struct sock_filter *active_filter;	/* filter for pkts to reset idle */
	unsigned pass_len, active_len;
#endif /* CONFIG_PPP_FILTER */
	struct net	*ppp_net;	/* the net we belong to */
};

/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 * SC_MUST_COMP
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct net	*chan_net;	/* the net channel belongs to */
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
	int		speed;		/* speed of the corresponding ppp channel */
#endif /* CONFIG_PPP_MULTILINK */
};

/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you need to take both locks
 * before you modify them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */

static atomic_t ppp_unit_count = ATOMIC_INIT(0);
static atomic_t channel_count = ATOMIC_INIT(0);

/* per-net private data for this module */
static int ppp_net_id;
struct ppp_net {
	/* units to ppp mapping */
	struct idr units_idr;

	/*
	 * all_ppp_mutex protects the units_idr mapping.
	 * It also ensures that finding a ppp unit in the units_idr
	 * map and updating its file.refcnt field is atomic.
	 */
	struct mutex all_ppp_mutex;

	/* channels */
	struct list_head all_channels;
	struct list_head new_channels;
	int last_channel_index;

	/*
	 * all_channels_lock protects all_channels and
	 * last_channel_index, and ensures that finding a
	 * channel and updating its file.refcnt field are
	 * done atomically.
	 */
	spinlock_t all_channels_lock;
};

/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	(((skb)->data[0] << 8) + (skb)->data[1])

/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others.  Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)
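/*
 * Illustrative note (editorial): the signed-difference trick above is
 * wraparound-safe.  For example, seq_before(0xfffffffe, 1) is true
 * because (s32)(0xfffffffe - 1) is negative, and seq_after(1, 0xfffffffe)
 * is true because (s32)(1 - 0xfffffffe) == 3 > 0, so a sequence number
 * that has just wrapped past zero still compares as "after" one from
 * just before the wrap.
 */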
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
			      struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
					    struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
				struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
static int unit_get(struct idr *p, void *ptr);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);

static struct class *ppp_class;

/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
	BUG_ON(!net);

	return net_generic(net, ppp_net_id);
}

/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
	switch (proto) {
	case PPP_IP:
		return NP_IP;
	case PPP_IPV6:
		return NP_IPV6;
	case PPP_IPX:
		return NP_IPX;
	case PPP_AT:
		return NP_AT;
	case PPP_MPLS_UC:
		return NP_MPLS_UC;
	case PPP_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -EINVAL;
}

/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};

/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return NP_IP;
	case ETH_P_IPV6:
		return NP_IPV6;
	case ETH_P_IPX:
		return NP_IPX;
	case ETH_P_PPPTALK:
	case ETH_P_ATALK:
		return NP_AT;
	case ETH_P_MPLS_UC:
		return NP_MPLS_UC;
	case ETH_P_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -1;
}
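/*
 * Illustrative note (editorial): the mapping tables above round-trip,
 * with one asymmetry: ethertype_to_npindex() accepts both ETH_P_PPPTALK
 * and ETH_P_ATALK for NP_AT, but npindex_to_ethertype[] below maps NP_AT
 * back to ETH_P_PPPTALK only, so inbound Appletalk frames are handed to
 * the stack with protocol ETH_P_PPPTALK.
 */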
/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};

/*
 * Locking shorthand.
 */
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)

/*
 * /dev/ppp device routines.
 * The /dev/ppp device is used by pppd to control the ppp unit.
 * It supports the read, write, ioctl and poll functions.
 * Open instances of /dev/ppp can be in one of three states:
 * unattached, attached to a ppp unit, or attached to a ppp channel.
 */
static int ppp_open(struct inode *inode, struct file *file)
{
	cycle_kernel_lock();
	/*
	 * This could (should?) be enforced by the permissions on /dev/ppp.
	 */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	return 0;
}

static int ppp_release(struct inode *unused, struct file *file)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;

	if (pf) {
		file->private_data = NULL;
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_dec_and_test(&pf->refcnt)) {
			switch (pf->kind) {
			case INTERFACE:
				ppp_destroy_interface(PF_TO_PPP(pf));
				break;
			case CHANNEL:
				ppp_destroy_channel(PF_TO_CHANNEL(pf));
				break;
			}
		}
	}
	return 0;
}
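/*
 * Illustrative userspace flow (editorial sketch, not part of this file):
 * this is roughly how pppd drives the device.
 *
 *	int fd = open("/dev/ppp", O_RDWR);	// hits ppp_open()
 *	int unit = -1;				// -1 = pick a free unit number
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);	// creates interface "pppN"
 *	// ... read()/write() PPP frames, further ioctls ...
 *	close(fd);				// hits ppp_release()
 */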
static ssize_t ppp_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	struct sk_buff *skb = NULL;

	ret = count;

	if (!pf)
		return -ENXIO;
	add_wait_queue(&pf->rwait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&pf->rq);
		if (skb)
			break;
		ret = 0;
		if (pf->dead)
			break;
		if (pf->kind == INTERFACE) {
			/*
			 * Return 0 (EOF) on an interface that has no
			 * channels connected, unless it is looping
			 * network traffic (demand mode).
			 */
			struct ppp *ppp = PF_TO_PPP(pf);
			if (ppp->n_channels == 0
			    && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
				break;
		}
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&pf->rwait, &wait);

	if (!skb)
		goto out;

	ret = -EOVERFLOW;
	if (skb->len > count)
		goto outf;
	ret = -EFAULT;
	if (copy_to_user(buf, skb->data, skb->len))
		goto outf;
	ret = skb->len;

 outf:
	kfree_skb(skb);
 out:
	return ret;
}

static ssize_t ppp_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	struct sk_buff *skb;
	ssize_t ret;

	if (!pf)
		return -ENXIO;
	ret = -ENOMEM;
	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
	if (!skb)
		goto out;
	skb_reserve(skb, pf->hdrlen);
	ret = -EFAULT;
	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		goto out;
	}

	skb_queue_tail(&pf->xq, skb);

	switch (pf->kind) {
	case INTERFACE:
		ppp_xmit_process(PF_TO_PPP(pf));
		break;
	case CHANNEL:
		ppp_channel_push(PF_TO_CHANNEL(pf));
		break;
	}

	ret = count;

 out:
	return ret;
}

/* No kernel lock - fine */
static unsigned int ppp_poll(struct file *file, poll_table *wait)
{
	struct ppp_file *pf = file->private_data;
	unsigned int mask;

	if (!pf)
		return 0;
	poll_wait(file, &pf->rwait, wait);
	mask = POLLOUT | POLLWRNORM;
	if (skb_peek(&pf->rq))
		mask |= POLLIN | POLLRDNORM;
	if (pf->dead)
		mask |= POLLHUP;
	else if (pf->kind == INTERFACE) {
		/* see comment in ppp_read */
		struct ppp *ppp = PF_TO_PPP(pf);
		if (ppp->n_channels == 0
		    && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
			mask |= POLLIN | POLLRDNORM;
	}

	return mask;
}

#ifdef CONFIG_PPP_FILTER
static int get_filter(void __user *arg, struct sock_filter **p)
{
	struct sock_fprog uprog;
	struct sock_filter *code = NULL;
	int len, err;

	if (copy_from_user(&uprog, arg, sizeof(uprog)))
		return -EFAULT;

	if (!uprog.len) {
		*p = NULL;
		return 0;
	}

	len = uprog.len * sizeof(struct sock_filter);
	code = kmalloc(len, GFP_KERNEL);
	if (code == NULL)
		return -ENOMEM;

	if (copy_from_user(code, uprog.filter, len)) {
		kfree(code);
		return -EFAULT;
	}

	err = sk_chk_filter(code, uprog.len);
	if (err) {
		kfree(code);
		return err;
	}

	*p = code;
	return uprog.len;
}
#endif /* CONFIG_PPP_FILTER */
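/*
 * Illustrative note (editorial): the pass/active filters installed via
 * PPPIOCSPASS/PPPIOCSACTIVE run over a pseudo-header: before filtering,
 * two bytes are pushed in front of the PPP protocol field, the first of
 * which encodes direction (1 for outbound in ppp_send_frame(), 0 for
 * inbound in ppp_receive_nonmp_frame()), so filter programs are written
 * against a four-byte PPP header.
 */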
static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;
	int err = -EFAULT, val, val2, i;
	struct ppp_idle idle;
	struct npioctl npi;
	int unit, cflags;
	struct slcompress *vj;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	if (!pf)
		return ppp_unattached_ioctl(current->nsproxy->net_ns,
					pf, file, cmd, arg);

	if (cmd == PPPIOCDETACH) {
		/*
		 * We have to be careful here... if the file descriptor
		 * has been dup'd, we could have another process in the
		 * middle of a poll using the same file *, so we had
		 * better not free the interface data structures -
		 * instead we fail the ioctl.  Even in this case, we
		 * shut down the interface if we are the owner of it.
		 * Actually, we should get rid of PPPIOCDETACH; userland
		 * (i.e. pppd) could achieve the same effect by closing
		 * this fd and reopening /dev/ppp.
		 */
		err = -EINVAL;
		lock_kernel();
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_long_read(&file->f_count) <= 2) {
			ppp_release(NULL, file);
			err = 0;
		} else
			printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
			       atomic_long_read(&file->f_count));
		unlock_kernel();
		return err;
	}

	if (pf->kind == CHANNEL) {
		struct channel *pch;
		struct ppp_channel *chan;

		lock_kernel();
		pch = PF_TO_CHANNEL(pf);

		switch (cmd) {
		case PPPIOCCONNECT:
			if (get_user(unit, p))
				break;
			err = ppp_connect_channel(pch, unit);
			break;

		case PPPIOCDISCONN:
			err = ppp_disconnect_channel(pch);
			break;

		default:
			down_read(&pch->chan_sem);
			chan = pch->chan;
			err = -ENOTTY;
			if (chan && chan->ops->ioctl)
				err = chan->ops->ioctl(chan, cmd, arg);
			up_read(&pch->chan_sem);
		}
		unlock_kernel();
		return err;
	}

	if (pf->kind != INTERFACE) {
		/* can't happen */
		printk(KERN_ERR "PPP: not interface or channel??\n");
		return -EINVAL;
	}

	lock_kernel();
	ppp = PF_TO_PPP(pf);
	switch (cmd) {
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		ppp->mru = val;
		err = 0;
		break;

	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ppp_lock(ppp);
		cflags = ppp->flags & ~val;
		ppp->flags = val & SC_FLAG_BITS;
		ppp_unlock(ppp);
		if (cflags & SC_CCP_OPEN)
			ppp_ccp_closed(ppp);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		val = ppp->flags | ppp->xstate | ppp->rstate;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	case PPPIOCSCOMPRESS:
		err = ppp_set_compress(ppp, arg);
		break;

	case PPPIOCGUNIT:
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCSDEBUG:
		if (get_user(val, p))
			break;
		ppp->debug = val;
		err = 0;
		break;

	case PPPIOCGDEBUG:
		if (put_user(ppp->debug, p))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE:
		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle, sizeof(idle)))
			break;
		err = 0;
		break;

	case PPPIOCSMAXCID:
		if (get_user(val, p))
			break;
		val2 = 15;
		if ((val >> 16) != 0) {
			val2 = val >> 16;
			val &= 0xffff;
		}
		vj = slhc_init(val2+1, val+1);
		if (!vj) {
			printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
			err = -ENOMEM;
			break;
		}
		ppp_lock(ppp);
		if (ppp->vj)
			slhc_free(ppp->vj);
		ppp->vj = vj;
		ppp_unlock(ppp);
		err = 0;
		break;

	case PPPIOCGNPMODE:
	case PPPIOCSNPMODE:
		if (copy_from_user(&npi, argp, sizeof(npi)))
			break;
		err = proto_to_npindex(npi.protocol);
		if (err < 0)
			break;
		i = err;
		if (cmd == PPPIOCGNPMODE) {
			err = -EFAULT;
			npi.mode = ppp->npmode[i];
			if (copy_to_user(argp, &npi, sizeof(npi)))
				break;
		} else {
			ppp->npmode[i] = npi.mode;
			/* we may be able to transmit more packets now (??) */
			netif_wake_queue(ppp->dev);
		}
		err = 0;
		break;

#ifdef CONFIG_PPP_FILTER
	case PPPIOCSPASS:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->pass_filter);
			ppp->pass_filter = code;
			ppp->pass_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
	case PPPIOCSACTIVE:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->active_filter);
			ppp->active_filter = code;
			ppp->active_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
#endif /* CONFIG_PPP_FILTER */

#ifdef CONFIG_PPP_MULTILINK
	case PPPIOCSMRRU:
		if (get_user(val, p))
			break;
		ppp_recv_lock(ppp);
		ppp->mrru = val;
		ppp_recv_unlock(ppp);
		err = 0;
		break;
#endif /* CONFIG_PPP_MULTILINK */

	default:
		err = -ENOTTY;
	}
	unlock_kernel();
	return err;
}

static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg)
{
	int unit, err = -EFAULT;
	struct ppp *ppp;
	struct channel *chan;
	struct ppp_net *pn;
	int __user *p = (int __user *)arg;

	lock_kernel();
	switch (cmd) {
	case PPPIOCNEWUNIT:
		/* Create a new ppp unit */
		if (get_user(unit, p))
			break;
		ppp = ppp_create_interface(net, unit, &err);
		if (!ppp)
			break;
		file->private_data = &ppp->file;
		ppp->owner = file;
		err = -EFAULT;
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCATTACH:
		/* Attach to an existing ppp unit */
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		mutex_lock(&pn->all_ppp_mutex);
		ppp = ppp_find_unit(pn, unit);
		if (ppp) {
			atomic_inc(&ppp->file.refcnt);
			file->private_data = &ppp->file;
			err = 0;
		}
		mutex_unlock(&pn->all_ppp_mutex);
		break;

	case PPPIOCATTCHAN:
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		spin_lock_bh(&pn->all_channels_lock);
		chan = ppp_find_channel(pn, unit);
		if (chan) {
			atomic_inc(&chan->file.refcnt);
			file->private_data = &chan->file;
			err = 0;
		}
		spin_unlock_bh(&pn->all_channels_lock);
		break;

	default:
		err = -ENOTTY;
	}
	unlock_kernel();
	return err;
}

static const struct file_operations ppp_device_fops = {
	.owner		= THIS_MODULE,
	.read		= ppp_read,
	.write		= ppp_write,
	.poll		= ppp_poll,
	.unlocked_ioctl	= ppp_ioctl,
	.open		= ppp_open,
	.release	= ppp_release
};

static __net_init int ppp_init_net(struct net *net)
{
	struct ppp_net *pn;
	int err;

	pn = kzalloc(sizeof(*pn), GFP_KERNEL);
	if (!pn)
		return -ENOMEM;

	idr_init(&pn->units_idr);
	mutex_init(&pn->all_ppp_mutex);

	INIT_LIST_HEAD(&pn->all_channels);
	INIT_LIST_HEAD(&pn->new_channels);

	spin_lock_init(&pn->all_channels_lock);

	err = net_assign_generic(net, ppp_net_id, pn);
	if (err) {
		kfree(pn);
		return err;
	}

	return 0;
}

static __net_exit void ppp_exit_net(struct net *net)
{
	struct ppp_net *pn;

	pn = net_generic(net, ppp_net_id);
	idr_destroy(&pn->units_idr);
	/*
	 * if someone has cached our net then
	 * a further net_generic() call will return NULL
	 */
	net_assign_generic(net, ppp_net_id, NULL);
	kfree(pn);
}
static struct pernet_operations ppp_net_ops = {
	.init = ppp_init_net,
	.exit = ppp_exit_net,
};

#define PPP_MAJOR	108

/* Called at boot time if ppp is compiled into the kernel,
   or at module load time (from init_module) if compiled as a module. */
static int __init ppp_init(void)
{
	int err;

	printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");

	err = register_pernet_gen_device(&ppp_net_id, &ppp_net_ops);
	if (err) {
		printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
		goto out;
	}

	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
	if (err) {
		printk(KERN_ERR "failed to register PPP device (%d)\n", err);
		goto out_net;
	}

	ppp_class = class_create(THIS_MODULE, "ppp");
	if (IS_ERR(ppp_class)) {
		err = PTR_ERR(ppp_class);
		goto out_chrdev;
	}

	/* not a big deal if we fail here :-) */
	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");

	return 0;

out_chrdev:
	unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
	unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops);
out:
	return err;
}

/*
 * Network interface unit routines.
 */
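/*
 * Illustrative note (editorial): on the wire a PPP frame may carry HDLC
 * address/control bytes in front of the protocol field; this layer deals
 * only with the 2-byte protocol, but PPP_HDRLEN (4) leaves headroom for
 * all four, e.g. for an IPv4 packet:
 *
 *	ff 03 00 21 45 ...	addr (0xff), ctrl (0x03), proto (PPP_IP), IP data
 */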
static int
ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ppp *ppp = netdev_priv(dev);
	int npi, proto;
	unsigned char *pp;

	npi = ethertype_to_npindex(ntohs(skb->protocol));
	if (npi < 0)
		goto outf;

	/* Drop, accept or reject the packet */
	switch (ppp->npmode[npi]) {
	case NPMODE_PASS:
		break;
	case NPMODE_QUEUE:
		/* it would be nice to have a way to tell the network
		   system to queue this one up for later. */
		goto outf;
	case NPMODE_DROP:
	case NPMODE_ERROR:
		goto outf;
	}

	/* Put the 2-byte PPP protocol number on the front,
	   making sure there is room for the address and control fields. */
	if (skb_cow_head(skb, PPP_HDRLEN))
		goto outf;

	pp = skb_push(skb, 2);
	proto = npindex_to_proto[npi];
	pp[0] = proto >> 8;
	pp[1] = proto;

	netif_stop_queue(dev);
	skb_queue_tail(&ppp->file.xq, skb);
	ppp_xmit_process(ppp);
	return 0;

 outf:
	kfree_skb(skb);
	++dev->stats.tx_dropped;
	return 0;
}

static int
ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ppp *ppp = netdev_priv(dev);
	int err = -EFAULT;
	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
	struct ppp_stats stats;
	struct ppp_comp_stats cstats;
	char *vers;

	switch (cmd) {
	case SIOCGPPPSTATS:
		ppp_get_stats(ppp, &stats);
		if (copy_to_user(addr, &stats, sizeof(stats)))
			break;
		err = 0;
		break;

	case SIOCGPPPCSTATS:
		memset(&cstats, 0, sizeof(cstats));
		if (ppp->xc_state)
			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
		if (ppp->rc_state)
			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
		if (copy_to_user(addr, &cstats, sizeof(cstats)))
			break;
		err = 0;
		break;

	case SIOCGPPPVER:
		vers = PPP_VERSION;
		if (copy_to_user(addr, vers, strlen(vers) + 1))
			break;
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static const struct net_device_ops ppp_netdev_ops = {
	.ndo_start_xmit = ppp_start_xmit,
	.ndo_do_ioctl   = ppp_net_ioctl,
};

static void ppp_setup(struct net_device *dev)
{
	dev->netdev_ops = &ppp_netdev_ops;
	dev->hard_header_len = PPP_HDRLEN;
	dev->mtu = PPP_MTU;
	dev->addr_len = 0;
	dev->tx_queue_len = 3;
	dev->type = ARPHRD_PPP;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
/*
 * Transmit-side routines.
 */

/*
 * Called to do any work queued up on the transmit side
 * that can now be done.
 */
static void
ppp_xmit_process(struct ppp *ppp)
{
	struct sk_buff *skb;

	ppp_xmit_lock(ppp);
	if (!ppp->closing) {
		ppp_push(ppp);
		while (!ppp->xmit_pending
		       && (skb = skb_dequeue(&ppp->file.xq)))
			ppp_send_frame(ppp, skb);
		/* If there's no work left to do, tell the core net
		   code that we can accept some more. */
		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
			netif_wake_queue(ppp->dev);
	}
	ppp_xmit_unlock(ppp);
}

static inline struct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	int len;
	int new_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
	int compressor_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + PPP_HDRLEN;
	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
	if (!new_skb) {
		if (net_ratelimit())
			printk(KERN_ERR "PPP: no memory (comp pkt)\n");
		return NULL;
	}
	if (ppp->dev->hard_header_len > PPP_HDRLEN)
		skb_reserve(new_skb,
			    ppp->dev->hard_header_len - PPP_HDRLEN);

	/* compressor still expects A/C bytes in hdr */
	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
				   new_skb->data, skb->len + 2,
				   compressor_skb_size);
	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
		kfree_skb(skb);
		skb = new_skb;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off A/C bytes */
	} else if (len == 0) {
		/* didn't compress, or CCP not up yet */
		kfree_skb(new_skb);
		new_skb = skb;
	} else {
		/*
		 * (len < 0)
		 * MPPE requires that we do not send unencrypted
		 * frames.  The compressor will return -1 if we
		 * should drop the frame.  We cannot simply test
		 * the compress_proto because MPPE and MPPC share
		 * the same number.
		 */
		if (net_ratelimit())
			printk(KERN_ERR "ppp: compressor dropped pkt\n");
		kfree_skb(skb);
		kfree_skb(new_skb);
		new_skb = NULL;
	}
	return new_skb;
}

/*
 * Compress and send a frame.
 * The caller should have locked the xmit path,
 * and xmit_pending should be 0.
 */
static void
ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *new_skb;
	int len;
	unsigned char *cp;

	if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
		/* check if we should pass this packet */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		*skb_push(skb, 2) = 1;
		if (ppp->pass_filter
		    && sk_run_filter(skb, ppp->pass_filter,
				     ppp->pass_len) == 0) {
			if (ppp->debug & 1)
				printk(KERN_DEBUG "PPP: outbound frame not passed\n");
			kfree_skb(skb);
			return;
		}
		/* if this packet passes the active filter, record the time */
		if (!(ppp->active_filter
		      && sk_run_filter(skb, ppp->active_filter,
				       ppp->active_len) == 0))
			ppp->last_xmit = jiffies;
		skb_pull(skb, 2);
#else
		/* for data packets, record the time */
		ppp->last_xmit = jiffies;
#endif /* CONFIG_PPP_FILTER */
	}

	++ppp->dev->stats.tx_packets;
	ppp->dev->stats.tx_bytes += skb->len - 2;

	switch (proto) {
	case PPP_IP:
		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
			break;
		/* try to do VJ TCP header compression */
		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
				    GFP_ATOMIC);
		if (!new_skb) {
			printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
			goto drop;
		}
		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
		cp = skb->data + 2;
		len = slhc_compress(ppp->vj, cp, skb->len - 2,
				    new_skb->data + 2, &cp,
				    !(ppp->flags & SC_NO_TCP_CCID));
		if (cp == skb->data + 2) {
			/* didn't compress */
			kfree_skb(new_skb);
		} else {
			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
				proto = PPP_VJC_COMP;
				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
			} else {
				proto = PPP_VJC_UNCOMP;
				cp[0] = skb->data[2];
			}
			kfree_skb(skb);
			skb = new_skb;
			cp = skb_put(skb, len + 2);
			cp[0] = 0;
			cp[1] = proto;
		}
		break;

	case PPP_CCP:
		/* peek at outbound CCP frames */
		ppp_ccp_peek(ppp, skb, 0);
		break;
	}

	/* try to do packet compression */
	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state
	    && proto != PPP_LCP && proto != PPP_CCP) {
		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
			if (net_ratelimit())
				printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
			goto drop;
		}
		skb = pad_compress_skb(ppp, skb);
		if (!skb)
			goto drop;
	}

	/*
	 * If we are waiting for traffic (demand dialling),
	 * queue it up for pppd to receive.
	 */
	if (ppp->flags & SC_LOOP_TRAFFIC) {
		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
			goto drop;
		skb_queue_tail(&ppp->file.rq, skb);
		wake_up_interruptible(&ppp->file.rwait);
		return;
	}

	ppp->xmit_pending = skb;
	ppp_push(ppp);
	return;

 drop:
	kfree_skb(skb);
	++ppp->dev->stats.tx_errors;
}

/*
 * Try to send the frame in xmit_pending.
 * The caller should have the xmit path locked.
 */
static void
ppp_push(struct ppp *ppp)
{
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *skb = ppp->xmit_pending;

	if (!skb)
		return;

	list = &ppp->channels;
	if (list_empty(list)) {
		/* nowhere to send the packet, just drop it */
		ppp->xmit_pending = NULL;
		kfree_skb(skb);
		return;
	}

	if ((ppp->flags & SC_MULTILINK) == 0) {
		/* not doing multilink: send it down the first channel */
		list = list->next;
		pch = list_entry(list, struct channel, clist);

		spin_lock_bh(&pch->downl);
		if (pch->chan) {
			if (pch->chan->ops->start_xmit(pch->chan, skb))
				ppp->xmit_pending = NULL;
		} else {
			/* channel got unregistered */
			kfree_skb(skb);
			ppp->xmit_pending = NULL;
		}
		spin_unlock_bh(&pch->downl);
		return;
	}

#ifdef CONFIG_PPP_MULTILINK
	/* Multilink: fragment the packet over as many links
	   as can take the packet at the moment. */
	if (!ppp_mp_explode(ppp, skb))
		return;
#endif /* CONFIG_PPP_MULTILINK */

	ppp->xmit_pending = NULL;
	kfree_skb(skb);
}

#ifdef CONFIG_PPP_MULTILINK
/*
 * Divide a packet to be transmitted into fragments and
 * send them out the individual links.
 */
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
	int len, totlen;
	int i, bits, hdrlen, mtu;
	int flen;
	int navail, nfree, nzero;
	int nbigger;
	int totspeed;
	int totfree;
	unsigned char *p, *q;
	struct list_head *list;
	struct channel *pch;
	struct sk_buff *frag;
	struct ppp_channel *chan;

	totspeed = 0;	/* total bitrate of the bundle */
	nfree = 0;	/* # channels which have no packet already queued */
	navail = 0;	/* total # of usable channels (not deregistered) */
	nzero = 0;	/* number of channels with zero speed associated */
	totfree = 0;	/* total # of channels available and
			 * having no queued packets before
			 * starting the fragmentation */

	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
	i = 0;
	list_for_each_entry(pch, &ppp->channels, clist) {
		navail += pch->avail = (pch->chan != NULL);
		if (pch->avail) {
			/* only dereference pch->chan once we know it's
			   non-NULL (a deregistering channel can still be
			   on the list with pch->chan == NULL) */
			pch->speed = pch->chan->speed;
			if (skb_queue_empty(&pch->file.xq) ||
			    !pch->had_frag) {
				if (pch->speed == 0)
					nzero++;
				else
					totspeed += pch->speed;

				pch->avail = 2;
				++nfree;
				++totfree;
			}
			if (!pch->had_frag && i < ppp->nxchan)
				ppp->nxchan = i;
		}
		++i;
	}
	/*
	 * Don't start sending this packet unless at least half of
	 * the channels are free.  This gives much better TCP
	 * performance if we have a lot of channels.
	 */
1359 */ 1360 if (nfree == 0 || nfree < navail / 2) 1361 return 0; /* can't take now, leave it in xmit_pending */ 1362 1363 /* Do protocol field compression (XXX this should be optional) */ 1364 p = skb->data; 1365 len = skb->len; 1366 if (*p == 0) { 1367 ++p; 1368 --len; 1369 } 1370 1371 totlen = len; 1372 nbigger = len % nfree; 1373 1374 /* skip to the channel after the one we last used 1375 and start at that one */ 1376 list = &ppp->channels; 1377 for (i = 0; i < ppp->nxchan; ++i) { 1378 list = list->next; 1379 if (list == &ppp->channels) { 1380 i = 0; 1381 break; 1382 } 1383 } 1384 1385 /* create a fragment for each channel */ 1386 bits = B; 1387 while (nfree > 0 && len > 0) { 1388 list = list->next; 1389 if (list == &ppp->channels) { 1390 i = 0; 1391 continue; 1392 } 1393 pch = list_entry(list, struct channel, clist); 1394 ++i; 1395 if (!pch->avail) 1396 continue; 1397 1398 /* 1399 * Skip this channel if it has a fragment pending already and 1400 * we haven't given a fragment to all of the free channels. 1401 */ 1402 if (pch->avail == 1) { 1403 if (nfree > 0) 1404 continue; 1405 } else { 1406 pch->avail = 1; 1407 } 1408 1409 /* check the channel's mtu and whether it is still attached. */ 1410 spin_lock_bh(&pch->downl); 1411 if (pch->chan == NULL) { 1412 /* can't use this channel, it's being deregistered */ 1413 if (pch->speed == 0) 1414 nzero--; 1415 else 1416 totspeed -= pch->speed; 1417 1418 spin_unlock_bh(&pch->downl); 1419 pch->avail = 0; 1420 totlen = len; 1421 totfree--; 1422 nfree--; 1423 if (--navail == 0) 1424 break; 1425 continue; 1426 } 1427 1428 /* 1429 *if the channel speed is not set divide 1430 *the packet evenly among the free channels; 1431 *otherwise divide it according to the speed 1432 *of the channel we are going to transmit on 1433 */ 1434 if (pch->speed == 0) { 1435 flen = totlen/nfree ; 1436 if (nbigger > 0) { 1437 flen++; 1438 nbigger--; 1439 } 1440 } else { 1441 flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / 1442 ((totspeed*totfree)/pch->speed)) - hdrlen; 1443 if (nbigger > 0) { 1444 flen += ((totfree - nzero)*pch->speed)/totspeed; 1445 nbigger -= ((totfree - nzero)*pch->speed)/ 1446 totspeed; 1447 } 1448 } 1449 nfree--; 1450 1451 /* 1452 *check if we are on the last channel or 1453 *we exceded the lenght of the data to 1454 *fragment 1455 */ 1456 if ((nfree == 0) || (flen > len)) 1457 flen = len; 1458 /* 1459 *it is not worth to tx on slow channels: 1460 *in that case from the resulting flen according to the 1461 *above formula will be equal or less than zero. 
		if (flen <= 0) {
			pch->avail = 2;
			spin_unlock_bh(&pch->downl);
			continue;
		}

		mtu = pch->chan->mtu + 2 - hdrlen;
		if (mtu < 4)
			mtu = 4;
		if (flen > mtu)
			flen = mtu;
		if (flen == len)
			bits |= E;
		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
		if (!frag)
			goto noskb;
		q = skb_put(frag, flen + hdrlen);

		/* make the MP header */
		q[0] = PPP_MP >> 8;
		q[1] = PPP_MP;
		if (ppp->flags & SC_MP_XSHORTSEQ) {
			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
			q[3] = ppp->nxseq;
		} else {
			q[2] = bits;
			q[3] = ppp->nxseq >> 16;
			q[4] = ppp->nxseq >> 8;
			q[5] = ppp->nxseq;
		}

		memcpy(q + hdrlen, p, flen);

		/* try to send it down the channel */
		chan = pch->chan;
		if (!skb_queue_empty(&pch->file.xq) ||
		    !chan->ops->start_xmit(chan, frag))
			skb_queue_tail(&pch->file.xq, frag);
		pch->had_frag = 1;
		p += flen;
		len -= flen;
		++ppp->nxseq;
		bits = 0;
		spin_unlock_bh(&pch->downl);
	}
	ppp->nxchan = i;

	return 1;

 noskb:
	spin_unlock_bh(&pch->downl);
	if (ppp->debug & 1)
		printk(KERN_ERR "PPP: no memory (fragment)\n");
	++ppp->dev->stats.tx_errors;
	++ppp->nxseq;
	return 1;	/* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */
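/*
 * Editorial note: the multilink (MP) header built above follows RFC 1990.
 * After the 2-byte PPP_MP protocol field, the long-sequence format is
 *
 *	+-+-+-+-+-+-+-+-+------------------------+
 *	|B|E|0 0 0 0 0 0|  24-bit sequence number |
 *	+-+-+-+-+-+-+-+-+------------------------+
 *
 * and the short-sequence format packs B, E and a 12-bit sequence number
 * into two bytes.  B marks the first fragment of a packet, E the last;
 * a single-fragment packet carries both.
 */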
/*
 * Try to send data out on a channel.
 */
static void
ppp_channel_push(struct channel *pch)
{
	struct sk_buff *skb;
	struct ppp *ppp;

	spin_lock_bh(&pch->downl);
	if (pch->chan) {
		while (!skb_queue_empty(&pch->file.xq)) {
			skb = skb_dequeue(&pch->file.xq);
			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
				/* put the packet back and try again later */
				skb_queue_head(&pch->file.xq, skb);
				break;
			}
		}
	} else {
		/* channel got deregistered */
		skb_queue_purge(&pch->file.xq);
	}
	spin_unlock_bh(&pch->downl);
	/* see if there is anything from the attached unit to be sent */
	if (skb_queue_empty(&pch->file.xq)) {
		read_lock_bh(&pch->upl);
		ppp = pch->ppp;
		if (ppp)
			ppp_xmit_process(ppp);
		read_unlock_bh(&pch->upl);
	}
}

/*
 * Receive-side routines.
 */

/* misuse a few fields of the skb for MP reconstruction */
#define sequence priority
#define BEbits cb[0]

static inline void
ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	ppp_recv_lock(ppp);
	if (!ppp->closing)
		ppp_receive_frame(ppp, skb, pch);
	else
		kfree_skb(skb);
	ppp_recv_unlock(ppp);
}

void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct channel *pch = chan->ppp;
	int proto;

	if (!pch || skb->len == 0) {
		kfree_skb(skb);
		return;
	}

	proto = PPP_PROTO(skb);
	read_lock_bh(&pch->upl);
	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
		/* put it on the channel queue */
		skb_queue_tail(&pch->file.rq, skb);
		/* drop old frames if queue too long */
		while (pch->file.rq.qlen > PPP_MAX_RQLEN
		       && (skb = skb_dequeue(&pch->file.rq)))
			kfree_skb(skb);
		wake_up_interruptible(&pch->file.rwait);
	} else {
		ppp_do_recv(pch->ppp, skb, pch);
	}
	read_unlock_bh(&pch->upl);
}

/* Put a 0-length skb in the receive queue as an error indication */
void
ppp_input_error(struct ppp_channel *chan, int code)
{
	struct channel *pch = chan->ppp;
	struct sk_buff *skb;

	if (!pch)
		return;

	read_lock_bh(&pch->upl);
	if (pch->ppp) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (skb) {
			skb->len = 0;	/* probably unnecessary */
			skb->cb[0] = code;
			ppp_do_recv(pch->ppp, skb, pch);
		}
	}
	read_unlock_bh(&pch->upl);
}
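/*
 * Editorial note: the proto >= 0xc000 test in ppp_input() above exploits
 * the PPP protocol numbering plan: values 0xc000 and up are link-layer
 * control protocols (LCP, authentication, etc.), which are always handed
 * to pppd via the channel's receive queue rather than to the bundle,
 * while network-layer data (< 0x8000) flows into the attached unit.
 */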
/*
 * We come in here to process a received frame.
 * The receive side of the ppp unit is locked.
 */
static void
ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	if (pskb_may_pull(skb, 2)) {
#ifdef CONFIG_PPP_MULTILINK
		/* XXX do channel-level decompression here */
		if (PPP_PROTO(skb) == PPP_MP)
			ppp_receive_mp_frame(ppp, skb, pch);
		else
#endif /* CONFIG_PPP_MULTILINK */
			ppp_receive_nonmp_frame(ppp, skb);
		return;
	}

	if (skb->len > 0)
		/* note: a 0-length skb is used as an error indication */
		++ppp->dev->stats.rx_length_errors;

	kfree_skb(skb);
	ppp_receive_error(ppp);
}

static void
ppp_receive_error(struct ppp *ppp)
{
	++ppp->dev->stats.rx_errors;
	if (ppp->vj)
		slhc_toss(ppp->vj);
}

static void
ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *ns;
	int proto, len, npi;

	/*
	 * Decompress the frame, if compressed.
	 * Note that some decompressors need to see uncompressed frames
	 * that come in as well as compressed frames.
	 */
	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)
	    && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
		skb = ppp_decompress_frame(ppp, skb);

	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
		goto err;

	proto = PPP_PROTO(skb);
	switch (proto) {
	case PPP_VJC_COMP:
		/* decompress VJ compressed packets */
		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
			goto err;

		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
			/* copy to a new sk_buff with more tailroom */
			ns = dev_alloc_skb(skb->len + 128);
			if (!ns) {
				printk(KERN_ERR "PPP: no memory (VJ decomp)\n");
				goto err;
			}
			skb_reserve(ns, 2);
			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
			kfree_skb(skb);
			skb = ns;
		}
		else
			skb->ip_summed = CHECKSUM_NONE;

		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
		if (len <= 0) {
			printk(KERN_DEBUG "PPP: VJ decompression error\n");
			goto err;
		}
		len += 2;
		if (len > skb->len)
			skb_put(skb, len - skb->len);
		else if (len < skb->len)
			skb_trim(skb, len);
		proto = PPP_IP;
		break;

	case PPP_VJC_UNCOMP:
		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
			goto err;

		/* Until we fix the decompressor, we need to make sure
		 * the data portion is linear.
		 */
		if (!pskb_may_pull(skb, skb->len))
			goto err;

		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
			printk(KERN_ERR "PPP: VJ uncompressed error\n");
			goto err;
		}
		proto = PPP_IP;
		break;

	case PPP_CCP:
		ppp_ccp_peek(ppp, skb, 1);
		break;
	}

	++ppp->dev->stats.rx_packets;
	ppp->dev->stats.rx_bytes += skb->len - 2;

	npi = proto_to_npindex(proto);
	if (npi < 0) {
		/* control or unknown frame - pass it to pppd */
		skb_queue_tail(&ppp->file.rq, skb);
		/* limit queue length by dropping old frames */
		while (ppp->file.rq.qlen > PPP_MAX_RQLEN
		       && (skb = skb_dequeue(&ppp->file.rq)))
			kfree_skb(skb);
		/* wake up any process polling or blocking on read */
		wake_up_interruptible(&ppp->file.rwait);

	} else {
		/* network protocol frame - give it to the kernel */

#ifdef CONFIG_PPP_FILTER
		/* check if the packet passes the pass and active filters */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		if (ppp->pass_filter || ppp->active_filter) {
			if (skb_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
				goto err;

			*skb_push(skb, 2) = 0;
			if (ppp->pass_filter
			    && sk_run_filter(skb, ppp->pass_filter,
					     ppp->pass_len) == 0) {
				if (ppp->debug & 1)
					printk(KERN_DEBUG "PPP: inbound frame "
					       "not passed\n");
				kfree_skb(skb);
				return;
			}
			if (!(ppp->active_filter
			      && sk_run_filter(skb, ppp->active_filter,
					       ppp->active_len) == 0))
				ppp->last_recv = jiffies;
			__skb_pull(skb, 2);
		} else
#endif /* CONFIG_PPP_FILTER */
			ppp->last_recv = jiffies;

		if ((ppp->dev->flags & IFF_UP) == 0
		    || ppp->npmode[npi] != NPMODE_PASS) {
			kfree_skb(skb);
		} else {
			/* chop off protocol */
			skb_pull_rcsum(skb, 2);
			skb->dev = ppp->dev;
			skb->protocol = htons(npindex_to_ethertype[npi]);
			skb_reset_mac_header(skb);
			netif_rx(skb);
		}
	}
	return;

 err:
	kfree_skb(skb);
	ppp_receive_error(ppp);
}
static struct sk_buff *
ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *ns;
	int len;

	/* Until we fix all the decompressors, we need to make sure
	 * the data portion is linear.
	 */
	if (!pskb_may_pull(skb, skb->len))
		goto err;

	if (proto == PPP_COMP) {
		int obuff_size;

		switch(ppp->rcomp->compress_proto) {
		case CI_MPPE:
			obuff_size = ppp->mru + PPP_HDRLEN + 1;
			break;
		default:
			obuff_size = ppp->mru + PPP_HDRLEN;
			break;
		}

		ns = dev_alloc_skb(obuff_size);
		if (!ns) {
			printk(KERN_ERR "ppp_decompress_frame: no memory\n");
			goto err;
		}
		/* the decompressor still expects the A/C bytes in the hdr */
		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
				skb->len + 2, ns->data, obuff_size);
		if (len < 0) {
			/* Pass the compressed frame to pppd as an
			   error indication. */
			if (len == DECOMP_FATALERROR)
				ppp->rstate |= SC_DC_FERROR;
			kfree_skb(ns);
			goto err;
		}

		kfree_skb(skb);
		skb = ns;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off the A/C bytes */

	} else {
		/* Uncompressed frame - pass to decompressor so it
		   can update its dictionary if necessary. */
		if (ppp->rcomp->incomp)
			ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
					   skb->len + 2);
	}

	return skb;

 err:
	ppp->rstate |= SC_DC_ERROR;
	ppp_receive_error(ppp);
	return skb;
}

#ifdef CONFIG_PPP_MULTILINK
/*
 * Receive a multilink frame.
 * We put it on the reconstruction queue and then pull off
 * as many completed frames as we can.
 */
static void
ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	u32 mask, seq;
	struct channel *ch;
	int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;

	if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
		goto err;		/* no good, throw it away */

	/* Decode sequence number and begin/end bits */
	if (ppp->flags & SC_MP_SHORTSEQ) {
		seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
		mask = 0xfff;
	} else {
		seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
		mask = 0xffffff;
	}
	skb->BEbits = skb->data[2];
	skb_pull(skb, mphdrlen);	/* pull off PPP and MP headers */

	/*
	 * Do protocol ID decompression on the first fragment of each packet.
	 */
	if ((skb->BEbits & B) && (skb->data[0] & 1))
		*skb_push(skb, 1) = 0;

	/*
	 * Expand sequence number to 32 bits, making it as close
	 * as possible to ppp->minseq.
	 */
	seq |= ppp->minseq & ~mask;
	if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
		seq += mask + 1;
	else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
		seq -= mask + 1;	/* should never happen */
	skb->sequence = seq;
	pch->lastseq = seq;

	/*
	 * If this packet comes before the next one we were expecting,
	 * drop it.
	 */
	if (seq_before(seq, ppp->nextseq)) {
		kfree_skb(skb);
		++ppp->dev->stats.rx_dropped;
		ppp_receive_error(ppp);
		return;
	}

	/*
	 * Reevaluate minseq, the minimum over all channels of the
	 * last sequence number received on each channel.  Because of
	 * the increasing sequence number rule, we know that any fragment
	 * before `minseq' which hasn't arrived is never going to arrive.
	 * The list of channels can't change because we have the receive
	 * side of the ppp unit locked.
	 */
	list_for_each_entry(ch, &ppp->channels, clist) {
		if (seq_before(ch->lastseq, seq))
			seq = ch->lastseq;
	}
	if (seq_before(ppp->minseq, seq))
		ppp->minseq = seq;

	/* Put the fragment on the reconstruction queue */
	ppp_mp_insert(ppp, skb);

	/* If the queue is getting long, don't wait any longer for packets
	   before the start of the queue. */
	if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
		struct sk_buff *skb = skb_peek(&ppp->mrq);
		if (seq_before(ppp->minseq, skb->sequence))
			ppp->minseq = skb->sequence;
	}

	/* Pull completed packets off the queue and receive them. */
	while ((skb = ppp_mp_reconstruct(ppp)))
		ppp_receive_nonmp_frame(ppp, skb);

	return;

 err:
	kfree_skb(skb);
	ppp_receive_error(ppp);
}

/*
 * Insert a fragment on the MP reconstruction queue.
 * The queue is ordered by increasing sequence number.
 */
static void
ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *p;
	struct sk_buff_head *list = &ppp->mrq;
	u32 seq = skb->sequence;

	/* N.B. we don't need to lock the list lock because we have the
	   ppp unit receive-side lock. */
	skb_queue_walk(list, p) {
		if (seq_before(seq, p->sequence))
			break;
	}
	__skb_queue_before(list, p, skb);
}

/*
 * Reconstruct a packet from the MP fragment queue.
 * We go through increasing sequence numbers until we find a
 * complete packet, or we get to the sequence number for a fragment
 * which hasn't arrived but might still do so.
 */
static struct sk_buff *
ppp_mp_reconstruct(struct ppp *ppp)
{
	u32 seq = ppp->nextseq;
	u32 minseq = ppp->minseq;
	struct sk_buff_head *list = &ppp->mrq;
	struct sk_buff *p, *next;
	struct sk_buff *head, *tail;
	struct sk_buff *skb = NULL;
	int lost = 0, len = 0;

	if (ppp->mrru == 0)	/* do nothing until mrru is set */
		return NULL;
	head = list->next;
	tail = NULL;
	for (p = head; p != (struct sk_buff *) list; p = next) {
		next = p->next;
		if (seq_before(p->sequence, seq)) {
			/* this can't happen, anyway ignore the skb */
			printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
			       p->sequence, seq);
			head = next;
			continue;
		}
		if (p->sequence != seq) {
			/* Fragment `seq' is missing.  If it is after
			   minseq, it might arrive later, so stop here. */
			if (seq_after(seq, minseq))
				break;
			/* Fragment `seq' is lost, keep going. */
			lost = 1;
			seq = seq_before(minseq, p->sequence)?
				minseq + 1: p->sequence;
			next = p;
			continue;
		}

		/*
		 * At this point we know that all the fragments from
		 * ppp->nextseq to seq are either present or lost.
		 * Also, there are no complete packets in the queue
		 * that have no missing fragments and end before this
		 * fragment.
		 */

		/* B bit set indicates this fragment starts a packet */
		if (p->BEbits & B) {
			head = p;
			lost = 0;
			len = 0;
		}

		len += p->len;

		/* Got a complete packet yet? */
		if (lost == 0 && (p->BEbits & E) && (head->BEbits & B)) {
			if (len > ppp->mrru + 2) {
				++ppp->dev->stats.rx_length_errors;
				printk(KERN_DEBUG "PPP: reconstructed packet"
				       " is too long (%d)\n", len);
			} else if (p == head) {
				/* fragment is complete packet - reuse skb */
				tail = p;
				skb = skb_get(p);
				break;
			} else if ((skb = dev_alloc_skb(len)) == NULL) {
				++ppp->dev->stats.rx_missed_errors;
				printk(KERN_DEBUG "PPP: no memory for "
				       "reconstructed packet");
			} else {
				tail = p;
				break;
			}
			ppp->nextseq = seq + 1;
		}

		/*
		 * If this is the ending fragment of a packet,
		 * and we haven't found a complete valid packet yet,
		 * we can discard up to and including this fragment.
		 */
		if (p->BEbits & E)
			head = next;

		++seq;
	}

	/* If we have a complete packet, copy it all into one skb. */
	if (tail != NULL) {
		/* If we have discarded any fragments,
		   signal a receive error. */
		if (head->sequence != ppp->nextseq) {
			if (ppp->debug & 1)
				printk(KERN_DEBUG "  missed pkts %u..%u\n",
				       ppp->nextseq, head->sequence-1);
			++ppp->dev->stats.rx_dropped;
			ppp_receive_error(ppp);
		}

		if (head != tail)
			/* copy to a single skb */
			for (p = head; p != tail->next; p = p->next)
				skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
		ppp->nextseq = tail->sequence + 1;
		head = tail->next;
	}

	/* Discard all the skbuffs that we have copied the data out of
	   or that we can't use. */
	while ((p = list->next) != head) {
		__skb_unlink(p, list);
		kfree_skb(p);
	}

	return skb;
}
#endif /* CONFIG_PPP_MULTILINK */

/*
 * Channel interface.
 */

/* Create a new, unattached ppp channel. */
int ppp_register_channel(struct ppp_channel *chan)
{
	return ppp_register_net_channel(current->nsproxy->net_ns, chan);
}

/* Create a new, unattached ppp channel for specified net. */
int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
{
	struct channel *pch;
	struct ppp_net *pn;

	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
	if (!pch)
		return -ENOMEM;

	pn = ppp_pernet(net);

	pch->ppp = NULL;
	pch->chan = chan;
	pch->chan_net = net;
	chan->ppp = pch;
	init_ppp_file(&pch->file, CHANNEL);
	pch->file.hdrlen = chan->hdrlen;
#ifdef CONFIG_PPP_MULTILINK
	pch->lastseq = -1;
#endif /* CONFIG_PPP_MULTILINK */
	init_rwsem(&pch->chan_sem);
	spin_lock_init(&pch->downl);
	rwlock_init(&pch->upl);

	spin_lock_bh(&pn->all_channels_lock);
	pch->file.index = ++pn->last_channel_index;
	list_add(&pch->list, &pn->new_channels);
	atomic_inc(&channel_count);
	spin_unlock_bh(&pn->all_channels_lock);

	return 0;
}

/*
 * Return the index of a channel.
 */
int ppp_channel_index(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;

	if (pch)
		return pch->file.index;
	return -1;
}
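/*
 * Illustrative channel-driver usage (editorial sketch, not part of this
 * file): a channel driver (e.g. the async or PPPoE code) embeds a
 * struct ppp_channel, fills in ops (start_xmit, ioctl), mtu and hdrlen,
 * and then:
 *
 *	ppp_register_channel(&my_chan);		// make it known to this layer
 *	...
 *	ppp_input(&my_chan, skb);		// hand up each received frame
 *	ppp_output_wakeup(&my_chan);		// when it can transmit again
 *	...
 *	ppp_unregister_channel(&my_chan);	// on teardown
 */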
/*
 * Return the PPP unit number to which a channel is connected.
 */
int ppp_unit_number(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;
	int unit = -1;

	if (pch) {
		read_lock_bh(&pch->upl);
		if (pch->ppp)
			unit = pch->ppp->file.index;
		read_unlock_bh(&pch->upl);
	}
	return unit;
}

/*
 * Disconnect a channel from the generic layer.
 * This must be called in process context.
 */
void
ppp_unregister_channel(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;
	struct ppp_net *pn;

	if (!pch)
		return;		/* should never happen */

	chan->ppp = NULL;

	/*
	 * This ensures that we have returned from any calls into
	 * the channel's start_xmit or ioctl routine before we proceed.
	 */
	down_write(&pch->chan_sem);
	spin_lock_bh(&pch->downl);
	pch->chan = NULL;
	spin_unlock_bh(&pch->downl);
	up_write(&pch->chan_sem);
	ppp_disconnect_channel(pch);

	pn = ppp_pernet(pch->chan_net);
	spin_lock_bh(&pn->all_channels_lock);
	list_del(&pch->list);
	spin_unlock_bh(&pn->all_channels_lock);

	pch->file.dead = 1;
	wake_up_interruptible(&pch->file.rwait);
	if (atomic_dec_and_test(&pch->file.refcnt))
		ppp_destroy_channel(pch);
}

/*
 * Callback from a channel when it can accept more to transmit.
 * This should be called at BH/softirq level, not interrupt level.
 */
void
ppp_output_wakeup(struct ppp_channel *chan)
{
	struct channel *pch = chan->ppp;

	if (!pch)
		return;
	ppp_channel_push(pch);
}

/*
 * Compression control.
 */

/* Process the PPPIOCSCOMPRESS ioctl. */
static int
ppp_set_compress(struct ppp *ppp, unsigned long arg)
{
	int err;
	struct compressor *cp, *ocomp;
	struct ppp_option_data data;
	void *state, *ostate;
	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];

	err = -EFAULT;
	if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
	    (data.length <= CCP_MAX_OPTION_LENGTH &&
	     copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
		goto out;
	err = -EINVAL;
	if (data.length > CCP_MAX_OPTION_LENGTH ||
	    ccp_option[1] < 2 || ccp_option[1] > data.length)
		goto out;

	cp = try_then_request_module(
		find_compressor(ccp_option[0]),
		"ppp-compress-%d", ccp_option[0]);
	if (!cp)
		goto out;

	err = -ENOBUFS;
	if (data.transmit) {
		state = cp->comp_alloc(ccp_option, data.length);
		if (state) {
			ppp_xmit_lock(ppp);
			ppp->xstate &= ~SC_COMP_RUN;
			ocomp = ppp->xcomp;
			ostate = ppp->xc_state;
			ppp->xcomp = cp;
			ppp->xc_state = state;
			ppp_xmit_unlock(ppp);
			if (ostate) {
				ocomp->comp_free(ostate);
				module_put(ocomp->owner);
			}
			err = 0;
		} else
			module_put(cp->owner);

	} else {
		state = cp->decomp_alloc(ccp_option, data.length);
		if (state) {
			ppp_recv_lock(ppp);
			ppp->rstate &= ~SC_DECOMP_RUN;
			ocomp = ppp->rcomp;
			ostate = ppp->rc_state;
			ppp->rcomp = cp;
			ppp->rc_state = state;
			ppp_recv_unlock(ppp);
			if (ostate) {
				ocomp->decomp_free(ostate);
				module_put(ocomp->owner);
			}
			err = 0;
		} else
			module_put(cp->owner);
	}

 out:
	return err;
}
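/*
 * Example (userspace sketch, not part of the original file): the
 * buffer handed to PPPIOCSCOMPRESS is a raw CCP option in
 * [type, length, data...] form, which is why the code above checks
 * ccp_option[1] against data.length.  A pppd-like caller might do
 * (the option bytes shown are hypothetical deflate parameters):
 *
 *	unsigned char opt[] = { 26, 4, 0x78, 0x00 };
 *	struct ppp_option_data data = {
 *		.ptr = opt, .length = sizeof(opt), .transmit = 1,
 *	};
 *	ioctl(ppp_fd, PPPIOCSCOMPRESS, &data);
 *
 * With no matching compressor registered, try_then_request_module()
 * above would attempt to load the module aliased "ppp-compress-26".
 */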
/*
 * Look at a CCP packet and update our state accordingly.
 * We assume the caller has the xmit or recv path locked.
 */
static void
ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
{
	unsigned char *dp;
	int len;

	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
		return;	/* no header */
	dp = skb->data + 2;

	switch (CCP_CODE(dp)) {
	case CCP_CONFREQ:

		/* A ConfReq starts negotiation of compression
		 * in one direction of transmission,
		 * and hence brings it down...but which way?
		 *
		 * Remember:
		 * A ConfReq indicates what the sender would like to receive
		 */
		if (inbound)
			/* the peer is proposing what we should send */
			ppp->xstate &= ~SC_COMP_RUN;
		else
			/* we are proposing what the peer should send */
			ppp->rstate &= ~SC_DECOMP_RUN;

		break;

	case CCP_TERMREQ:
	case CCP_TERMACK:
		/*
		 * CCP is going down, both directions of transmission
		 */
		ppp->rstate &= ~SC_DECOMP_RUN;
		ppp->xstate &= ~SC_COMP_RUN;
		break;

	case CCP_CONFACK:
		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
			break;
		len = CCP_LENGTH(dp);
		if (!pskb_may_pull(skb, len + 2))
			return;	/* too short */
		dp += CCP_HDRLEN;
		len -= CCP_HDRLEN;
		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
			break;
		if (inbound) {
			/* we will start receiving compressed packets */
			if (!ppp->rc_state)
				break;
			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
					ppp->file.index, 0, ppp->mru, ppp->debug)) {
				ppp->rstate |= SC_DECOMP_RUN;
				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
			}
		} else {
			/* we will soon start sending compressed packets */
			if (!ppp->xc_state)
				break;
			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
					ppp->file.index, 0, ppp->debug))
				ppp->xstate |= SC_COMP_RUN;
		}
		break;

	case CCP_RESETACK:
		/* reset the [de]compressor */
		if ((ppp->flags & SC_CCP_UP) == 0)
			break;
		if (inbound) {
			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
				ppp->rcomp->decomp_reset(ppp->rc_state);
				ppp->rstate &= ~SC_DC_ERROR;
			}
		} else {
			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
				ppp->xcomp->comp_reset(ppp->xc_state);
		}
		break;
	}
}

/* Free up compression resources. */
static void
ppp_ccp_closed(struct ppp *ppp)
{
	void *xstate, *rstate;
	struct compressor *xcomp, *rcomp;

	ppp_lock(ppp);
	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
	ppp->xstate = 0;
	xcomp = ppp->xcomp;
	xstate = ppp->xc_state;
	ppp->xc_state = NULL;
	ppp->rstate = 0;
	rcomp = ppp->rcomp;
	rstate = ppp->rc_state;
	ppp->rc_state = NULL;
	ppp_unlock(ppp);

	if (xstate) {
		xcomp->comp_free(xstate);
		module_put(xcomp->owner);
	}
	if (rstate) {
		rcomp->decomp_free(rstate);
		module_put(rcomp->owner);
	}
}
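/*
 * Illustrative summary (not in the original source) of how the CCP
 * state bits used above fit together, as this file uses them:
 *
 *	SC_CCP_OPEN	pppd has opened CCP; watch CCP packets go past
 *	SC_CCP_UP	CCP is up; compressed packets may flow
 *	SC_COMP_RUN	transmit compressor is initialized and running
 *	SC_DECOMP_RUN	receive decompressor is initialized and running
 *
 * ppp_ccp_peek() clears the *_RUN bits when a ConfReq or TermReq/
 * TermAck goes past, sets them again on a ConfAck once the
 * [de]compressor is (re)initialized, and ppp_ccp_closed() tears
 * everything down and drops the compressor module references.
 */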
/* List of compressors. */
static LIST_HEAD(compressor_list);
static DEFINE_SPINLOCK(compressor_list_lock);

struct compressor_entry {
	struct list_head list;
	struct compressor *comp;
};

static struct compressor_entry *
find_comp_entry(int proto)
{
	struct compressor_entry *ce;

	list_for_each_entry(ce, &compressor_list, list) {
		if (ce->comp->compress_proto == proto)
			return ce;
	}
	return NULL;
}

/* Register a compressor */
int
ppp_register_compressor(struct compressor *cp)
{
	struct compressor_entry *ce;
	int ret;

	spin_lock(&compressor_list_lock);
	ret = -EEXIST;
	if (find_comp_entry(cp->compress_proto))
		goto out;
	ret = -ENOMEM;
	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
	if (!ce)
		goto out;
	ret = 0;
	ce->comp = cp;
	list_add(&ce->list, &compressor_list);
 out:
	spin_unlock(&compressor_list_lock);
	return ret;
}

/* Unregister a compressor */
void
ppp_unregister_compressor(struct compressor *cp)
{
	struct compressor_entry *ce;

	spin_lock(&compressor_list_lock);
	ce = find_comp_entry(cp->compress_proto);
	if (ce && ce->comp == cp) {
		list_del(&ce->list);
		kfree(ce);
	}
	spin_unlock(&compressor_list_lock);
}

/* Find a compressor. */
static struct compressor *
find_compressor(int type)
{
	struct compressor_entry *ce;
	struct compressor *cp = NULL;

	spin_lock(&compressor_list_lock);
	ce = find_comp_entry(type);
	if (ce) {
		cp = ce->comp;
		if (!try_module_get(cp->owner))
			cp = NULL;
	}
	spin_unlock(&compressor_list_lock);
	return cp;
}
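/*
 * Example (illustrative sketch, not part of the original file): a
 * compressor module fills in a struct compressor keyed by its CCP
 * type and registers it here; ppp_set_compress() then finds it via
 * find_compressor(), loading "ppp-compress-<type>" on demand first.
 * The my_*() hooks and type 250 are hypothetical, and the remaining
 * hooks are omitted; see ppp_deflate.c for a complete example.
 */
#if 0
static struct compressor my_compressor = {
	.compress_proto	= 250,			/* hypothetical CCP type */
	.comp_alloc	= my_comp_alloc,	/* transmit-side hooks */
	.comp_free	= my_comp_free,
	.comp_init	= my_comp_init,
	.compress	= my_compress,
	.decomp_alloc	= my_decomp_alloc,	/* receive-side hooks */
	.decomp_free	= my_decomp_free,
	.decomp_init	= my_decomp_init,
	.decompress	= my_decompress,
	.owner		= THIS_MODULE,
};

static int __init my_comp_module_init(void)
{
	return ppp_register_compressor(&my_compressor);
}

static void __exit my_comp_module_exit(void)
{
	ppp_unregister_compressor(&my_compressor);
}
#endif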
/*
 * Miscellaneous stuff.
 */

static void
ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
{
	struct slcompress *vj = ppp->vj;

	memset(st, 0, sizeof(*st));
	st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
	st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
	st->p.ppp_opackets = ppp->dev->stats.tx_packets;
	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
	st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
	if (!vj)
		return;
	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
	st->vj.vjs_compressed = vj->sls_o_compressed;
	st->vj.vjs_searches = vj->sls_o_searches;
	st->vj.vjs_misses = vj->sls_o_misses;
	st->vj.vjs_errorin = vj->sls_i_error;
	st->vj.vjs_tossed = vj->sls_i_tossed;
	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
	st->vj.vjs_compressedin = vj->sls_i_compressed;
}

/*
 * Stuff for handling the lists of ppp units and channels
 * and for initialization.
 */

/*
 * Create a new ppp interface unit.  Fails if it can't allocate memory
 * or if there is already a unit with the requested number.
 * unit == -1 means allocate a new number.
 */
static struct ppp *
ppp_create_interface(struct net *net, int unit, int *retp)
{
	struct ppp *ppp;
	struct ppp_net *pn;
	struct net_device *dev = NULL;
	int ret = -ENOMEM;
	int i;

	dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
	if (!dev)
		goto out1;

	pn = ppp_pernet(net);

	ppp = netdev_priv(dev);
	ppp->dev = dev;
	ppp->mru = PPP_MRU;
	init_ppp_file(&ppp->file, INTERFACE);
	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
	for (i = 0; i < NUM_NP; ++i)
		ppp->npmode[i] = NPMODE_PASS;
	INIT_LIST_HEAD(&ppp->channels);
	spin_lock_init(&ppp->rlock);
	spin_lock_init(&ppp->wlock);
#ifdef CONFIG_PPP_MULTILINK
	ppp->minseq = -1;
	skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */

	/*
	 * Don't forget to set the network namespace
	 * that this device belongs to.
	 */
	dev_net_set(dev, net);

	ret = -EEXIST;
	mutex_lock(&pn->all_ppp_mutex);

	if (unit < 0) {
		unit = unit_get(&pn->units_idr, ppp);
		if (unit < 0) {
			*retp = unit;
			goto out2;
		}
	} else {
		if (unit_find(&pn->units_idr, unit))
			goto out2;	/* unit already exists */
		/*
		 * The caller asked for a specific unit number, so
		 * try to satisfy the request; callers that don't
		 * care should ask for a new unit number instead.
		 *
		 * NOTE: returning -EEXIST when unit_set() fails is
		 * not strictly accurate, but it makes pppd ask us
		 * to allocate a new unit, so the user still ends up
		 * with a working interface.
		 */
		unit = unit_set(&pn->units_idr, ppp, unit);
		if (unit < 0)
			goto out2;
	}

	/* Initialize the new ppp unit */
	ppp->file.index = unit;
	sprintf(dev->name, "ppp%d", unit);

	ret = register_netdev(dev);
	if (ret != 0) {
		unit_put(&pn->units_idr, unit);
		printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
		       dev->name, ret);
		goto out2;
	}

	ppp->ppp_net = net;

	atomic_inc(&ppp_unit_count);
	mutex_unlock(&pn->all_ppp_mutex);

	*retp = 0;
	return ppp;

out2:
	mutex_unlock(&pn->all_ppp_mutex);
	free_netdev(dev);
out1:
	*retp = ret;
	return NULL;
}

/*
 * Initialize a ppp_file structure.
 */
static void
init_ppp_file(struct ppp_file *pf, int kind)
{
	pf->kind = kind;
	skb_queue_head_init(&pf->xq);
	skb_queue_head_init(&pf->rq);
	atomic_set(&pf->refcnt, 1);
	init_waitqueue_head(&pf->rwait);
}

/*
 * Take down a ppp interface unit - called when the owning file
 * (the one that created the unit) is closed or detached.
 */
static void ppp_shutdown_interface(struct ppp *ppp)
{
	struct ppp_net *pn;

	pn = ppp_pernet(ppp->ppp_net);
	mutex_lock(&pn->all_ppp_mutex);

	/* This will call dev_close() for us. */
	ppp_lock(ppp);
	if (!ppp->closing) {
		ppp->closing = 1;
		ppp_unlock(ppp);
		unregister_netdev(ppp->dev);
	} else
		ppp_unlock(ppp);

	unit_put(&pn->units_idr, ppp->file.index);
	ppp->file.dead = 1;
	ppp->owner = NULL;
	wake_up_interruptible(&ppp->file.rwait);

	mutex_unlock(&pn->all_ppp_mutex);
}
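/*
 * Illustrative note (not in the original source): lifetime of a unit.
 * init_ppp_file() starts file.refcnt at 1 for the owning file, and
 * ppp_connect_channel() below takes one more reference per attached
 * channel.  ppp_shutdown_interface() only marks the unit dead,
 * unregisters the netdev and removes the unit from the idr; the
 * struct ppp itself is freed by ppp_destroy_interface() once the
 * last reference is dropped, e.g. in ppp_disconnect_channel() or in
 * the file release path earlier in this file.
 */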
/*
 * Free the memory used by a ppp unit.  This is only called once
 * there are no channels connected to the unit and no file structs
 * that reference the unit.
 */
static void ppp_destroy_interface(struct ppp *ppp)
{
	atomic_dec(&ppp_unit_count);

	if (!ppp->file.dead || ppp->n_channels) {
		/* "can't happen" */
		printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
		       "n_channels=%d !\n", ppp, ppp->file.dead,
		       ppp->n_channels);
		return;
	}

	ppp_ccp_closed(ppp);
	if (ppp->vj) {
		slhc_free(ppp->vj);
		ppp->vj = NULL;
	}
	skb_queue_purge(&ppp->file.xq);
	skb_queue_purge(&ppp->file.rq);
#ifdef CONFIG_PPP_MULTILINK
	skb_queue_purge(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	kfree(ppp->pass_filter);
	ppp->pass_filter = NULL;
	kfree(ppp->active_filter);
	ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */

	kfree_skb(ppp->xmit_pending);

	free_netdev(ppp->dev);
}

/*
 * Locate an existing ppp unit.
 * The caller should have locked the all_ppp_mutex.
 */
static struct ppp *
ppp_find_unit(struct ppp_net *pn, int unit)
{
	return unit_find(&pn->units_idr, unit);
}

/*
 * Locate an existing ppp channel.
 * The caller should have locked the all_channels_lock.
 * First we look in the new_channels list, then in the
 * all_channels list.  If found in the new_channels list,
 * we move it to the all_channels list.  This is for speed
 * when we have a lot of channels in use.
 */
static struct channel *
ppp_find_channel(struct ppp_net *pn, int unit)
{
	struct channel *pch;

	list_for_each_entry(pch, &pn->new_channels, list) {
		if (pch->file.index == unit) {
			list_move(&pch->list, &pn->all_channels);
			return pch;
		}
	}

	list_for_each_entry(pch, &pn->all_channels, list) {
		if (pch->file.index == unit)
			return pch;
	}

	return NULL;
}

/*
 * Connect a PPP channel to a PPP interface unit.
 */
static int
ppp_connect_channel(struct channel *pch, int unit)
{
	struct ppp *ppp;
	struct ppp_net *pn;
	int ret = -ENXIO;
	int hdrlen;

	pn = ppp_pernet(pch->chan_net);

	mutex_lock(&pn->all_ppp_mutex);
	ppp = ppp_find_unit(pn, unit);
	if (!ppp)
		goto out;
	write_lock_bh(&pch->upl);
	ret = -EINVAL;
	if (pch->ppp)
		goto outl;

	ppp_lock(ppp);
	if (pch->file.hdrlen > ppp->file.hdrlen)
		ppp->file.hdrlen = pch->file.hdrlen;
	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
	if (hdrlen > ppp->dev->hard_header_len)
		ppp->dev->hard_header_len = hdrlen;
	list_add_tail(&pch->clist, &ppp->channels);
	++ppp->n_channels;
	pch->ppp = ppp;
	atomic_inc(&ppp->file.refcnt);
	ppp_unlock(ppp);
	ret = 0;

 outl:
	write_unlock_bh(&pch->upl);
 out:
	mutex_unlock(&pn->all_ppp_mutex);
	return ret;
}
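/*
 * Example (userspace sketch, not part of the original file): the path
 * into ppp_connect_channel() above is the PPPIOCCONNECT ioctl on a
 * /dev/ppp fd that is attached to a channel:
 *
 *	int unit = -1;
 *	ioctl(unit_fd, PPPIOCNEWUNIT, &unit);	// create ppp<unit>
 *	ioctl(chan_fd, PPPIOCCONNECT, &unit);	// attach the channel
 *
 * PPPIOCDISCONN undoes this via ppp_disconnect_channel() below.
 */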
/*
 * Disconnect a channel from its ppp unit.
 */
static int
ppp_disconnect_channel(struct channel *pch)
{
	struct ppp *ppp;
	int err = -EINVAL;

	write_lock_bh(&pch->upl);
	ppp = pch->ppp;
	pch->ppp = NULL;
	write_unlock_bh(&pch->upl);
	if (ppp) {
		/* remove it from the ppp unit's list */
		ppp_lock(ppp);
		list_del(&pch->clist);
		if (--ppp->n_channels == 0)
			wake_up_interruptible(&ppp->file.rwait);
		ppp_unlock(ppp);
		if (atomic_dec_and_test(&ppp->file.refcnt))
			ppp_destroy_interface(ppp);
		err = 0;
	}
	return err;
}

/*
 * Free up the resources used by a ppp channel.
 */
static void ppp_destroy_channel(struct channel *pch)
{
	atomic_dec(&channel_count);

	if (!pch->file.dead) {
		/* "can't happen" */
		printk(KERN_ERR "ppp: destroying undead channel %p !\n",
		       pch);
		return;
	}
	skb_queue_purge(&pch->file.xq);
	skb_queue_purge(&pch->file.rq);
	kfree(pch);
}

static void __exit ppp_cleanup(void)
{
	/* should never happen */
	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
		printk(KERN_ERR "PPP: removing module but units remain!\n");
	unregister_chrdev(PPP_MAJOR, "ppp");
	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
	class_destroy(ppp_class);
	unregister_pernet_gen_device(ppp_net_id, &ppp_net_ops);
}

/*
 * Units handling.  Caller must protect concurrent access
 * by holding the all_ppp_mutex.
 */

/* associate pointer with specified number */
static int unit_set(struct idr *p, void *ptr, int n)
{
	int unit, err;

again:
	if (!idr_pre_get(p, GFP_KERNEL)) {
		printk(KERN_ERR "PPP: No free memory for idr\n");
		return -ENOMEM;
	}

	err = idr_get_new_above(p, ptr, n, &unit);
	if (err == -EAGAIN)
		goto again;

	if (unit != n) {
		idr_remove(p, unit);
		return -EINVAL;
	}

	return unit;
}

/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr)
{
	int unit, err;

again:
	if (!idr_pre_get(p, GFP_KERNEL)) {
		printk(KERN_ERR "PPP: No free memory for idr\n");
		return -ENOMEM;
	}

	err = idr_get_new_above(p, ptr, 0, &unit);
	if (err == -EAGAIN)
		goto again;

	return unit;
}

/* put unit number back to a pool */
static void unit_put(struct idr *p, int n)
{
	idr_remove(p, n);
}

/* get pointer associated with the number */
static void *unit_find(struct idr *p, int n)
{
	return idr_find(p, n);
}

/* Module/initialization stuff */

module_init(ppp_init);
module_exit(ppp_cleanup);

EXPORT_SYMBOL(ppp_register_net_channel);
EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR);
MODULE_ALIAS("/dev/ppp");
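/*
 * Example (illustrative sketch, not part of the original file): the
 * exported entry points above are the whole data-path contract for a
 * channel driver.  Reusing the hypothetical struct my_chan from the
 * sketch after ppp_channel_index() above:
 */
#if 0
/* receive path: hand a complete PPP frame to the generic layer */
static void my_chan_rx(struct my_chan *mc, struct sk_buff *skb)
{
	ppp_input(&mc->chan, skb);
}

/* error path: report a lost or corrupted frame */
static void my_chan_rx_error(struct my_chan *mc)
{
	ppp_input_error(&mc->chan, 0);
}

/* transmit path: after start_xmit() returned 0 (busy), restart output */
static void my_chan_tx_ready(struct my_chan *mc)
{
	ppp_output_wakeup(&mc->chan);	/* call at BH/softirq level */
}
#endif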