at v2.6.31-rc2 1028 lines 24 kB view raw
1/* 2 * PPP async serial channel driver for Linux. 3 * 4 * Copyright 1999 Paul Mackerras. 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 9 * 2 of the License, or (at your option) any later version. 10 * 11 * This driver provides the encapsulation and framing for sending 12 * and receiving PPP frames over async serial lines. It relies on 13 * the generic PPP layer to give it frames to send and to process 14 * received frames. It implements the PPP line discipline. 15 * 16 * Part of the code in this driver was inspired by the old async-only 17 * PPP driver, written by Michael Callahan and Al Longyear, and 18 * subsequently hacked by Paul Mackerras. 19 */ 20 21#include <linux/module.h> 22#include <linux/kernel.h> 23#include <linux/skbuff.h> 24#include <linux/tty.h> 25#include <linux/netdevice.h> 26#include <linux/poll.h> 27#include <linux/crc-ccitt.h> 28#include <linux/ppp_defs.h> 29#include <linux/if_ppp.h> 30#include <linux/ppp_channel.h> 31#include <linux/spinlock.h> 32#include <linux/init.h> 33#include <linux/jiffies.h> 34#include <asm/uaccess.h> 35#include <asm/string.h> 36 37#define PPP_VERSION "2.4.2" 38 39#define OBUFSIZE 256 40 41/* Structure for storing local state. */ 42struct asyncppp { 43 struct tty_struct *tty; 44 unsigned int flags; 45 unsigned int state; 46 unsigned int rbits; 47 int mru; 48 spinlock_t xmit_lock; 49 spinlock_t recv_lock; 50 unsigned long xmit_flags; 51 u32 xaccm[8]; 52 u32 raccm; 53 unsigned int bytes_sent; 54 unsigned int bytes_rcvd; 55 56 struct sk_buff *tpkt; 57 int tpkt_pos; 58 u16 tfcs; 59 unsigned char *optr; 60 unsigned char *olim; 61 unsigned long last_xmit; 62 63 struct sk_buff *rpkt; 64 int lcp_fcs; 65 struct sk_buff_head rqueue; 66 67 struct tasklet_struct tsk; 68 69 atomic_t refcnt; 70 struct semaphore dead_sem; 71 struct ppp_channel chan; /* interface to generic ppp layer */ 72 unsigned char obuf[OBUFSIZE]; 73}; 74 75/* Bit numbers in xmit_flags */ 76#define XMIT_WAKEUP 0 77#define XMIT_FULL 1 78#define XMIT_BUSY 2 79 80/* State bits */ 81#define SC_TOSS 1 82#define SC_ESCAPE 2 83#define SC_PREV_ERROR 4 84 85/* Bits in rbits */ 86#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP) 87 88static int flag_time = HZ; 89module_param(flag_time, int, 0); 90MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)"); 91MODULE_LICENSE("GPL"); 92MODULE_ALIAS_LDISC(N_PPP); 93 94/* 95 * Prototypes. 96 */ 97static int ppp_async_encode(struct asyncppp *ap); 98static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb); 99static int ppp_async_push(struct asyncppp *ap); 100static void ppp_async_flush_output(struct asyncppp *ap); 101static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf, 102 char *flags, int count); 103static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, 104 unsigned long arg); 105static void ppp_async_process(unsigned long arg); 106 107static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, 108 int len, int inbound); 109 110static struct ppp_channel_ops async_ops = { 111 ppp_async_send, 112 ppp_async_ioctl 113}; 114 115/* 116 * Routines implementing the PPP line discipline. 117 */ 118 119/* 120 * We have a potential race on dereferencing tty->disc_data, 121 * because the tty layer provides no locking at all - thus one 122 * cpu could be running ppp_asynctty_receive while another 123 * calls ppp_asynctty_close, which zeroes tty->disc_data and 124 * frees the memory that ppp_asynctty_receive is using. The best 125 * way to fix this is to use a rwlock in the tty struct, but for now 126 * we use a single global rwlock for all ttys in ppp line discipline. 127 * 128 * FIXME: this is no longer true. The _close path for the ldisc is 129 * now guaranteed to be sane. 130 */ 131static DEFINE_RWLOCK(disc_data_lock); 132 133static struct asyncppp *ap_get(struct tty_struct *tty) 134{ 135 struct asyncppp *ap; 136 137 read_lock(&disc_data_lock); 138 ap = tty->disc_data; 139 if (ap != NULL) 140 atomic_inc(&ap->refcnt); 141 read_unlock(&disc_data_lock); 142 return ap; 143} 144 145static void ap_put(struct asyncppp *ap) 146{ 147 if (atomic_dec_and_test(&ap->refcnt)) 148 up(&ap->dead_sem); 149} 150 151/* 152 * Called when a tty is put into PPP line discipline. Called in process 153 * context. 154 */ 155static int 156ppp_asynctty_open(struct tty_struct *tty) 157{ 158 struct asyncppp *ap; 159 int err; 160 int speed; 161 162 if (tty->ops->write == NULL) 163 return -EOPNOTSUPP; 164 165 err = -ENOMEM; 166 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 167 if (!ap) 168 goto out; 169 170 /* initialize the asyncppp structure */ 171 ap->tty = tty; 172 ap->mru = PPP_MRU; 173 spin_lock_init(&ap->xmit_lock); 174 spin_lock_init(&ap->recv_lock); 175 ap->xaccm[0] = ~0U; 176 ap->xaccm[3] = 0x60000000U; 177 ap->raccm = ~0U; 178 ap->optr = ap->obuf; 179 ap->olim = ap->obuf; 180 ap->lcp_fcs = -1; 181 182 skb_queue_head_init(&ap->rqueue); 183 tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap); 184 185 atomic_set(&ap->refcnt, 1); 186 init_MUTEX_LOCKED(&ap->dead_sem); 187 188 ap->chan.private = ap; 189 ap->chan.ops = &async_ops; 190 ap->chan.mtu = PPP_MRU; 191 speed = tty_get_baud_rate(tty); 192 ap->chan.speed = speed; 193 err = ppp_register_channel(&ap->chan); 194 if (err) 195 goto out_free; 196 197 tty->disc_data = ap; 198 tty->receive_room = 65536; 199 return 0; 200 201 out_free: 202 kfree(ap); 203 out: 204 return err; 205} 206 207/* 208 * Called when the tty is put into another line discipline 209 * or it hangs up. We have to wait for any cpu currently 210 * executing in any of the other ppp_asynctty_* routines to 211 * finish before we can call ppp_unregister_channel and free 212 * the asyncppp struct. This routine must be called from 213 * process context, not interrupt or softirq context. 214 */ 215static void 216ppp_asynctty_close(struct tty_struct *tty) 217{ 218 struct asyncppp *ap; 219 220 write_lock_irq(&disc_data_lock); 221 ap = tty->disc_data; 222 tty->disc_data = NULL; 223 write_unlock_irq(&disc_data_lock); 224 if (!ap) 225 return; 226 227 /* 228 * We have now ensured that nobody can start using ap from now 229 * on, but we have to wait for all existing users to finish. 230 * Note that ppp_unregister_channel ensures that no calls to 231 * our channel ops (i.e. ppp_async_send/ioctl) are in progress 232 * by the time it returns. 233 */ 234 if (!atomic_dec_and_test(&ap->refcnt)) 235 down(&ap->dead_sem); 236 tasklet_kill(&ap->tsk); 237 238 ppp_unregister_channel(&ap->chan); 239 kfree_skb(ap->rpkt); 240 skb_queue_purge(&ap->rqueue); 241 kfree_skb(ap->tpkt); 242 kfree(ap); 243} 244 245/* 246 * Called on tty hangup in process context. 247 * 248 * Wait for I/O to driver to complete and unregister PPP channel. 249 * This is already done by the close routine, so just call that. 250 */ 251static int ppp_asynctty_hangup(struct tty_struct *tty) 252{ 253 ppp_asynctty_close(tty); 254 return 0; 255} 256 257/* 258 * Read does nothing - no data is ever available this way. 259 * Pppd reads and writes packets via /dev/ppp instead. 260 */ 261static ssize_t 262ppp_asynctty_read(struct tty_struct *tty, struct file *file, 263 unsigned char __user *buf, size_t count) 264{ 265 return -EAGAIN; 266} 267 268/* 269 * Write on the tty does nothing, the packets all come in 270 * from the ppp generic stuff. 271 */ 272static ssize_t 273ppp_asynctty_write(struct tty_struct *tty, struct file *file, 274 const unsigned char *buf, size_t count) 275{ 276 return -EAGAIN; 277} 278 279/* 280 * Called in process context only. May be re-entered by multiple 281 * ioctl calling threads. 282 */ 283 284static int 285ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file, 286 unsigned int cmd, unsigned long arg) 287{ 288 struct asyncppp *ap = ap_get(tty); 289 int err, val; 290 int __user *p = (int __user *)arg; 291 292 if (!ap) 293 return -ENXIO; 294 err = -EFAULT; 295 switch (cmd) { 296 case PPPIOCGCHAN: 297 err = -EFAULT; 298 if (put_user(ppp_channel_index(&ap->chan), p)) 299 break; 300 err = 0; 301 break; 302 303 case PPPIOCGUNIT: 304 err = -EFAULT; 305 if (put_user(ppp_unit_number(&ap->chan), p)) 306 break; 307 err = 0; 308 break; 309 310 case TCFLSH: 311 /* flush our buffers and the serial port's buffer */ 312 if (arg == TCIOFLUSH || arg == TCOFLUSH) 313 ppp_async_flush_output(ap); 314 err = tty_perform_flush(tty, arg); 315 break; 316 317 case FIONREAD: 318 val = 0; 319 if (put_user(val, p)) 320 break; 321 err = 0; 322 break; 323 324 default: 325 /* Try the various mode ioctls */ 326 err = tty_mode_ioctl(tty, file, cmd, arg); 327 } 328 329 ap_put(ap); 330 return err; 331} 332 333/* No kernel lock - fine */ 334static unsigned int 335ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait) 336{ 337 return 0; 338} 339 340/* 341 * This can now be called from hard interrupt level as well 342 * as soft interrupt level or mainline. 343 */ 344static void 345ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, 346 char *cflags, int count) 347{ 348 struct asyncppp *ap = ap_get(tty); 349 unsigned long flags; 350 351 if (!ap) 352 return; 353 spin_lock_irqsave(&ap->recv_lock, flags); 354 ppp_async_input(ap, buf, cflags, count); 355 spin_unlock_irqrestore(&ap->recv_lock, flags); 356 if (!skb_queue_empty(&ap->rqueue)) 357 tasklet_schedule(&ap->tsk); 358 ap_put(ap); 359} 360 361static void 362ppp_asynctty_wakeup(struct tty_struct *tty) 363{ 364 struct asyncppp *ap = ap_get(tty); 365 366 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 367 if (!ap) 368 return; 369 set_bit(XMIT_WAKEUP, &ap->xmit_flags); 370 tasklet_schedule(&ap->tsk); 371 ap_put(ap); 372} 373 374 375static struct tty_ldisc_ops ppp_ldisc = { 376 .owner = THIS_MODULE, 377 .magic = TTY_LDISC_MAGIC, 378 .name = "ppp", 379 .open = ppp_asynctty_open, 380 .close = ppp_asynctty_close, 381 .hangup = ppp_asynctty_hangup, 382 .read = ppp_asynctty_read, 383 .write = ppp_asynctty_write, 384 .ioctl = ppp_asynctty_ioctl, 385 .poll = ppp_asynctty_poll, 386 .receive_buf = ppp_asynctty_receive, 387 .write_wakeup = ppp_asynctty_wakeup, 388}; 389 390static int __init 391ppp_async_init(void) 392{ 393 int err; 394 395 err = tty_register_ldisc(N_PPP, &ppp_ldisc); 396 if (err != 0) 397 printk(KERN_ERR "PPP_async: error %d registering line disc.\n", 398 err); 399 return err; 400} 401 402/* 403 * The following routines provide the PPP channel interface. 404 */ 405static int 406ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg) 407{ 408 struct asyncppp *ap = chan->private; 409 void __user *argp = (void __user *)arg; 410 int __user *p = argp; 411 int err, val; 412 u32 accm[8]; 413 414 err = -EFAULT; 415 switch (cmd) { 416 case PPPIOCGFLAGS: 417 val = ap->flags | ap->rbits; 418 if (put_user(val, p)) 419 break; 420 err = 0; 421 break; 422 case PPPIOCSFLAGS: 423 if (get_user(val, p)) 424 break; 425 ap->flags = val & ~SC_RCV_BITS; 426 spin_lock_irq(&ap->recv_lock); 427 ap->rbits = val & SC_RCV_BITS; 428 spin_unlock_irq(&ap->recv_lock); 429 err = 0; 430 break; 431 432 case PPPIOCGASYNCMAP: 433 if (put_user(ap->xaccm[0], (u32 __user *)argp)) 434 break; 435 err = 0; 436 break; 437 case PPPIOCSASYNCMAP: 438 if (get_user(ap->xaccm[0], (u32 __user *)argp)) 439 break; 440 err = 0; 441 break; 442 443 case PPPIOCGRASYNCMAP: 444 if (put_user(ap->raccm, (u32 __user *)argp)) 445 break; 446 err = 0; 447 break; 448 case PPPIOCSRASYNCMAP: 449 if (get_user(ap->raccm, (u32 __user *)argp)) 450 break; 451 err = 0; 452 break; 453 454 case PPPIOCGXASYNCMAP: 455 if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm))) 456 break; 457 err = 0; 458 break; 459 case PPPIOCSXASYNCMAP: 460 if (copy_from_user(accm, argp, sizeof(accm))) 461 break; 462 accm[2] &= ~0x40000000U; /* can't escape 0x5e */ 463 accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */ 464 memcpy(ap->xaccm, accm, sizeof(ap->xaccm)); 465 err = 0; 466 break; 467 468 case PPPIOCGMRU: 469 if (put_user(ap->mru, p)) 470 break; 471 err = 0; 472 break; 473 case PPPIOCSMRU: 474 if (get_user(val, p)) 475 break; 476 if (val < PPP_MRU) 477 val = PPP_MRU; 478 ap->mru = val; 479 err = 0; 480 break; 481 482 default: 483 err = -ENOTTY; 484 } 485 486 return err; 487} 488 489/* 490 * This is called at softirq level to deliver received packets 491 * to the ppp_generic code, and to tell the ppp_generic code 492 * if we can accept more output now. 493 */ 494static void ppp_async_process(unsigned long arg) 495{ 496 struct asyncppp *ap = (struct asyncppp *) arg; 497 struct sk_buff *skb; 498 499 /* process received packets */ 500 while ((skb = skb_dequeue(&ap->rqueue)) != NULL) { 501 if (skb->cb[0]) 502 ppp_input_error(&ap->chan, 0); 503 ppp_input(&ap->chan, skb); 504 } 505 506 /* try to push more stuff out */ 507 if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap)) 508 ppp_output_wakeup(&ap->chan); 509} 510 511/* 512 * Procedures for encapsulation and framing. 513 */ 514 515/* 516 * Procedure to encode the data for async serial transmission. 517 * Does octet stuffing (escaping), puts the address/control bytes 518 * on if A/C compression is disabled, and does protocol compression. 519 * Assumes ap->tpkt != 0 on entry. 520 * Returns 1 if we finished the current frame, 0 otherwise. 521 */ 522 523#define PUT_BYTE(ap, buf, c, islcp) do { \ 524 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\ 525 *buf++ = PPP_ESCAPE; \ 526 *buf++ = c ^ 0x20; \ 527 } else \ 528 *buf++ = c; \ 529} while (0) 530 531static int 532ppp_async_encode(struct asyncppp *ap) 533{ 534 int fcs, i, count, c, proto; 535 unsigned char *buf, *buflim; 536 unsigned char *data; 537 int islcp; 538 539 buf = ap->obuf; 540 ap->olim = buf; 541 ap->optr = buf; 542 i = ap->tpkt_pos; 543 data = ap->tpkt->data; 544 count = ap->tpkt->len; 545 fcs = ap->tfcs; 546 proto = (data[0] << 8) + data[1]; 547 548 /* 549 * LCP packets with code values between 1 (configure-reqest) 550 * and 7 (code-reject) must be sent as though no options 551 * had been negotiated. 552 */ 553 islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7; 554 555 if (i == 0) { 556 if (islcp) 557 async_lcp_peek(ap, data, count, 0); 558 559 /* 560 * Start of a new packet - insert the leading FLAG 561 * character if necessary. 562 */ 563 if (islcp || flag_time == 0 564 || time_after_eq(jiffies, ap->last_xmit + flag_time)) 565 *buf++ = PPP_FLAG; 566 ap->last_xmit = jiffies; 567 fcs = PPP_INITFCS; 568 569 /* 570 * Put in the address/control bytes if necessary 571 */ 572 if ((ap->flags & SC_COMP_AC) == 0 || islcp) { 573 PUT_BYTE(ap, buf, 0xff, islcp); 574 fcs = PPP_FCS(fcs, 0xff); 575 PUT_BYTE(ap, buf, 0x03, islcp); 576 fcs = PPP_FCS(fcs, 0x03); 577 } 578 } 579 580 /* 581 * Once we put in the last byte, we need to put in the FCS 582 * and closing flag, so make sure there is at least 7 bytes 583 * of free space in the output buffer. 584 */ 585 buflim = ap->obuf + OBUFSIZE - 6; 586 while (i < count && buf < buflim) { 587 c = data[i++]; 588 if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT)) 589 continue; /* compress protocol field */ 590 fcs = PPP_FCS(fcs, c); 591 PUT_BYTE(ap, buf, c, islcp); 592 } 593 594 if (i < count) { 595 /* 596 * Remember where we are up to in this packet. 597 */ 598 ap->olim = buf; 599 ap->tpkt_pos = i; 600 ap->tfcs = fcs; 601 return 0; 602 } 603 604 /* 605 * We have finished the packet. Add the FCS and flag. 606 */ 607 fcs = ~fcs; 608 c = fcs & 0xff; 609 PUT_BYTE(ap, buf, c, islcp); 610 c = (fcs >> 8) & 0xff; 611 PUT_BYTE(ap, buf, c, islcp); 612 *buf++ = PPP_FLAG; 613 ap->olim = buf; 614 615 kfree_skb(ap->tpkt); 616 ap->tpkt = NULL; 617 return 1; 618} 619 620/* 621 * Transmit-side routines. 622 */ 623 624/* 625 * Send a packet to the peer over an async tty line. 626 * Returns 1 iff the packet was accepted. 627 * If the packet was not accepted, we will call ppp_output_wakeup 628 * at some later time. 629 */ 630static int 631ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb) 632{ 633 struct asyncppp *ap = chan->private; 634 635 ppp_async_push(ap); 636 637 if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags)) 638 return 0; /* already full */ 639 ap->tpkt = skb; 640 ap->tpkt_pos = 0; 641 642 ppp_async_push(ap); 643 return 1; 644} 645 646/* 647 * Push as much data as possible out to the tty. 648 */ 649static int 650ppp_async_push(struct asyncppp *ap) 651{ 652 int avail, sent, done = 0; 653 struct tty_struct *tty = ap->tty; 654 int tty_stuffed = 0; 655 656 /* 657 * We can get called recursively here if the tty write 658 * function calls our wakeup function. This can happen 659 * for example on a pty with both the master and slave 660 * set to PPP line discipline. 661 * We use the XMIT_BUSY bit to detect this and get out, 662 * leaving the XMIT_WAKEUP bit set to tell the other 663 * instance that it may now be able to write more now. 664 */ 665 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags)) 666 return 0; 667 spin_lock_bh(&ap->xmit_lock); 668 for (;;) { 669 if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags)) 670 tty_stuffed = 0; 671 if (!tty_stuffed && ap->optr < ap->olim) { 672 avail = ap->olim - ap->optr; 673 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 674 sent = tty->ops->write(tty, ap->optr, avail); 675 if (sent < 0) 676 goto flush; /* error, e.g. loss of CD */ 677 ap->optr += sent; 678 if (sent < avail) 679 tty_stuffed = 1; 680 continue; 681 } 682 if (ap->optr >= ap->olim && ap->tpkt) { 683 if (ppp_async_encode(ap)) { 684 /* finished processing ap->tpkt */ 685 clear_bit(XMIT_FULL, &ap->xmit_flags); 686 done = 1; 687 } 688 continue; 689 } 690 /* 691 * We haven't made any progress this time around. 692 * Clear XMIT_BUSY to let other callers in, but 693 * after doing so we have to check if anyone set 694 * XMIT_WAKEUP since we last checked it. If they 695 * did, we should try again to set XMIT_BUSY and go 696 * around again in case XMIT_BUSY was still set when 697 * the other caller tried. 698 */ 699 clear_bit(XMIT_BUSY, &ap->xmit_flags); 700 /* any more work to do? if not, exit the loop */ 701 if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) 702 || (!tty_stuffed && ap->tpkt))) 703 break; 704 /* more work to do, see if we can do it now */ 705 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags)) 706 break; 707 } 708 spin_unlock_bh(&ap->xmit_lock); 709 return done; 710 711flush: 712 clear_bit(XMIT_BUSY, &ap->xmit_flags); 713 if (ap->tpkt) { 714 kfree_skb(ap->tpkt); 715 ap->tpkt = NULL; 716 clear_bit(XMIT_FULL, &ap->xmit_flags); 717 done = 1; 718 } 719 ap->optr = ap->olim; 720 spin_unlock_bh(&ap->xmit_lock); 721 return done; 722} 723 724/* 725 * Flush output from our internal buffers. 726 * Called for the TCFLSH ioctl. Can be entered in parallel 727 * but this is covered by the xmit_lock. 728 */ 729static void 730ppp_async_flush_output(struct asyncppp *ap) 731{ 732 int done = 0; 733 734 spin_lock_bh(&ap->xmit_lock); 735 ap->optr = ap->olim; 736 if (ap->tpkt != NULL) { 737 kfree_skb(ap->tpkt); 738 ap->tpkt = NULL; 739 clear_bit(XMIT_FULL, &ap->xmit_flags); 740 done = 1; 741 } 742 spin_unlock_bh(&ap->xmit_lock); 743 if (done) 744 ppp_output_wakeup(&ap->chan); 745} 746 747/* 748 * Receive-side routines. 749 */ 750 751/* see how many ordinary chars there are at the start of buf */ 752static inline int 753scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count) 754{ 755 int i, c; 756 757 for (i = 0; i < count; ++i) { 758 c = buf[i]; 759 if (c == PPP_ESCAPE || c == PPP_FLAG 760 || (c < 0x20 && (ap->raccm & (1 << c)) != 0)) 761 break; 762 } 763 return i; 764} 765 766/* called when a flag is seen - do end-of-packet processing */ 767static void 768process_input_packet(struct asyncppp *ap) 769{ 770 struct sk_buff *skb; 771 unsigned char *p; 772 unsigned int len, fcs, proto; 773 774 skb = ap->rpkt; 775 if (ap->state & (SC_TOSS | SC_ESCAPE)) 776 goto err; 777 778 if (skb == NULL) 779 return; /* 0-length packet */ 780 781 /* check the FCS */ 782 p = skb->data; 783 len = skb->len; 784 if (len < 3) 785 goto err; /* too short */ 786 fcs = PPP_INITFCS; 787 for (; len > 0; --len) 788 fcs = PPP_FCS(fcs, *p++); 789 if (fcs != PPP_GOODFCS) 790 goto err; /* bad FCS */ 791 skb_trim(skb, skb->len - 2); 792 793 /* check for address/control and protocol compression */ 794 p = skb->data; 795 if (p[0] == PPP_ALLSTATIONS) { 796 /* chop off address/control */ 797 if (p[1] != PPP_UI || skb->len < 3) 798 goto err; 799 p = skb_pull(skb, 2); 800 } 801 proto = p[0]; 802 if (proto & 1) { 803 /* protocol is compressed */ 804 skb_push(skb, 1)[0] = 0; 805 } else { 806 if (skb->len < 2) 807 goto err; 808 proto = (proto << 8) + p[1]; 809 if (proto == PPP_LCP) 810 async_lcp_peek(ap, p, skb->len, 1); 811 } 812 813 /* queue the frame to be processed */ 814 skb->cb[0] = ap->state; 815 skb_queue_tail(&ap->rqueue, skb); 816 ap->rpkt = NULL; 817 ap->state = 0; 818 return; 819 820 err: 821 /* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */ 822 ap->state = SC_PREV_ERROR; 823 if (skb) { 824 /* make skb appear as freshly allocated */ 825 skb_trim(skb, 0); 826 skb_reserve(skb, - skb_headroom(skb)); 827 } 828} 829 830/* Called when the tty driver has data for us. Runs parallel with the 831 other ldisc functions but will not be re-entered */ 832 833static void 834ppp_async_input(struct asyncppp *ap, const unsigned char *buf, 835 char *flags, int count) 836{ 837 struct sk_buff *skb; 838 int c, i, j, n, s, f; 839 unsigned char *sp; 840 841 /* update bits used for 8-bit cleanness detection */ 842 if (~ap->rbits & SC_RCV_BITS) { 843 s = 0; 844 for (i = 0; i < count; ++i) { 845 c = buf[i]; 846 if (flags && flags[i] != 0) 847 continue; 848 s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0; 849 c = ((c >> 4) ^ c) & 0xf; 850 s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP; 851 } 852 ap->rbits |= s; 853 } 854 855 while (count > 0) { 856 /* scan through and see how many chars we can do in bulk */ 857 if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE) 858 n = 1; 859 else 860 n = scan_ordinary(ap, buf, count); 861 862 f = 0; 863 if (flags && (ap->state & SC_TOSS) == 0) { 864 /* check the flags to see if any char had an error */ 865 for (j = 0; j < n; ++j) 866 if ((f = flags[j]) != 0) 867 break; 868 } 869 if (f != 0) { 870 /* start tossing */ 871 ap->state |= SC_TOSS; 872 873 } else if (n > 0 && (ap->state & SC_TOSS) == 0) { 874 /* stuff the chars in the skb */ 875 skb = ap->rpkt; 876 if (!skb) { 877 skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2); 878 if (!skb) 879 goto nomem; 880 ap->rpkt = skb; 881 } 882 if (skb->len == 0) { 883 /* Try to get the payload 4-byte aligned. 884 * This should match the 885 * PPP_ALLSTATIONS/PPP_UI/compressed tests in 886 * process_input_packet, but we do not have 887 * enough chars here to test buf[1] and buf[2]. 888 */ 889 if (buf[0] != PPP_ALLSTATIONS) 890 skb_reserve(skb, 2 + (buf[0] & 1)); 891 } 892 if (n > skb_tailroom(skb)) { 893 /* packet overflowed MRU */ 894 ap->state |= SC_TOSS; 895 } else { 896 sp = skb_put(skb, n); 897 memcpy(sp, buf, n); 898 if (ap->state & SC_ESCAPE) { 899 sp[0] ^= 0x20; 900 ap->state &= ~SC_ESCAPE; 901 } 902 } 903 } 904 905 if (n >= count) 906 break; 907 908 c = buf[n]; 909 if (flags != NULL && flags[n] != 0) { 910 ap->state |= SC_TOSS; 911 } else if (c == PPP_FLAG) { 912 process_input_packet(ap); 913 } else if (c == PPP_ESCAPE) { 914 ap->state |= SC_ESCAPE; 915 } else if (I_IXON(ap->tty)) { 916 if (c == START_CHAR(ap->tty)) 917 start_tty(ap->tty); 918 else if (c == STOP_CHAR(ap->tty)) 919 stop_tty(ap->tty); 920 } 921 /* otherwise it's a char in the recv ACCM */ 922 ++n; 923 924 buf += n; 925 if (flags) 926 flags += n; 927 count -= n; 928 } 929 return; 930 931 nomem: 932 printk(KERN_ERR "PPPasync: no memory (input pkt)\n"); 933 ap->state |= SC_TOSS; 934} 935 936/* 937 * We look at LCP frames going past so that we can notice 938 * and react to the LCP configure-ack from the peer. 939 * In the situation where the peer has been sent a configure-ack 940 * already, LCP is up once it has sent its configure-ack 941 * so the immediately following packet can be sent with the 942 * configured LCP options. This allows us to process the following 943 * packet correctly without pppd needing to respond quickly. 944 * 945 * We only respond to the received configure-ack if we have just 946 * sent a configure-request, and the configure-ack contains the 947 * same data (this is checked using a 16-bit crc of the data). 948 */ 949#define CONFREQ 1 /* LCP code field values */ 950#define CONFACK 2 951#define LCP_MRU 1 /* LCP option numbers */ 952#define LCP_ASYNCMAP 2 953 954static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, 955 int len, int inbound) 956{ 957 int dlen, fcs, i, code; 958 u32 val; 959 960 data += 2; /* skip protocol bytes */ 961 len -= 2; 962 if (len < 4) /* 4 = code, ID, length */ 963 return; 964 code = data[0]; 965 if (code != CONFACK && code != CONFREQ) 966 return; 967 dlen = (data[2] << 8) + data[3]; 968 if (len < dlen) 969 return; /* packet got truncated or length is bogus */ 970 971 if (code == (inbound? CONFACK: CONFREQ)) { 972 /* 973 * sent confreq or received confack: 974 * calculate the crc of the data from the ID field on. 975 */ 976 fcs = PPP_INITFCS; 977 for (i = 1; i < dlen; ++i) 978 fcs = PPP_FCS(fcs, data[i]); 979 980 if (!inbound) { 981 /* outbound confreq - remember the crc for later */ 982 ap->lcp_fcs = fcs; 983 return; 984 } 985 986 /* received confack, check the crc */ 987 fcs ^= ap->lcp_fcs; 988 ap->lcp_fcs = -1; 989 if (fcs != 0) 990 return; 991 } else if (inbound) 992 return; /* not interested in received confreq */ 993 994 /* process the options in the confack */ 995 data += 4; 996 dlen -= 4; 997 /* data[0] is code, data[1] is length */ 998 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { 999 switch (data[0]) { 1000 case LCP_MRU: 1001 val = (data[2] << 8) + data[3]; 1002 if (inbound) 1003 ap->mru = val; 1004 else 1005 ap->chan.mtu = val; 1006 break; 1007 case LCP_ASYNCMAP: 1008 val = (data[2] << 24) + (data[3] << 16) 1009 + (data[4] << 8) + data[5]; 1010 if (inbound) 1011 ap->raccm = val; 1012 else 1013 ap->xaccm[0] = val; 1014 break; 1015 } 1016 dlen -= data[1]; 1017 data += data[1]; 1018 } 1019} 1020 1021static void __exit ppp_async_cleanup(void) 1022{ 1023 if (tty_unregister_ldisc(N_PPP) != 0) 1024 printk(KERN_ERR "failed to unregister PPP line discipline\n"); 1025} 1026 1027module_init(ppp_async_init); 1028module_exit(ppp_async_cleanup);