/*
 * PPP async serial channel driver for Linux.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over async serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <asm/uaccess.h>
#include <asm/string.h>

#define PPP_VERSION     "2.4.2"

#define OBUFSIZE        256

/* Structure for storing local state. */
struct asyncppp {
        struct tty_struct *tty;
        unsigned int    flags;          /* SC_xxx option flags from pppd */
        unsigned int    state;          /* SC_TOSS/SC_ESCAPE/SC_PREV_ERROR */
        unsigned int    rbits;          /* SC_RCV_xxx 8-bit-cleanness bits */
        int             mru;
        spinlock_t      xmit_lock;
        spinlock_t      recv_lock;
        unsigned long   xmit_flags;     /* XMIT_xxx bit numbers below */
        u32             xaccm[8];       /* transmit async control char map */
        u32             raccm;          /* receive map (chars 0x00-0x1f) */
        unsigned int    bytes_sent;
        unsigned int    bytes_rcvd;

        struct sk_buff  *tpkt;          /* packet being transmitted */
        int             tpkt_pos;       /* how much of tpkt is encoded */
        u16             tfcs;           /* FCS accumulated so far for tpkt */
        unsigned char   *optr;          /* next byte of obuf to send */
        unsigned char   *olim;          /* end of valid data in obuf */
        unsigned long   last_xmit;      /* jiffies when last frame started */

        struct sk_buff  *rpkt;          /* receive packet being assembled */
        int             lcp_fcs;        /* crc of last outbound LCP confreq */
        struct sk_buff_head rqueue;     /* received frames for the tasklet */

        struct tasklet_struct tsk;

        atomic_t        refcnt;         /* see ap_get/ap_put below */
        struct semaphore dead_sem;
        struct ppp_channel chan;        /* interface to generic ppp layer */
        unsigned char   obuf[OBUFSIZE]; /* output/encode buffer */
};
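
/*
 * Layout of the control-character maps, for reference: xaccm is a
 * 256-bit map, one bit per byte value, and byte c must be escaped on
 * transmit when xaccm[c >> 5] & (1 << (c & 0x1f)) is set (see
 * PUT_BYTE below).  For example, the default xaccm[3] = 0x60000000
 * set in ppp_asynctty_open covers 0x7d (bit 29 of word 3) and 0x7e
 * (bit 30), the escape and flag characters themselves.  raccm is a
 * single word because the peer may only ask us to discard received
 * control characters in the range 0x00-0x1f.
 */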

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP     0
#define XMIT_FULL       1
#define XMIT_BUSY       2

/* State bits */
#define SC_TOSS         1
#define SC_ESCAPE       2
#define SC_PREV_ERROR   4

/* Bits in rbits */
#define SC_RCV_BITS     (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);

/*
 * Prototypes.
 */
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
                            char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
                           unsigned long arg);
static void ppp_async_process(unsigned long arg);

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
                           int len, int inbound);

static struct ppp_channel_ops async_ops = {
        ppp_async_send,
        ppp_async_ioctl
};

/*
 * Routines implementing the PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_asynctty_receive while another
 * calls ppp_asynctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_asynctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: this is no longer true. The _close path for the ldisc is
 * now guaranteed to be sane.
 */
static DEFINE_RWLOCK(disc_data_lock);

static struct asyncppp *ap_get(struct tty_struct *tty)
{
        struct asyncppp *ap;

        read_lock(&disc_data_lock);
        ap = tty->disc_data;
        if (ap != NULL)
                atomic_inc(&ap->refcnt);
        read_unlock(&disc_data_lock);
        return ap;
}

static void ap_put(struct asyncppp *ap)
{
        if (atomic_dec_and_test(&ap->refcnt))
                up(&ap->dead_sem);
}

/*
 * Called when a tty is put into PPP line discipline. Called in process
 * context.
 */
static int
ppp_asynctty_open(struct tty_struct *tty)
{
        struct asyncppp *ap;
        int err;

        err = -ENOMEM;
        ap = kzalloc(sizeof(*ap), GFP_KERNEL);
        if (ap == 0)
                goto out;

        /* initialize the asyncppp structure */
        ap->tty = tty;
        ap->mru = PPP_MRU;
        spin_lock_init(&ap->xmit_lock);
        spin_lock_init(&ap->recv_lock);
        ap->xaccm[0] = ~0U;
        ap->xaccm[3] = 0x60000000U;
        ap->raccm = ~0U;
        ap->optr = ap->obuf;
        ap->olim = ap->obuf;
        ap->lcp_fcs = -1;

        skb_queue_head_init(&ap->rqueue);
        tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);

        atomic_set(&ap->refcnt, 1);
        init_MUTEX_LOCKED(&ap->dead_sem);

        ap->chan.private = ap;
        ap->chan.ops = &async_ops;
        ap->chan.mtu = PPP_MRU;
        err = ppp_register_channel(&ap->chan);
        if (err)
                goto out_free;

        tty->disc_data = ap;
        tty->receive_room = 65536;
        return 0;

 out_free:
        kfree(ap);
 out:
        return err;
}

/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_asynctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the asyncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_asynctty_close(struct tty_struct *tty)
{
        struct asyncppp *ap;

        write_lock_irq(&disc_data_lock);
        ap = tty->disc_data;
        tty->disc_data = NULL;
        write_unlock_irq(&disc_data_lock);
        if (ap == 0)
                return;

        /*
         * We have now ensured that nobody can start using ap from now
         * on, but we have to wait for all existing users to finish.
         * Note that ppp_unregister_channel ensures that no calls to
         * our channel ops (i.e. ppp_async_send/ioctl) are in progress
         * by the time it returns.
         */
        if (!atomic_dec_and_test(&ap->refcnt))
                down(&ap->dead_sem);
        tasklet_kill(&ap->tsk);

        ppp_unregister_channel(&ap->chan);
        if (ap->rpkt != 0)
                kfree_skb(ap->rpkt);
        skb_queue_purge(&ap->rqueue);
        if (ap->tpkt != 0)
                kfree_skb(ap->tpkt);
        kfree(ap);
}
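
/*
 * Lifetime rules, in brief: ap starts with refcnt == 1 and dead_sem
 * held (init_MUTEX_LOCKED above).  Every tty callback takes a
 * reference via ap_get() and drops it with ap_put().  Close clears
 * tty->disc_data and drops the initial reference; if other users
 * still hold references, it sleeps on dead_sem until the final
 * ap_put() releases the semaphore, after which ap can be freed
 * safely.
 */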

/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to driver to complete and unregister PPP channel.
 * This is already done by the close routine, so just call that.
 */
static int ppp_asynctty_hangup(struct tty_struct *tty)
{
        ppp_asynctty_close(tty);
        return 0;
}

/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
                  unsigned char __user *buf, size_t count)
{
        return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file,
                   const unsigned char *buf, size_t count)
{
        return -EAGAIN;
}

/*
 * Called in process context only. May be re-entered by multiple
 * ioctl calling threads.
 */

static int
ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
                   unsigned int cmd, unsigned long arg)
{
        struct asyncppp *ap = ap_get(tty);
        int err, val;
        int __user *p = (int __user *)arg;

        if (ap == 0)
                return -ENXIO;
        err = -EFAULT;
        switch (cmd) {
        case PPPIOCGCHAN:
                err = -ENXIO;
                if (ap == 0)
                        break;
                err = -EFAULT;
                if (put_user(ppp_channel_index(&ap->chan), p))
                        break;
                err = 0;
                break;

        case PPPIOCGUNIT:
                err = -ENXIO;
                if (ap == 0)
                        break;
                err = -EFAULT;
                if (put_user(ppp_unit_number(&ap->chan), p))
                        break;
                err = 0;
                break;

        case TCGETS:
        case TCGETA:
                err = n_tty_ioctl(tty, file, cmd, arg);
                break;

        case TCFLSH:
                /* flush our buffers and the serial port's buffer */
                if (arg == TCIOFLUSH || arg == TCOFLUSH)
                        ppp_async_flush_output(ap);
                err = n_tty_ioctl(tty, file, cmd, arg);
                break;

        case FIONREAD:
                val = 0;
                if (put_user(val, p))
                        break;
                err = 0;
                break;

        default:
                err = -ENOIOCTLCMD;
        }

        ap_put(ap);
        return err;
}

/* No kernel lock - fine */
static unsigned int
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
        return 0;
}
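
/*
 * For illustration only (not part of the driver): userspace, i.e.
 * pppd, would typically enter this line discipline and locate its
 * channel roughly as follows, with ttyfd a hypothetical open tty:
 *
 *      int disc = N_PPP, chan_idx;
 *      ioctl(ttyfd, TIOCSETD, &disc);          (calls ppp_asynctty_open)
 *      ioctl(ttyfd, PPPIOCGCHAN, &chan_idx);   (handled above)
 *
 * From then on packets flow via /dev/ppp, which is why read and
 * write on the tty just return -EAGAIN.
 */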

/*
 * This can now be called from hard interrupt level as well
 * as soft interrupt level or mainline.
 */
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
                     char *cflags, int count)
{
        struct asyncppp *ap = ap_get(tty);
        unsigned long flags;

        if (ap == 0)
                return;
        spin_lock_irqsave(&ap->recv_lock, flags);
        ppp_async_input(ap, buf, cflags, count);
        spin_unlock_irqrestore(&ap->recv_lock, flags);
        if (!skb_queue_empty(&ap->rqueue))
                tasklet_schedule(&ap->tsk);
        ap_put(ap);
        if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
            && tty->driver->unthrottle)
                tty->driver->unthrottle(tty);
}

static void
ppp_asynctty_wakeup(struct tty_struct *tty)
{
        struct asyncppp *ap = ap_get(tty);

        clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
        if (ap == 0)
                return;
        set_bit(XMIT_WAKEUP, &ap->xmit_flags);
        tasklet_schedule(&ap->tsk);
        ap_put(ap);
}


static struct tty_ldisc ppp_ldisc = {
        .owner        = THIS_MODULE,
        .magic        = TTY_LDISC_MAGIC,
        .name         = "ppp",
        .open         = ppp_asynctty_open,
        .close        = ppp_asynctty_close,
        .hangup       = ppp_asynctty_hangup,
        .read         = ppp_asynctty_read,
        .write        = ppp_asynctty_write,
        .ioctl        = ppp_asynctty_ioctl,
        .poll         = ppp_asynctty_poll,
        .receive_buf  = ppp_asynctty_receive,
        .write_wakeup = ppp_asynctty_wakeup,
};

static int __init
ppp_async_init(void)
{
        int err;

        err = tty_register_ldisc(N_PPP, &ppp_ldisc);
        if (err != 0)
                printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
                       err);
        return err;
}

/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
        struct asyncppp *ap = chan->private;
        void __user *argp = (void __user *)arg;
        int __user *p = argp;
        int err, val;
        u32 accm[8];

        err = -EFAULT;
        switch (cmd) {
        case PPPIOCGFLAGS:
                val = ap->flags | ap->rbits;
                if (put_user(val, p))
                        break;
                err = 0;
                break;
        case PPPIOCSFLAGS:
                if (get_user(val, p))
                        break;
                ap->flags = val & ~SC_RCV_BITS;
                spin_lock_irq(&ap->recv_lock);
                ap->rbits = val & SC_RCV_BITS;
                spin_unlock_irq(&ap->recv_lock);
                err = 0;
                break;

        case PPPIOCGASYNCMAP:
                if (put_user(ap->xaccm[0], (u32 __user *)argp))
                        break;
                err = 0;
                break;
        case PPPIOCSASYNCMAP:
                if (get_user(ap->xaccm[0], (u32 __user *)argp))
                        break;
                err = 0;
                break;

        case PPPIOCGRASYNCMAP:
                if (put_user(ap->raccm, (u32 __user *)argp))
                        break;
                err = 0;
                break;
        case PPPIOCSRASYNCMAP:
                if (get_user(ap->raccm, (u32 __user *)argp))
                        break;
                err = 0;
                break;

        case PPPIOCGXASYNCMAP:
                if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
                        break;
                err = 0;
                break;
        case PPPIOCSXASYNCMAP:
                if (copy_from_user(accm, argp, sizeof(accm)))
                        break;
                /* can't escape 0x5e - that would emit 0x7d 0x7e,
                   putting a flag byte inside the frame */
                accm[2] &= ~0x40000000U;
                /* must always escape 0x7d (escape) and 0x7e (flag) */
                accm[3] |= 0x60000000U;
                memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
                err = 0;
                break;

        case PPPIOCGMRU:
                if (put_user(ap->mru, p))
                        break;
                err = 0;
                break;
        case PPPIOCSMRU:
                if (get_user(val, p))
                        break;
                if (val < PPP_MRU)
                        val = PPP_MRU;
                ap->mru = val;
                err = 0;
                break;

        default:
                err = -ENOTTY;
        }

        return err;
}
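
/*
 * Note on the receive hand-off: process_input_packet (below) saves
 * the SC_ state bits in skb->cb[0] when it queues a frame on rqueue.
 * A non-zero value there (i.e. SC_PREV_ERROR) means one or more
 * frames were damaged or dropped since the last good frame, which
 * ppp_async_process reports via ppp_input_error before handing the
 * skb to ppp_input.
 */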

/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_async_process(unsigned long arg)
{
        struct asyncppp *ap = (struct asyncppp *) arg;
        struct sk_buff *skb;

        /* process received packets */
        while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
                if (skb->cb[0])
                        ppp_input_error(&ap->chan, 0);
                ppp_input(&ap->chan, skb);
        }

        /* try to push more stuff out */
        if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
                ppp_output_wakeup(&ap->chan);
}

/*
 * Procedures for encapsulation and framing.
 */

/*
 * Procedure to encode the data for async serial transmission.
 * Does octet stuffing (escaping), puts the address/control bytes
 * on if A/C compression is disabled, and does protocol compression.
 * Assumes ap->tpkt != 0 on entry.
 * Returns 1 if we finished the current frame, 0 otherwise.
 */

#define PUT_BYTE(ap, buf, c, islcp)     do {            \
        if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
                *buf++ = PPP_ESCAPE;                    \
                *buf++ = c ^ 0x20;                      \
        } else                                          \
                *buf++ = c;                             \
} while (0)

static int
ppp_async_encode(struct asyncppp *ap)
{
        int fcs, i, count, c, proto;
        unsigned char *buf, *buflim;
        unsigned char *data;
        int islcp;

        buf = ap->obuf;
        ap->olim = buf;
        ap->optr = buf;
        i = ap->tpkt_pos;
        data = ap->tpkt->data;
        count = ap->tpkt->len;
        fcs = ap->tfcs;
        proto = (data[0] << 8) + data[1];

        /*
         * LCP packets with code values between 1 (configure-request)
         * and 7 (code-reject) must be sent as though no options
         * had been negotiated.
         */
        islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

        if (i == 0) {
                if (islcp)
                        async_lcp_peek(ap, data, count, 0);

                /*
                 * Start of a new packet - insert the leading FLAG
                 * character if necessary.
                 */
                if (islcp || flag_time == 0
                    || time_after_eq(jiffies, ap->last_xmit + flag_time))
                        *buf++ = PPP_FLAG;
                ap->last_xmit = jiffies;
                fcs = PPP_INITFCS;

                /*
                 * Put in the address/control bytes if necessary
                 */
                if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
                        PUT_BYTE(ap, buf, 0xff, islcp);
                        fcs = PPP_FCS(fcs, 0xff);
                        PUT_BYTE(ap, buf, 0x03, islcp);
                        fcs = PPP_FCS(fcs, 0x03);
                }
        }

        /*
         * Once we put in the last byte, we need to put in the FCS
         * and closing flag, so make sure there are at least 7 bytes
         * of free space in the output buffer.
         */
        buflim = ap->obuf + OBUFSIZE - 6;
        while (i < count && buf < buflim) {
                c = data[i++];
                if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
                        continue;       /* compress protocol field */
                fcs = PPP_FCS(fcs, c);
                PUT_BYTE(ap, buf, c, islcp);
        }

        if (i < count) {
                /*
                 * Remember where we are up to in this packet.
                 */
                ap->olim = buf;
                ap->tpkt_pos = i;
                ap->tfcs = fcs;
                return 0;
        }

        /*
         * We have finished the packet.  Add the FCS and flag.
         */
        fcs = ~fcs;
        c = fcs & 0xff;
        PUT_BYTE(ap, buf, c, islcp);
        c = (fcs >> 8) & 0xff;
        PUT_BYTE(ap, buf, c, islcp);
        *buf++ = PPP_FLAG;
        ap->olim = buf;

        kfree_skb(ap->tpkt);
        ap->tpkt = NULL;
        return 1;
}
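
/*
 * Worked example (illustrative values): an LCP configure-request
 * c0 21 01 01 00 04 is sent with islcp set, so the default maps
 * apply and the frame goes out as
 *
 *      7e ff 7d 23 c0 21 7d 21 7d 21 7d 20 7d 24 <fcs1> <fcs2> 7e
 *
 * Every byte below 0x20 is sent as PPP_ESCAPE followed by the byte
 * xor 0x20 (hence 03 becomes 7d 23), the complemented FCS follows
 * low byte first (escaped the same way if necessary), and a flag
 * closes the frame.
 */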

/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over an async tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
{
        struct asyncppp *ap = chan->private;

        ppp_async_push(ap);

        if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
                return 0;       /* already full */
        ap->tpkt = skb;
        ap->tpkt_pos = 0;

        ppp_async_push(ap);
        return 1;
}

/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_async_push(struct asyncppp *ap)
{
        int avail, sent, done = 0;
        struct tty_struct *tty = ap->tty;
        int tty_stuffed = 0;

        /*
         * We can get called recursively here if the tty write
         * function calls our wakeup function.  This can happen
         * for example on a pty with both the master and slave
         * set to PPP line discipline.
         * We use the XMIT_BUSY bit to detect this and get out,
         * leaving the XMIT_WAKEUP bit set to tell the other
         * instance that it may be able to write more now.
         */
        if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
                return 0;
        spin_lock_bh(&ap->xmit_lock);
        for (;;) {
                if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
                        tty_stuffed = 0;
                if (!tty_stuffed && ap->optr < ap->olim) {
                        avail = ap->olim - ap->optr;
                        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
                        sent = tty->driver->write(tty, ap->optr, avail);
                        if (sent < 0)
                                goto flush;     /* error, e.g. loss of CD */
                        ap->optr += sent;
                        if (sent < avail)
                                tty_stuffed = 1;
                        continue;
                }
                if (ap->optr >= ap->olim && ap->tpkt != 0) {
                        if (ppp_async_encode(ap)) {
                                /* finished processing ap->tpkt */
                                clear_bit(XMIT_FULL, &ap->xmit_flags);
                                done = 1;
                        }
                        continue;
                }
                /*
                 * We haven't made any progress this time around.
                 * Clear XMIT_BUSY to let other callers in, but
                 * after doing so we have to check if anyone set
                 * XMIT_WAKEUP since we last checked it.  If they
                 * did, we should try again to set XMIT_BUSY and go
                 * around again in case XMIT_BUSY was still set when
                 * the other caller tried.
                 */
                clear_bit(XMIT_BUSY, &ap->xmit_flags);
                /* any more work to do? if not, exit the loop */
                if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
                      || (!tty_stuffed && ap->tpkt != 0)))
                        break;
                /* more work to do, see if we can do it now */
                if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
                        break;
        }
        spin_unlock_bh(&ap->xmit_lock);
        return done;

flush:
        clear_bit(XMIT_BUSY, &ap->xmit_flags);
        if (ap->tpkt != 0) {
                kfree_skb(ap->tpkt);
                ap->tpkt = NULL;
                clear_bit(XMIT_FULL, &ap->xmit_flags);
                done = 1;
        }
        ap->optr = ap->olim;
        spin_unlock_bh(&ap->xmit_lock);
        return done;
}

/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl. Can be entered in parallel
 * but this is covered by the xmit_lock.
 */
static void
ppp_async_flush_output(struct asyncppp *ap)
{
        int done = 0;

        spin_lock_bh(&ap->xmit_lock);
        ap->optr = ap->olim;
        if (ap->tpkt != NULL) {
                kfree_skb(ap->tpkt);
                ap->tpkt = NULL;
                clear_bit(XMIT_FULL, &ap->xmit_flags);
                done = 1;
        }
        spin_unlock_bh(&ap->xmit_lock);
        if (done)
                ppp_output_wakeup(&ap->chan);
}

/*
 * Receive-side routines.
 */
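
/*
 * Note on the FCS check in process_input_packet below: the sender
 * appends the ones-complement of its CRC, so running PPP_FCS over
 * the data plus the two trailing FCS bytes of an undamaged frame
 * always leaves the fixed residual PPP_GOODFCS.  The receiver can
 * therefore just accumulate and compare against that constant.
 */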

/* see how many ordinary chars there are at the start of buf */
static inline int
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
{
        int i, c;

        for (i = 0; i < count; ++i) {
                c = buf[i];
                if (c == PPP_ESCAPE || c == PPP_FLAG
                    || (c < 0x20 && (ap->raccm & (1 << c)) != 0))
                        break;
        }
        return i;
}

/* called when a flag is seen - do end-of-packet processing */
static void
process_input_packet(struct asyncppp *ap)
{
        struct sk_buff *skb;
        unsigned char *p;
        unsigned int len, fcs, proto;

        skb = ap->rpkt;
        if (ap->state & (SC_TOSS | SC_ESCAPE))
                goto err;

        if (skb == NULL)
                return;         /* 0-length packet */

        /* check the FCS */
        p = skb->data;
        len = skb->len;
        if (len < 3)
                goto err;       /* too short */
        fcs = PPP_INITFCS;
        for (; len > 0; --len)
                fcs = PPP_FCS(fcs, *p++);
        if (fcs != PPP_GOODFCS)
                goto err;       /* bad FCS */
        skb_trim(skb, skb->len - 2);

        /* check for address/control and protocol compression */
        p = skb->data;
        if (p[0] == PPP_ALLSTATIONS) {
                /* chop off address/control */
                if (p[1] != PPP_UI || skb->len < 3)
                        goto err;
                p = skb_pull(skb, 2);
        }
        proto = p[0];
        if (proto & 1) {
                /* protocol is compressed */
                skb_push(skb, 1)[0] = 0;
        } else {
                if (skb->len < 2)
                        goto err;
                proto = (proto << 8) + p[1];
                if (proto == PPP_LCP)
                        async_lcp_peek(ap, p, skb->len, 1);
        }

        /* queue the frame to be processed */
        skb->cb[0] = ap->state;
        skb_queue_tail(&ap->rqueue, skb);
        ap->rpkt = NULL;
        ap->state = 0;
        return;

 err:
        /* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
        ap->state = SC_PREV_ERROR;
        if (skb) {
                /* make skb appear as freshly allocated */
                skb_trim(skb, 0);
                skb_reserve(skb, - skb_headroom(skb));
        }
}
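
/*
 * For example (illustrative): an IP frame received with both
 * address/control and protocol compression in effect arrives as just
 * "21 <ip-datagram> <fcs>" - there is no ff 03 to pull off, and the
 * odd first byte marks a compressed protocol field, which the
 * skb_push above expands back to the uncompressed 00 21 that the
 * generic layer expects.
 */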

/* Called when the tty driver has data for us. Runs parallel with the
   other ldisc functions but will not be re-entered */

static void
ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
                char *flags, int count)
{
        struct sk_buff *skb;
        int c, i, j, n, s, f;
        unsigned char *sp;

        /* update bits used for 8-bit cleanness detection */
        if (~ap->rbits & SC_RCV_BITS) {
                s = 0;
                for (i = 0; i < count; ++i) {
                        c = buf[i];
                        if (flags != 0 && flags[i] != 0)
                                continue;
                        s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
                        c = ((c >> 4) ^ c) & 0xf;
                        s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
                }
                ap->rbits |= s;
        }

        while (count > 0) {
                /* scan through and see how many chars we can do in bulk */
                if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
                        n = 1;
                else
                        n = scan_ordinary(ap, buf, count);

                f = 0;
                if (flags != 0 && (ap->state & SC_TOSS) == 0) {
                        /* check the flags to see if any char had an error */
                        for (j = 0; j < n; ++j)
                                if ((f = flags[j]) != 0)
                                        break;
                }
                if (f != 0) {
                        /* start tossing */
                        ap->state |= SC_TOSS;

                } else if (n > 0 && (ap->state & SC_TOSS) == 0) {
                        /* stuff the chars in the skb */
                        skb = ap->rpkt;
                        if (skb == 0) {
                                skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
                                if (skb == 0)
                                        goto nomem;
                                ap->rpkt = skb;
                        }
                        if (skb->len == 0) {
                                /* Try to get the payload 4-byte aligned.
                                 * This should match the
                                 * PPP_ALLSTATIONS/PPP_UI/compressed tests in
                                 * process_input_packet, but we do not have
                                 * enough chars here to test buf[1] and buf[2].
                                 */
                                if (buf[0] != PPP_ALLSTATIONS)
                                        skb_reserve(skb, 2 + (buf[0] & 1));
                        }
                        if (n > skb_tailroom(skb)) {
                                /* packet overflowed MRU */
                                ap->state |= SC_TOSS;
                        } else {
                                sp = skb_put(skb, n);
                                memcpy(sp, buf, n);
                                if (ap->state & SC_ESCAPE) {
                                        sp[0] ^= 0x20;
                                        ap->state &= ~SC_ESCAPE;
                                }
                        }
                }

                if (n >= count)
                        break;

                c = buf[n];
                if (flags != NULL && flags[n] != 0) {
                        ap->state |= SC_TOSS;
                } else if (c == PPP_FLAG) {
                        process_input_packet(ap);
                } else if (c == PPP_ESCAPE) {
                        ap->state |= SC_ESCAPE;
                } else if (I_IXON(ap->tty)) {
                        if (c == START_CHAR(ap->tty))
                                start_tty(ap->tty);
                        else if (c == STOP_CHAR(ap->tty))
                                stop_tty(ap->tty);
                }
                /* otherwise it's a char in the recv ACCM */
                ++n;

                buf += n;
                if (flags != 0)
                        flags += n;
                count -= n;
        }
        return;

 nomem:
        printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
        ap->state |= SC_TOSS;
}

/*
 * We look at LCP frames going past so that we can notice
 * and react to the LCP configure-ack from the peer.
 * Once we have sent the peer a configure-ack, LCP is up at the
 * peer's end as soon as it sends its own configure-ack, so the
 * packet immediately following that configure-ack may already be
 * sent with the negotiated LCP options.  Reacting here lets us
 * process that packet correctly without pppd needing to respond
 * quickly.
 *
 * We only respond to the received configure-ack if we have just
 * sent a configure-request, and the configure-ack contains the
 * same data (this is checked using a 16-bit crc of the data).
 */
#define CONFREQ         1       /* LCP code field values */
#define CONFACK         2
#define LCP_MRU         1       /* LCP option numbers */
#define LCP_ASYNCMAP    2
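
/*
 * LCP packet layout, for reference: after the 2-byte protocol field
 * come code (1 byte), id (1 byte) and length (2 bytes, MSB first),
 * followed by options encoded as type, length, data.  For example,
 * an MRU option requesting 1500 is 01 04 05 dc, and an asyncmap
 * option clearing all control characters is 02 06 00 00 00 00.
 */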

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
                           int len, int inbound)
{
        int dlen, fcs, i, code;
        u32 val;

        data += 2;              /* skip protocol bytes */
        len -= 2;
        if (len < 4)            /* 4 = code, ID, length */
                return;
        code = data[0];
        if (code != CONFACK && code != CONFREQ)
                return;
        dlen = (data[2] << 8) + data[3];
        if (len < dlen)
                return;         /* packet got truncated or length is bogus */

        if (code == (inbound? CONFACK: CONFREQ)) {
                /*
                 * sent confreq or received confack:
                 * calculate the crc of the data from the ID field on.
                 */
                fcs = PPP_INITFCS;
                for (i = 1; i < dlen; ++i)
                        fcs = PPP_FCS(fcs, data[i]);

                if (!inbound) {
                        /* outbound confreq - remember the crc for later */
                        ap->lcp_fcs = fcs;
                        return;
                }

                /* received confack, check the crc */
                fcs ^= ap->lcp_fcs;
                ap->lcp_fcs = -1;
                if (fcs != 0)
                        return;
        } else if (inbound)
                return; /* not interested in received confreq */

        /* process the options in the confack */
        data += 4;
        dlen -= 4;
        /* each option: data[0] is type, data[1] is length */
        while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
                switch (data[0]) {
                case LCP_MRU:
                        val = (data[2] << 8) + data[3];
                        if (inbound)
                                ap->mru = val;
                        else
                                ap->chan.mtu = val;
                        break;
                case LCP_ASYNCMAP:
                        val = (data[2] << 24) + (data[3] << 16)
                                + (data[4] << 8) + data[5];
                        if (inbound)
                                ap->raccm = val;
                        else
                                ap->xaccm[0] = val;
                        break;
                }
                dlen -= data[1];
                data += data[1];
        }
}

static void __exit ppp_async_cleanup(void)
{
        if (tty_unregister_ldisc(N_PPP) != 0)
                printk(KERN_ERR "failed to unregister PPP line discipline\n");
}

module_init(ppp_async_init);
module_exit(ppp_async_cleanup);