/*
 * PPP async serial channel driver for Linux.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over async serial lines. It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames. It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <asm/uaccess.h>
#include <asm/string.h>

#define PPP_VERSION "2.4.2"

#define OBUFSIZE 256

/* Structure for storing local state. */
struct asyncppp {
        struct tty_struct *tty;
        unsigned int flags;
        unsigned int state;
        unsigned int rbits;
        int mru;
        spinlock_t xmit_lock;
        spinlock_t recv_lock;
        unsigned long xmit_flags;
        u32 xaccm[8];
        u32 raccm;
        unsigned int bytes_sent;
        unsigned int bytes_rcvd;

        struct sk_buff *tpkt;
        int tpkt_pos;
        u16 tfcs;
        unsigned char *optr;
        unsigned char *olim;
        unsigned long last_xmit;

        struct sk_buff *rpkt;
        int lcp_fcs;
        struct sk_buff_head rqueue;

        struct tasklet_struct tsk;

        atomic_t refcnt;
        struct semaphore dead_sem;
        struct ppp_channel chan;        /* interface to generic ppp layer */
        unsigned char obuf[OBUFSIZE];
};

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP 0
#define XMIT_FULL 1
#define XMIT_BUSY 2

/* State bits */
#define SC_TOSS 1
#define SC_ESCAPE 2
#define SC_PREV_ERROR 4

/* Bits in rbits */
#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);

/*
 * Prototypes.
 */
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
                            char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
                           unsigned long arg);
static void ppp_async_process(unsigned long arg);

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
                           int len, int inbound);

static struct ppp_channel_ops async_ops = {
        ppp_async_send,
        ppp_async_ioctl
};

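/*
 * Note on the asyncmap (ACCM) representation used throughout this file:
 * a transmit character c is escaped when bit (c & 0x1f) of xaccm[c >> 5]
 * is set. For example, with the defaults set up in ppp_asynctty_open
 * below, xaccm[0] = ~0 escapes every control character 0x00-0x1f, and
 * xaccm[3] = 0x60000000 sets bits 0x1d and 0x1e of word 3, i.e. it forces
 * 0x7d (PPP_ESCAPE) and 0x7e (PPP_FLAG) to be escaped.
 */
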
/*
 * Routines implementing the PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_asynctty_receive while another
 * calls ppp_asynctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_asynctty_receive is using. The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: this is no longer true. The _close path for the ldisc is
 * now guaranteed to be sane.
 */
static DEFINE_RWLOCK(disc_data_lock);

static struct asyncppp *ap_get(struct tty_struct *tty)
{
        struct asyncppp *ap;

        read_lock(&disc_data_lock);
        ap = tty->disc_data;
        if (ap != NULL)
                atomic_inc(&ap->refcnt);
        read_unlock(&disc_data_lock);
        return ap;
}

static void ap_put(struct asyncppp *ap)
{
        if (atomic_dec_and_test(&ap->refcnt))
                up(&ap->dead_sem);
}

/*
 * Called when a tty is put into PPP line discipline. Called in process
 * context.
 */
static int
ppp_asynctty_open(struct tty_struct *tty)
{
        struct asyncppp *ap;
        int err;

        err = -ENOMEM;
        ap = kmalloc(sizeof(*ap), GFP_KERNEL);
        if (ap == 0)
                goto out;

        /* initialize the asyncppp structure */
        memset(ap, 0, sizeof(*ap));
        ap->tty = tty;
        ap->mru = PPP_MRU;
        spin_lock_init(&ap->xmit_lock);
        spin_lock_init(&ap->recv_lock);
        ap->xaccm[0] = ~0U;
        ap->xaccm[3] = 0x60000000U;
        ap->raccm = ~0U;
        ap->optr = ap->obuf;
        ap->olim = ap->obuf;
        ap->lcp_fcs = -1;

        skb_queue_head_init(&ap->rqueue);
        tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);

        atomic_set(&ap->refcnt, 1);
        init_MUTEX_LOCKED(&ap->dead_sem);

        ap->chan.private = ap;
        ap->chan.ops = &async_ops;
        ap->chan.mtu = PPP_MRU;
        err = ppp_register_channel(&ap->chan);
        if (err)
                goto out_free;

        tty->disc_data = ap;
        tty->receive_room = 65536;
        return 0;

 out_free:
        kfree(ap);
 out:
        return err;
}

/*
 * Called when the tty is put into another line discipline
 * or it hangs up. We have to wait for any cpu currently
 * executing in any of the other ppp_asynctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the asyncppp struct. This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_asynctty_close(struct tty_struct *tty)
{
        struct asyncppp *ap;

        write_lock_irq(&disc_data_lock);
        ap = tty->disc_data;
        tty->disc_data = NULL;
        write_unlock_irq(&disc_data_lock);
        if (ap == 0)
                return;

        /*
         * We have now ensured that nobody can start using ap from now
         * on, but we have to wait for all existing users to finish.
         * Note that ppp_unregister_channel ensures that no calls to
         * our channel ops (i.e. ppp_async_send/ioctl) are in progress
         * by the time it returns.
         */
        if (!atomic_dec_and_test(&ap->refcnt))
                down(&ap->dead_sem);
        tasklet_kill(&ap->tsk);

        ppp_unregister_channel(&ap->chan);
        if (ap->rpkt != 0)
                kfree_skb(ap->rpkt);
        skb_queue_purge(&ap->rqueue);
        if (ap->tpkt != 0)
                kfree_skb(ap->tpkt);
        kfree(ap);
}

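/*
 * To illustrate the refcount/dead_sem handshake above: ap starts with
 * refcnt == 1 (the tty's reference). A concurrent ppp_asynctty_receive
 * does ap_get (refcnt -> 2); if close runs now, its atomic_dec_and_test
 * sees a non-zero result and sleeps in down(&ap->dead_sem) until the
 * receive path's ap_put drops the last reference and does
 * up(&ap->dead_sem). Only then is the asyncppp struct freed.
 */
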
/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to driver to complete and unregister PPP channel.
 * This is already done by the close routine, so just call that.
 */
static int ppp_asynctty_hangup(struct tty_struct *tty)
{
        ppp_asynctty_close(tty);
        return 0;
}

/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
                  unsigned char __user *buf, size_t count)
{
        return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file,
                   const unsigned char *buf, size_t count)
{
        return -EAGAIN;
}

/*
 * Called in process context only. May be re-entered by multiple
 * ioctl calling threads.
 */

static int
ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
                   unsigned int cmd, unsigned long arg)
{
        struct asyncppp *ap = ap_get(tty);
        int err, val;
        int __user *p = (int __user *)arg;

        if (ap == 0)
                return -ENXIO;
        err = -EFAULT;
        switch (cmd) {
        case PPPIOCGCHAN:
                err = -ENXIO;
                if (ap == 0)
                        break;
                err = -EFAULT;
                if (put_user(ppp_channel_index(&ap->chan), p))
                        break;
                err = 0;
                break;

        case PPPIOCGUNIT:
                err = -ENXIO;
                if (ap == 0)
                        break;
                err = -EFAULT;
                if (put_user(ppp_unit_number(&ap->chan), p))
                        break;
                err = 0;
                break;

        case TCGETS:
        case TCGETA:
                err = n_tty_ioctl(tty, file, cmd, arg);
                break;

        case TCFLSH:
                /* flush our buffers and the serial port's buffer */
                if (arg == TCIOFLUSH || arg == TCOFLUSH)
                        ppp_async_flush_output(ap);
                err = n_tty_ioctl(tty, file, cmd, arg);
                break;

        case FIONREAD:
                val = 0;
                if (put_user(val, p))
                        break;
                err = 0;
                break;

        default:
                err = -ENOIOCTLCMD;
        }

        ap_put(ap);
        return err;
}

/* No kernel lock - fine */
static unsigned int
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
        return 0;
}

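/*
 * For illustration only (a pppd-style sketch, not part of the driver):
 * after attaching this line discipline to the serial fd, the daemon
 * typically asks the tty for its channel number and binds to it via
 * /dev/ppp, roughly
 *
 *      int chindex;
 *      ioctl(tty_fd, PPPIOCGCHAN, &chindex);    - handled above
 *      int ppp_fd = open("/dev/ppp", O_RDWR);
 *      ioctl(ppp_fd, PPPIOCATTCHAN, &chindex);  - handled by ppp_generic
 */
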
/*
 * This can now be called from hard interrupt level as well
 * as soft interrupt level or mainline.
 */
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
                     char *cflags, int count)
{
        struct asyncppp *ap = ap_get(tty);
        unsigned long flags;

        if (ap == 0)
                return;
        spin_lock_irqsave(&ap->recv_lock, flags);
        ppp_async_input(ap, buf, cflags, count);
        spin_unlock_irqrestore(&ap->recv_lock, flags);
        if (!skb_queue_empty(&ap->rqueue))
                tasklet_schedule(&ap->tsk);
        ap_put(ap);
        if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
            && tty->driver->unthrottle)
                tty->driver->unthrottle(tty);
}

static void
ppp_asynctty_wakeup(struct tty_struct *tty)
{
        struct asyncppp *ap = ap_get(tty);

        clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
        if (ap == 0)
                return;
        set_bit(XMIT_WAKEUP, &ap->xmit_flags);
        tasklet_schedule(&ap->tsk);
        ap_put(ap);
}

static struct tty_ldisc ppp_ldisc = {
        .owner  = THIS_MODULE,
        .magic  = TTY_LDISC_MAGIC,
        .name   = "ppp",
        .open   = ppp_asynctty_open,
        .close  = ppp_asynctty_close,
        .hangup = ppp_asynctty_hangup,
        .read   = ppp_asynctty_read,
        .write  = ppp_asynctty_write,
        .ioctl  = ppp_asynctty_ioctl,
        .poll   = ppp_asynctty_poll,
        .receive_buf = ppp_asynctty_receive,
        .write_wakeup = ppp_asynctty_wakeup,
};

static int __init
ppp_async_init(void)
{
        int err;

        err = tty_register_ldisc(N_PPP, &ppp_ldisc);
        if (err != 0)
                printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
                       err);
        return err;
}

/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
        struct asyncppp *ap = chan->private;
        void __user *argp = (void __user *)arg;
        int __user *p = argp;
        int err, val;
        u32 accm[8];

        err = -EFAULT;
        switch (cmd) {
        case PPPIOCGFLAGS:
                val = ap->flags | ap->rbits;
                if (put_user(val, p))
                        break;
                err = 0;
                break;
        case PPPIOCSFLAGS:
                if (get_user(val, p))
                        break;
                ap->flags = val & ~SC_RCV_BITS;
                spin_lock_irq(&ap->recv_lock);
                ap->rbits = val & SC_RCV_BITS;
                spin_unlock_irq(&ap->recv_lock);
                err = 0;
                break;

        case PPPIOCGASYNCMAP:
                if (put_user(ap->xaccm[0], (u32 __user *)argp))
                        break;
                err = 0;
                break;
        case PPPIOCSASYNCMAP:
                if (get_user(ap->xaccm[0], (u32 __user *)argp))
                        break;
                err = 0;
                break;

        case PPPIOCGRASYNCMAP:
                if (put_user(ap->raccm, (u32 __user *)argp))
                        break;
                err = 0;
                break;
        case PPPIOCSRASYNCMAP:
                if (get_user(ap->raccm, (u32 __user *)argp))
                        break;
                err = 0;
                break;

        case PPPIOCGXASYNCMAP:
                if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
                        break;
                err = 0;
                break;
        case PPPIOCSXASYNCMAP:
                if (copy_from_user(accm, argp, sizeof(accm)))
                        break;
                accm[2] &= ~0x40000000U;        /* can't escape 0x5e */
                accm[3] |= 0x60000000U;         /* must escape 0x7d, 0x7e */
                memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
                err = 0;
                break;

        case PPPIOCGMRU:
                if (put_user(ap->mru, p))
                        break;
                err = 0;
                break;
        case PPPIOCSMRU:
                if (get_user(val, p))
                        break;
                if (val < PPP_MRU)
                        val = PPP_MRU;
                ap->mru = val;
                err = 0;
                break;

        default:
                err = -ENOTTY;
        }

        return err;
}

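/*
 * For reference (illustrative, not part of the driver): the line
 * discipline entry points above are reached once userspace switches
 * the serial fd to N_PPP, roughly
 *
 *      int disc = N_PPP;
 *      ioctl(tty_fd, TIOCSETD, &disc);          -> ppp_asynctty_open
 *
 * whereas the channel ioctls just above (PPPIOCSFLAGS, PPPIOCSASYNCMAP,
 * PPPIOCSMRU, ...) arrive via ppp_generic on the /dev/ppp descriptor
 * bound to this channel, not through the tty ioctl path.
 */
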
/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_async_process(unsigned long arg)
{
        struct asyncppp *ap = (struct asyncppp *) arg;
        struct sk_buff *skb;

        /* process received packets */
        while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
                if (skb->cb[0])
                        ppp_input_error(&ap->chan, 0);
                ppp_input(&ap->chan, skb);
        }

        /* try to push more stuff out */
        if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
                ppp_output_wakeup(&ap->chan);
}

/*
 * Procedures for encapsulation and framing.
 */

/*
 * Procedure to encode the data for async serial transmission.
 * Does octet stuffing (escaping), puts the address/control bytes
 * on if A/C compression is disabled, and does protocol compression.
 * Assumes ap->tpkt != 0 on entry.
 * Returns 1 if we finished the current frame, 0 otherwise.
 */

#define PUT_BYTE(ap, buf, c, islcp) do {                                \
        if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
                *buf++ = PPP_ESCAPE;                                    \
                *buf++ = c ^ 0x20;                                      \
        } else                                                          \
                *buf++ = c;                                             \
} while (0)

static int
ppp_async_encode(struct asyncppp *ap)
{
        int fcs, i, count, c, proto;
        unsigned char *buf, *buflim;
        unsigned char *data;
        int islcp;

        buf = ap->obuf;
        ap->olim = buf;
        ap->optr = buf;
        i = ap->tpkt_pos;
        data = ap->tpkt->data;
        count = ap->tpkt->len;
        fcs = ap->tfcs;
        proto = (data[0] << 8) + data[1];

        /*
         * LCP packets with code values between 1 (configure-request)
         * and 7 (code-reject) must be sent as though no options
         * had been negotiated.
         */
        islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

        if (i == 0) {
                if (islcp)
                        async_lcp_peek(ap, data, count, 0);

                /*
                 * Start of a new packet - insert the leading FLAG
                 * character if necessary.
                 */
                if (islcp || flag_time == 0
                    || time_after_eq(jiffies, ap->last_xmit + flag_time))
                        *buf++ = PPP_FLAG;
                ap->last_xmit = jiffies;
                fcs = PPP_INITFCS;

                /*
                 * Put in the address/control bytes if necessary
                 */
                if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
                        PUT_BYTE(ap, buf, 0xff, islcp);
                        fcs = PPP_FCS(fcs, 0xff);
                        PUT_BYTE(ap, buf, 0x03, islcp);
                        fcs = PPP_FCS(fcs, 0x03);
                }
        }

        /*
         * Once we put in the last byte, we need to put in the FCS
         * and closing flag, so make sure there are at least 7 bytes
         * of free space in the output buffer.
         */
        buflim = ap->obuf + OBUFSIZE - 6;
        while (i < count && buf < buflim) {
                c = data[i++];
                if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
                        continue;       /* compress protocol field */
                fcs = PPP_FCS(fcs, c);
                PUT_BYTE(ap, buf, c, islcp);
        }

        if (i < count) {
                /*
                 * Remember where we are up to in this packet.
                 */
                ap->olim = buf;
                ap->tpkt_pos = i;
                ap->tfcs = fcs;
                return 0;
        }

        /*
         * We have finished the packet. Add the FCS and flag.
         */
        fcs = ~fcs;
        c = fcs & 0xff;
        PUT_BYTE(ap, buf, c, islcp);
        c = (fcs >> 8) & 0xff;
        PUT_BYTE(ap, buf, c, islcp);
        *buf++ = PPP_FLAG;
        ap->olim = buf;

        kfree_skb(ap->tpkt);
        ap->tpkt = NULL;
        return 1;
}

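/*
 * Worked example of the framing produced above (illustrative only; the
 * FCS octets are shown symbolically rather than computed): an IP packet
 * handed down as
 *
 *      00 21 45 ... 7e ...        (protocol 0x0021, then the IP header)
 *
 * goes out, with A/C and protocol compression off, as
 *
 *      7e ff 03 00 21 45 ... 7d 5e ... <fcs-lo> <fcs-hi> 7e
 *
 * i.e. leading flag, address/control ff 03, the payload with every byte
 * matching the asyncmap (here 7e) expanded to 7d followed by the byte
 * xor 0x20, then the complemented FCS (low byte first) and the closing
 * flag. The FCS octets themselves go through PUT_BYTE and are escaped
 * too if they happen to need it.
 */
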
/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over an async tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
{
        struct asyncppp *ap = chan->private;

        ppp_async_push(ap);

        if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
                return 0;       /* already full */
        ap->tpkt = skb;
        ap->tpkt_pos = 0;

        ppp_async_push(ap);
        return 1;
}

/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_async_push(struct asyncppp *ap)
{
        int avail, sent, done = 0;
        struct tty_struct *tty = ap->tty;
        int tty_stuffed = 0;

        /*
         * We can get called recursively here if the tty write
         * function calls our wakeup function. This can happen
         * for example on a pty with both the master and slave
         * set to PPP line discipline.
         * We use the XMIT_BUSY bit to detect this and get out,
         * leaving the XMIT_WAKEUP bit set to tell the other
         * instance that it may be able to write more now.
         */
        if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
                return 0;
        spin_lock_bh(&ap->xmit_lock);
        for (;;) {
                if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
                        tty_stuffed = 0;
                if (!tty_stuffed && ap->optr < ap->olim) {
                        avail = ap->olim - ap->optr;
                        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
                        sent = tty->driver->write(tty, ap->optr, avail);
                        if (sent < 0)
                                goto flush;     /* error, e.g. loss of CD */
                        ap->optr += sent;
                        if (sent < avail)
                                tty_stuffed = 1;
                        continue;
                }
                if (ap->optr >= ap->olim && ap->tpkt != 0) {
                        if (ppp_async_encode(ap)) {
                                /* finished processing ap->tpkt */
                                clear_bit(XMIT_FULL, &ap->xmit_flags);
                                done = 1;
                        }
                        continue;
                }
                /*
                 * We haven't made any progress this time around.
                 * Clear XMIT_BUSY to let other callers in, but
                 * after doing so we have to check if anyone set
                 * XMIT_WAKEUP since we last checked it. If they
                 * did, we should try again to set XMIT_BUSY and go
                 * around again in case XMIT_BUSY was still set when
                 * the other caller tried.
                 */
                clear_bit(XMIT_BUSY, &ap->xmit_flags);
                /* any more work to do? if not, exit the loop */
                if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
                      || (!tty_stuffed && ap->tpkt != 0)))
                        break;
                /* more work to do, see if we can do it now */
                if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
                        break;
        }
        spin_unlock_bh(&ap->xmit_lock);
        return done;

flush:
        clear_bit(XMIT_BUSY, &ap->xmit_flags);
        if (ap->tpkt != 0) {
                kfree_skb(ap->tpkt);
                ap->tpkt = NULL;
                clear_bit(XMIT_FULL, &ap->xmit_flags);
                done = 1;
        }
        ap->optr = ap->olim;
        spin_unlock_bh(&ap->xmit_lock);
        return done;
}

/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl. Can be entered in parallel
 * but this is covered by the xmit_lock.
 */
static void
ppp_async_flush_output(struct asyncppp *ap)
{
        int done = 0;

        spin_lock_bh(&ap->xmit_lock);
        ap->optr = ap->olim;
        if (ap->tpkt != NULL) {
                kfree_skb(ap->tpkt);
                ap->tpkt = NULL;
                clear_bit(XMIT_FULL, &ap->xmit_flags);
                done = 1;
        }
        spin_unlock_bh(&ap->xmit_lock);
        if (done)
                ppp_output_wakeup(&ap->chan);
}

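/*
 * Flow-control summary (illustrative): when ppp_async_send returns 0
 * because XMIT_FULL is already set, ppp_generic keeps the packet queued
 * and waits. Once the tty drains, ppp_asynctty_wakeup sets XMIT_WAKEUP
 * and schedules the tasklet; ppp_async_process then pushes the rest of
 * the pending tpkt and, when ppp_async_push reports completion, calls
 * ppp_output_wakeup so the generic layer starts sending to us again.
 */
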
/*
 * Receive-side routines.
 */

/* see how many ordinary chars there are at the start of buf */
static inline int
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
{
        int i, c;

        for (i = 0; i < count; ++i) {
                c = buf[i];
                if (c == PPP_ESCAPE || c == PPP_FLAG
                    || (c < 0x20 && (ap->raccm & (1 << c)) != 0))
                        break;
        }
        return i;
}

/* called when a flag is seen - do end-of-packet processing */
static void
process_input_packet(struct asyncppp *ap)
{
        struct sk_buff *skb;
        unsigned char *p;
        unsigned int len, fcs, proto;

        skb = ap->rpkt;
        if (ap->state & (SC_TOSS | SC_ESCAPE))
                goto err;

        if (skb == NULL)
                return;         /* 0-length packet */

        /* check the FCS */
        p = skb->data;
        len = skb->len;
        if (len < 3)
                goto err;       /* too short */
        fcs = PPP_INITFCS;
        for (; len > 0; --len)
                fcs = PPP_FCS(fcs, *p++);
        if (fcs != PPP_GOODFCS)
                goto err;       /* bad FCS */
        skb_trim(skb, skb->len - 2);

        /* check for address/control and protocol compression */
        p = skb->data;
        if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
                /* chop off address/control */
                if (skb->len < 3)
                        goto err;
                p = skb_pull(skb, 2);
        }
        proto = p[0];
        if (proto & 1) {
                /* protocol is compressed */
                skb_push(skb, 1)[0] = 0;
        } else {
                if (skb->len < 2)
                        goto err;
                proto = (proto << 8) + p[1];
                if (proto == PPP_LCP)
                        async_lcp_peek(ap, p, skb->len, 1);
        }

        /* queue the frame to be processed */
        skb->cb[0] = ap->state;
        skb_queue_tail(&ap->rqueue, skb);
        ap->rpkt = NULL;
        ap->state = 0;
        return;

 err:
        /* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
        ap->state = SC_PREV_ERROR;
        if (skb) {
                /* make skb appear as freshly allocated */
                skb_trim(skb, 0);
                skb_reserve(skb, - skb_headroom(skb));
        }
}

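/*
 * FCS illustration for the check in process_input_packet above: the
 * receiver runs PPP_FCS over the whole unescaped frame *including* the
 * two trailing FCS octets; per RFC 1662 a good frame then yields the
 * constant PPP_GOODFCS (0xf0b8), after which skb_trim drops the two
 * FCS bytes before the frame is queued for ppp_generic.
 */
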
/* Called when the tty driver has data for us. Runs parallel with the
   other ldisc functions but will not be re-entered */

static void
ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
                char *flags, int count)
{
        struct sk_buff *skb;
        int c, i, j, n, s, f;
        unsigned char *sp;

        /* update bits used for 8-bit cleanness detection */
        if (~ap->rbits & SC_RCV_BITS) {
                s = 0;
                for (i = 0; i < count; ++i) {
                        c = buf[i];
                        if (flags != 0 && flags[i] != 0)
                                continue;
                        s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
                        c = ((c >> 4) ^ c) & 0xf;
                        s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
                }
                ap->rbits |= s;
        }

        while (count > 0) {
                /* scan through and see how many chars we can do in bulk */
                if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
                        n = 1;
                else
                        n = scan_ordinary(ap, buf, count);

                f = 0;
                if (flags != 0 && (ap->state & SC_TOSS) == 0) {
                        /* check the flags to see if any char had an error */
                        for (j = 0; j < n; ++j)
                                if ((f = flags[j]) != 0)
                                        break;
                }
                if (f != 0) {
                        /* start tossing */
                        ap->state |= SC_TOSS;

                } else if (n > 0 && (ap->state & SC_TOSS) == 0) {
                        /* stuff the chars in the skb */
                        skb = ap->rpkt;
                        if (skb == 0) {
                                skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
                                if (skb == 0)
                                        goto nomem;
                                ap->rpkt = skb;
                        }
                        if (skb->len == 0) {
                                /* Try to get the payload 4-byte aligned.
                                 * This should match the
                                 * PPP_ALLSTATIONS/PPP_UI/compressed tests in
                                 * process_input_packet, but we do not have
                                 * enough chars here to test buf[1] and buf[2].
                                 */
                                if (buf[0] != PPP_ALLSTATIONS)
                                        skb_reserve(skb, 2 + (buf[0] & 1));
                        }
                        if (n > skb_tailroom(skb)) {
                                /* packet overflowed MRU */
                                ap->state |= SC_TOSS;
                        } else {
                                sp = skb_put(skb, n);
                                memcpy(sp, buf, n);
                                if (ap->state & SC_ESCAPE) {
                                        sp[0] ^= 0x20;
                                        ap->state &= ~SC_ESCAPE;
                                }
                        }
                }

                if (n >= count)
                        break;

                c = buf[n];
                if (flags != NULL && flags[n] != 0) {
                        ap->state |= SC_TOSS;
                } else if (c == PPP_FLAG) {
                        process_input_packet(ap);
                } else if (c == PPP_ESCAPE) {
                        ap->state |= SC_ESCAPE;
                } else if (I_IXON(ap->tty)) {
                        if (c == START_CHAR(ap->tty))
                                start_tty(ap->tty);
                        else if (c == STOP_CHAR(ap->tty))
                                stop_tty(ap->tty);
                }
                /* otherwise it's a char in the recv ACCM */
                ++n;

                buf += n;
                if (flags != 0)
                        flags += n;
                count -= n;
        }
        return;

 nomem:
        printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
        ap->state |= SC_TOSS;
}

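/*
 * Receive-side escaping, by example (illustrative): the byte pair
 * 7d 5e arriving from the tty is stored as a single 0x7e in the frame -
 * seeing 7d sets SC_ESCAPE, and the next stored byte is xor-ed with
 * 0x20 (the sp[0] ^= 0x20 above). A flag byte (7e) that arrives
 * unescaped instead terminates the frame and hands it to
 * process_input_packet.
 */
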
/*
 * We look at LCP frames going past so that we can notice
 * and react to the LCP configure-ack from the peer.
 * In the situation where the peer has been sent a configure-ack
 * already, LCP is up once it has sent its configure-ack
 * so the immediately following packet can be sent with the
 * configured LCP options. This allows us to process the following
 * packet correctly without pppd needing to respond quickly.
 *
 * We only respond to the received configure-ack if we have just
 * sent a configure-request, and the configure-ack contains the
 * same data (this is checked using a 16-bit crc of the data).
 */
#define CONFREQ         1       /* LCP code field values */
#define CONFACK         2
#define LCP_MRU         1       /* LCP option numbers */
#define LCP_ASYNCMAP    2

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
                           int len, int inbound)
{
        int dlen, fcs, i, code;
        u32 val;

        data += 2;              /* skip protocol bytes */
        len -= 2;
        if (len < 4)            /* 4 = code, ID, length */
                return;
        code = data[0];
        if (code != CONFACK && code != CONFREQ)
                return;
        dlen = (data[2] << 8) + data[3];
        if (len < dlen)
                return;         /* packet got truncated or length is bogus */

        if (code == (inbound? CONFACK: CONFREQ)) {
                /*
                 * sent confreq or received confack:
                 * calculate the crc of the data from the ID field on.
                 */
                fcs = PPP_INITFCS;
                for (i = 1; i < dlen; ++i)
                        fcs = PPP_FCS(fcs, data[i]);

                if (!inbound) {
                        /* outbound confreq - remember the crc for later */
                        ap->lcp_fcs = fcs;
                        return;
                }

                /* received confack, check the crc */
                fcs ^= ap->lcp_fcs;
                ap->lcp_fcs = -1;
                if (fcs != 0)
                        return;
        } else if (inbound)
                return;         /* not interested in received confreq */

        /* process the options in the confack */
        data += 4;
        dlen -= 4;
        /* data[0] is code, data[1] is length */
        while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
                switch (data[0]) {
                case LCP_MRU:
                        val = (data[2] << 8) + data[3];
                        if (inbound)
                                ap->mru = val;
                        else
                                ap->chan.mtu = val;
                        break;
                case LCP_ASYNCMAP:
                        val = (data[2] << 24) + (data[3] << 16)
                                + (data[4] << 8) + data[5];
                        if (inbound)
                                ap->raccm = val;
                        else
                                ap->xaccm[0] = val;
                        break;
                }
                dlen -= data[1];
                data += data[1];
        }
}

static void __exit ppp_async_cleanup(void)
{
        if (tty_unregister_ldisc(N_PPP) != 0)
                printk(KERN_ERR "failed to unregister PPP line discipline\n");
}

module_init(ppp_async_init);
module_exit(ppp_async_cleanup);
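
/*
 * Usage note: MODULE_ALIAS_LDISC(N_PPP) above lets this module be
 * loaded on demand (via the tty-ldisc-<N_PPP> module alias) when a
 * program first switches a tty to the PPP line discipline, so an
 * explicit "modprobe ppp_async" is normally not needed.
 */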