/*
 *	NET3:	A (fairly minimal) implementation of synchronous PPP for Linux
 *		as well as a CISCO HDLC implementation. See the copyright
 *		message below for the original source.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the license, or (at your option) any later version.
 *
 *	Note however. This code is also used in a different form by FreeBSD.
 *	Therefore when making any non OS specific change please consider
 *	contributing it back to the original author under the terms
 *	below in addition.
 *		-- Alan
 *
 *	Port for Linux-2.1 by Jan "Yenya" Kasprzak <kas@fi.muni.cz>
 */

/*
 * Synchronous PPP/Cisco link level subroutines.
 * Keepalive protocol implemented in both Cisco and PPP modes.
 *
 * Copyright (C) 1994 Cronyx Ltd.
 * Author: Serge Vakulenko, <vak@zebub.msk.su>
 *
 * This software is distributed with NO WARRANTIES, not even the implied
 * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Authors grant any other persons or organisations permission to use
 * or modify this software as long as this message is kept with the software,
 * all derivative works or modified versions.
 *
 * Version 1.9, Wed Oct  4 18:58:15 MSK 1995
 *
 * $Id: syncppp.c,v 1.18 2000/04/11 05:25:31 asj Exp $
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/random.h>
#include <linux/pkt_sched.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/syncppp.h>

#include <asm/byteorder.h>
#include <asm/uaccess.h>

#define MAXALIVECNT	6		/* max. alive packets */

#define PPP_ALLSTATIONS	0xff		/* All-Stations broadcast address */
#define PPP_UI		0x03		/* Unnumbered Information */
#define PPP_IP		0x0021		/* Internet Protocol */
#define PPP_ISO		0x0023		/* ISO OSI Protocol */
#define PPP_XNS		0x0025		/* Xerox NS Protocol */
#define PPP_IPX		0x002b		/* Novell IPX Protocol */
#define PPP_LCP		0xc021		/* Link Control Protocol */
#define PPP_IPCP	0x8021		/* Internet Protocol Control Protocol */

#define LCP_CONF_REQ	1		/* PPP LCP configure request */
#define LCP_CONF_ACK	2		/* PPP LCP configure acknowledge */
#define LCP_CONF_NAK	3		/* PPP LCP configure negative ack */
#define LCP_CONF_REJ	4		/* PPP LCP configure reject */
#define LCP_TERM_REQ	5		/* PPP LCP terminate request */
#define LCP_TERM_ACK	6		/* PPP LCP terminate acknowledge */
#define LCP_CODE_REJ	7		/* PPP LCP code reject */
#define LCP_PROTO_REJ	8		/* PPP LCP protocol reject */
#define LCP_ECHO_REQ	9		/* PPP LCP echo request */
#define LCP_ECHO_REPLY	10		/* PPP LCP echo reply */
#define LCP_DISC_REQ	11		/* PPP LCP discard request */

#define LCP_OPT_MRU		1	/* maximum receive unit */
#define LCP_OPT_ASYNC_MAP	2	/* async control character map */
#define LCP_OPT_AUTH_PROTO	3	/* authentication protocol */
#define LCP_OPT_QUAL_PROTO	4	/* quality protocol */
#define LCP_OPT_MAGIC		5	/* magic number */
#define LCP_OPT_RESERVED	6	/* reserved */
#define LCP_OPT_PROTO_COMP	7	/* protocol field compression */
#define LCP_OPT_ADDR_COMP	8	/* address/control field compression */

#define IPCP_CONF_REQ	LCP_CONF_REQ	/* PPP IPCP configure request */
#define IPCP_CONF_ACK	LCP_CONF_ACK	/* PPP IPCP configure acknowledge */
#define IPCP_CONF_NAK	LCP_CONF_NAK	/* PPP IPCP configure negative ack */
#define IPCP_CONF_REJ	LCP_CONF_REJ	/* PPP IPCP configure reject */
#define IPCP_TERM_REQ	LCP_TERM_REQ	/* PPP IPCP terminate request */
#define IPCP_TERM_ACK	LCP_TERM_ACK	/* PPP IPCP terminate acknowledge */
#define IPCP_CODE_REJ	LCP_CODE_REJ	/* PPP IPCP code reject */

#define CISCO_MULTICAST		0x8f	/* Cisco multicast address */
#define CISCO_UNICAST		0x0f	/* Cisco unicast address */
#define CISCO_KEEPALIVE		0x8035	/* Cisco keepalive protocol */
#define CISCO_ADDR_REQ		0	/* Cisco address request */
#define CISCO_ADDR_REPLY	1	/* Cisco address reply */
#define CISCO_KEEPALIVE_REQ	2	/* Cisco keepalive request */

struct ppp_header {
	u8 address;
	u8 control;
	__be16 protocol;
};
#define PPP_HEADER_LEN	sizeof (struct ppp_header)

struct lcp_header {
	u8 type;
	u8 ident;
	__be16 len;
};
#define LCP_HEADER_LEN	sizeof (struct lcp_header)

struct cisco_packet {
	__be32 type;
	__be32 par1;
	__be32 par2;
	__be16 rel;
	__be16 time0;
	__be16 time1;
};
#define CISCO_PACKET_LEN	18
#define CISCO_BIG_PACKET_LEN	20

static struct sppp *spppq;
static struct timer_list sppp_keepalive_timer;
static DEFINE_SPINLOCK(spppq_lock);

/* global xmit queue for sending packets while spinlock is held */
static struct sk_buff_head tx_queue;

static void sppp_keepalive (unsigned long dummy);
static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
			  u8 ident, u16 len, void *data);
static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2);
static void sppp_lcp_input (struct sppp *sp, struct sk_buff *m);
static void sppp_cisco_input (struct sppp *sp, struct sk_buff *m);
static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *m);
static void sppp_lcp_open (struct sppp *sp);
static void sppp_ipcp_open (struct sppp *sp);
static int sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
					int len, u32 *magic);
static void sppp_cp_timeout (unsigned long arg);
static char *sppp_lcp_type_name (u8 type);
static char *sppp_ipcp_type_name (u8 type);
static void sppp_print_bytes (u8 *p, u16 len);

static int debug;

/* Flush global outgoing packet queue to dev_queue_xmit().
 *
 * dev_queue_xmit() must be called with interrupts enabled
 * which means it can't be called with spinlocks held.
 * If a packet needs to be sent while a spinlock is held,
 * then put the packet into tx_queue, and call sppp_flush_xmit()
 * after the spinlock is released.
 */
static void sppp_flush_xmit(void)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(&tx_queue)) != NULL)
		dev_queue_xmit(skb);
}

/*
 *	Interface down stub
 */

static void if_down(struct net_device *dev)
{
	struct sppp *sp = (struct sppp *)sppp_of(dev);

	sp->pp_link_state = SPPP_LINK_DOWN;
}

/*
 * Timeout routine activations.
 */

static void sppp_set_timeout(struct sppp *p, int s)
{
	if (! (p->pp_flags & PP_TIMO))
	{
		init_timer(&p->pp_timer);
		p->pp_timer.function = sppp_cp_timeout;
		p->pp_timer.expires = jiffies + s*HZ;
		p->pp_timer.data = (unsigned long)p;
		p->pp_flags |= PP_TIMO;
		add_timer(&p->pp_timer);
	}
}

static void sppp_clear_timeout(struct sppp *p)
{
	if (p->pp_flags & PP_TIMO)
	{
		del_timer(&p->pp_timer);
		p->pp_flags &= ~PP_TIMO;
	}
}

/**
 *	sppp_input -	receive and process a WAN PPP frame
 *	@skb:	The buffer to process
 *	@dev:	The device it arrived on
 *
 *	This can be called directly by cards that do not have
 *	timing constraints but is normally called from the network layer
 *	after interrupt servicing to process frames queued via netif_rx().
 *
 *	We process the options in the card. If the frame is destined for
 *	the protocol stacks then it requeues the frame for the upper level
 *	protocol. If it is a control frame it is processed and discarded
 *	here.
 */

static void sppp_input (struct net_device *dev, struct sk_buff *skb)
{
	struct ppp_header *h;
	struct sppp *sp = (struct sppp *)sppp_of(dev);
	unsigned long flags;

	skb->dev = dev;
	skb_reset_mac_header(skb);

	if (dev->flags & IFF_RUNNING)
	{
		/* Count received bytes, add FCS and one flag */
		sp->ibytes += skb->len + 3;
		sp->ipkts++;
	}

	if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
		/* Too small packet, drop it. */
		if (sp->pp_flags & PP_DEBUG)
			printk (KERN_DEBUG "%s: input packet is too small, %d bytes\n",
				dev->name, skb->len);
		kfree_skb(skb);
		return;
	}

	/* Get PPP header. */
	h = (struct ppp_header *)skb->data;
	skb_pull(skb, sizeof(struct ppp_header));

	spin_lock_irqsave(&sp->lock, flags);

	switch (h->address) {
	default:	/* Invalid PPP packet. */
		goto invalid;
	case PPP_ALLSTATIONS:
		if (h->control != PPP_UI)
			goto invalid;
		if (sp->pp_flags & PP_CISCO) {
			if (sp->pp_flags & PP_DEBUG)
				printk (KERN_WARNING "%s: PPP packet in Cisco mode <0x%x 0x%x 0x%x>\n",
					dev->name,
					h->address, h->control, ntohs (h->protocol));
			goto drop;
		}
		switch (ntohs (h->protocol)) {
		default:
			if (sp->lcp.state == LCP_STATE_OPENED)
				sppp_cp_send (sp, PPP_LCP, LCP_PROTO_REJ,
					++sp->pp_seq, skb->len + 2,
					&h->protocol);
			if (sp->pp_flags & PP_DEBUG)
				printk (KERN_WARNING "%s: invalid input protocol <0x%x 0x%x 0x%x>\n",
					dev->name,
					h->address, h->control, ntohs (h->protocol));
			goto drop;
		case PPP_LCP:
			sppp_lcp_input (sp, skb);
			goto drop;
		case PPP_IPCP:
			if (sp->lcp.state == LCP_STATE_OPENED)
				sppp_ipcp_input (sp, skb);
			else
				printk(KERN_DEBUG "IPCP when still waiting LCP finish.\n");
			goto drop;
		case PPP_IP:
			if (sp->ipcp.state == IPCP_STATE_OPENED) {
				if (sp->pp_flags & PP_DEBUG)
					printk(KERN_DEBUG "Yow an IP frame.\n");
				skb->protocol = htons(ETH_P_IP);
				netif_rx(skb);
				dev->last_rx = jiffies;
				goto done;
			}
			break;
#ifdef IPX
		case PPP_IPX:
			/* IPX IPXCP not implemented yet */
			if (sp->lcp.state == LCP_STATE_OPENED) {
				skb->protocol = htons(ETH_P_IPX);
				netif_rx(skb);
				dev->last_rx = jiffies;
				goto done;
			}
			break;
#endif
		}
		break;
	case CISCO_MULTICAST:
	case CISCO_UNICAST:
		/* Don't check the control field here (RFC 1547). */
		if (! (sp->pp_flags & PP_CISCO)) {
			if (sp->pp_flags & PP_DEBUG)
				printk (KERN_WARNING "%s: Cisco packet in PPP mode <0x%x 0x%x 0x%x>\n",
					dev->name,
					h->address, h->control, ntohs (h->protocol));
			goto drop;
		}
		switch (ntohs (h->protocol)) {
		default:
			goto invalid;
		case CISCO_KEEPALIVE:
			sppp_cisco_input (sp, skb);
			goto drop;
#ifdef CONFIG_INET
		case ETH_P_IP:
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			dev->last_rx = jiffies;
			goto done;
#endif
#ifdef CONFIG_IPX
		case ETH_P_IPX:
			skb->protocol = htons(ETH_P_IPX);
			netif_rx(skb);
			dev->last_rx = jiffies;
			goto done;
#endif
		}
		break;
	}
	goto drop;

invalid:
	if (sp->pp_flags & PP_DEBUG)
		printk (KERN_WARNING "%s: invalid input packet <0x%x 0x%x 0x%x>\n",
			dev->name, h->address, h->control, ntohs (h->protocol));
drop:
	kfree_skb(skb);
done:
	spin_unlock_irqrestore(&sp->lock, flags);
	sppp_flush_xmit();
	return;
}
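
/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * a synchronous WAN driver's receive path might hand a completed HDLC frame
 * to this layer.  Frames tagged ETH_P_WAN_PPP are picked up by sppp_rcv()
 * (registered at the bottom of this file via dev_add_pack()) and end up in
 * sppp_input() above.  The example_* names are hypothetical.
 */
#if 0	/* example sketch, assuming a driver that already called sppp_attach() */
static void example_card_rx(struct net_device *dev, void *buf, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (!skb)
		return;				/* drop on allocation failure */
	memcpy(skb_put(skb, len), buf, len);	/* raw PPP/HDLC frame, FCS stripped */
	skb->protocol = htons(ETH_P_WAN_PPP);	/* route it to sppp_rcv() */
	skb->dev = dev;
	skb_reset_mac_header(skb);
	netif_rx(skb);				/* deferred processing via softirq */
}
#endif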

/*
 * Handle transmit packets.
 */

static int sppp_hard_header(struct sk_buff *skb,
			    struct net_device *dev, __u16 type,
			    const void *daddr, const void *saddr,
			    unsigned int len)
{
	struct sppp *sp = (struct sppp *)sppp_of(dev);
	struct ppp_header *h;
	skb_push(skb, sizeof(struct ppp_header));
	h = (struct ppp_header *)skb->data;
	if (sp->pp_flags & PP_CISCO)
	{
		h->address = CISCO_UNICAST;
		h->control = 0;
	}
	else
	{
		h->address = PPP_ALLSTATIONS;
		h->control = PPP_UI;
	}
	if (sp->pp_flags & PP_CISCO)
	{
		h->protocol = htons(type);
	}
	else switch (type)
	{
		case ETH_P_IP:
			h->protocol = htons(PPP_IP);
			break;
		case ETH_P_IPX:
			h->protocol = htons(PPP_IPX);
			break;
	}
	return sizeof(struct ppp_header);
}

static const struct header_ops sppp_header_ops = {
	.create = sppp_hard_header,
};

/*
 * Send keepalive packets, every 10 seconds.
 */

static void sppp_keepalive (unsigned long dummy)
{
	struct sppp *sp;
	unsigned long flags;

	spin_lock_irqsave(&spppq_lock, flags);

	for (sp = spppq; sp; sp = sp->pp_next)
	{
		struct net_device *dev = sp->pp_if;

		/* Keepalive mode disabled or channel down? */
		if (! (sp->pp_flags & PP_KEEPALIVE) ||
		    ! (dev->flags & IFF_UP))
			continue;

		spin_lock(&sp->lock);

		/* No keepalive in PPP mode if LCP not opened yet. */
		if (! (sp->pp_flags & PP_CISCO) &&
		    sp->lcp.state != LCP_STATE_OPENED) {
			spin_unlock(&sp->lock);
			continue;
		}

		if (sp->pp_alivecnt == MAXALIVECNT) {
			/* No keepalive packets got.  Stop the interface. */
			printk (KERN_WARNING "%s: protocol down\n", dev->name);
			if_down (dev);
			if (! (sp->pp_flags & PP_CISCO)) {
				/* Shut down the PPP link. */
				sp->lcp.magic = jiffies;
				sp->lcp.state = LCP_STATE_CLOSED;
				sp->ipcp.state = IPCP_STATE_CLOSED;
				sppp_clear_timeout (sp);
				/* Initiate negotiation. */
				sppp_lcp_open (sp);
			}
		}
		if (sp->pp_alivecnt <= MAXALIVECNT)
			++sp->pp_alivecnt;
		if (sp->pp_flags & PP_CISCO)
			sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ, ++sp->pp_seq,
				sp->pp_rseq);
		else if (sp->lcp.state == LCP_STATE_OPENED) {
			__be32 nmagic = htonl (sp->lcp.magic);
			sp->lcp.echoid = ++sp->pp_seq;
			sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REQ,
				sp->lcp.echoid, 4, &nmagic);
		}

		spin_unlock(&sp->lock);
	}
	spin_unlock_irqrestore(&spppq_lock, flags);
	sppp_flush_xmit();
	sppp_keepalive_timer.expires = jiffies + 10*HZ;
	add_timer(&sppp_keepalive_timer);
}

/*
 * Handle incoming PPP Link Control Protocol packets.
 */

static void sppp_lcp_input (struct sppp *sp, struct sk_buff *skb)
{
	struct lcp_header *h;
	struct net_device *dev = sp->pp_if;
	int len = skb->len;
	u8 *p, opt[6];
	u32 rmagic = 0;

	if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
		if (sp->pp_flags & PP_DEBUG)
			printk (KERN_WARNING "%s: invalid lcp packet length: %d bytes\n",
				dev->name, len);
		return;
	}
	h = (struct lcp_header *)skb->data;
	skb_pull(skb, sizeof(struct lcp_header));

	if (sp->pp_flags & PP_DEBUG)
	{
		char state = '?';
		switch (sp->lcp.state) {
		case LCP_STATE_CLOSED:   state = 'C'; break;
		case LCP_STATE_ACK_RCVD: state = 'R'; break;
		case LCP_STATE_ACK_SENT: state = 'S'; break;
		case LCP_STATE_OPENED:   state = 'O'; break;
		}
		printk (KERN_WARNING "%s: lcp input(%c): %d bytes <%s id=%xh len=%xh",
			dev->name, state, len,
			sppp_lcp_type_name (h->type), h->ident, ntohs (h->len));
		if (len > 4)
			sppp_print_bytes ((u8*) (h+1), len-4);
		printk (">\n");
	}
	if (len > ntohs (h->len))
		len = ntohs (h->len);
	switch (h->type) {
	default:
		/* Unknown packet type -- send Code-Reject packet. */
		sppp_cp_send (sp, PPP_LCP, LCP_CODE_REJ, ++sp->pp_seq,
			skb->len, h);
		break;
	case LCP_CONF_REQ:
		if (len < 4) {
			if (sp->pp_flags & PP_DEBUG)
				printk (KERN_DEBUG "%s: invalid lcp configure request packet length: %d bytes\n",
					dev->name, len);
			break;
		}
		if (len > 4 && !sppp_lcp_conf_parse_options (sp, h, len, &rmagic))
			goto badreq;
		if (rmagic == sp->lcp.magic) {
			/* Local and remote magics equal -- loopback? */
			if (sp->pp_loopcnt >= MAXALIVECNT*5) {
				printk (KERN_WARNING "%s: loopback\n",
					dev->name);
				sp->pp_loopcnt = 0;
				if (dev->flags & IFF_UP) {
					if_down (dev);
				}
			} else if (sp->pp_flags & PP_DEBUG)
				printk (KERN_DEBUG "%s: conf req: magic glitch\n",
					dev->name);
			++sp->pp_loopcnt;

			/* MUST send Conf-Nack packet. */
			rmagic = ~sp->lcp.magic;
			opt[0] = LCP_OPT_MAGIC;
			opt[1] = sizeof (opt);
			opt[2] = rmagic >> 24;
			opt[3] = rmagic >> 16;
			opt[4] = rmagic >> 8;
			opt[5] = rmagic;
			sppp_cp_send (sp, PPP_LCP, LCP_CONF_NAK,
				h->ident, sizeof (opt), &opt);
badreq:
			switch (sp->lcp.state) {
			case LCP_STATE_OPENED:
				/* Initiate renegotiation. */
				sppp_lcp_open (sp);
				/* fall through... */
			case LCP_STATE_ACK_SENT:
				/* Go to closed state. */
				sp->lcp.state = LCP_STATE_CLOSED;
				sp->ipcp.state = IPCP_STATE_CLOSED;
			}
			break;
		}
		/* Send Configure-Ack packet. */
		sp->pp_loopcnt = 0;
		if (sp->lcp.state != LCP_STATE_OPENED) {
			sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
				h->ident, len-4, h+1);
		}
		/* Change the state. */
		switch (sp->lcp.state) {
		case LCP_STATE_CLOSED:
			sp->lcp.state = LCP_STATE_ACK_SENT;
			break;
		case LCP_STATE_ACK_RCVD:
			sp->lcp.state = LCP_STATE_OPENED;
			sppp_ipcp_open (sp);
			break;
		case LCP_STATE_OPENED:
			/* Remote magic changed -- close session. */
			sp->lcp.state = LCP_STATE_CLOSED;
			sp->ipcp.state = IPCP_STATE_CLOSED;
			/* Initiate renegotiation. */
			sppp_lcp_open (sp);
			/* Send ACK after our REQ in attempt to break loop */
			sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
				h->ident, len-4, h+1);
			sp->lcp.state = LCP_STATE_ACK_SENT;
			break;
		}
		break;
	case LCP_CONF_ACK:
		if (h->ident != sp->lcp.confid)
			break;
		sppp_clear_timeout (sp);
		if ((sp->pp_link_state != SPPP_LINK_UP) &&
		    (dev->flags & IFF_UP)) {
			/* Coming out of loopback mode. */
			sp->pp_link_state = SPPP_LINK_UP;
			printk (KERN_INFO "%s: protocol up\n", dev->name);
		}
		switch (sp->lcp.state) {
		case LCP_STATE_CLOSED:
			sp->lcp.state = LCP_STATE_ACK_RCVD;
			sppp_set_timeout (sp, 5);
			break;
		case LCP_STATE_ACK_SENT:
			sp->lcp.state = LCP_STATE_OPENED;
			sppp_ipcp_open (sp);
			break;
		}
		break;
	case LCP_CONF_NAK:
		if (h->ident != sp->lcp.confid)
			break;
		p = (u8*) (h+1);
		if (len >= 10 && p[0] == LCP_OPT_MAGIC && p[1] >= 4) {
			rmagic = (u32)p[2] << 24 |
				(u32)p[3] << 16 | p[4] << 8 | p[5];
			if (rmagic == ~sp->lcp.magic) {
				int newmagic;
				if (sp->pp_flags & PP_DEBUG)
					printk (KERN_DEBUG "%s: conf nak: magic glitch\n",
						dev->name);
				get_random_bytes(&newmagic, sizeof(newmagic));
				sp->lcp.magic += newmagic;
			} else
				sp->lcp.magic = rmagic;
		}
		if (sp->lcp.state != LCP_STATE_ACK_SENT) {
			/* Go to closed state. */
			sp->lcp.state = LCP_STATE_CLOSED;
			sp->ipcp.state = IPCP_STATE_CLOSED;
		}
		/* The link will be renegotiated after timeout,
		 * to avoid endless req-nack loop. */
		sppp_clear_timeout (sp);
		sppp_set_timeout (sp, 2);
		break;
	case LCP_CONF_REJ:
		if (h->ident != sp->lcp.confid)
			break;
		sppp_clear_timeout (sp);
		/* Initiate renegotiation. */
		sppp_lcp_open (sp);
		if (sp->lcp.state != LCP_STATE_ACK_SENT) {
			/* Go to closed state. */
			sp->lcp.state = LCP_STATE_CLOSED;
			sp->ipcp.state = IPCP_STATE_CLOSED;
		}
		break;
	case LCP_TERM_REQ:
		sppp_clear_timeout (sp);
		/* Send Terminate-Ack packet. */
		sppp_cp_send (sp, PPP_LCP, LCP_TERM_ACK, h->ident, 0, NULL);
		/* Go to closed state. */
		sp->lcp.state = LCP_STATE_CLOSED;
		sp->ipcp.state = IPCP_STATE_CLOSED;
		/* Initiate renegotiation. */
		sppp_lcp_open (sp);
		break;
	case LCP_TERM_ACK:
	case LCP_CODE_REJ:
	case LCP_PROTO_REJ:
		/* Ignore for now. */
		break;
	case LCP_DISC_REQ:
		/* Discard the packet. */
		break;
	case LCP_ECHO_REQ:
		if (sp->lcp.state != LCP_STATE_OPENED)
			break;
		if (len < 8) {
			if (sp->pp_flags & PP_DEBUG)
				printk (KERN_WARNING "%s: invalid lcp echo request packet length: %d bytes\n",
					dev->name, len);
			break;
		}
		if (ntohl (*(__be32*)(h+1)) == sp->lcp.magic) {
			/* Line loopback mode detected. */
			printk (KERN_WARNING "%s: loopback\n", dev->name);
			if_down (dev);

			/* Shut down the PPP link. */
			sp->lcp.state = LCP_STATE_CLOSED;
			sp->ipcp.state = IPCP_STATE_CLOSED;
			sppp_clear_timeout (sp);
			/* Initiate negotiation. */
			sppp_lcp_open (sp);
			break;
		}
		*(__be32 *)(h+1) = htonl (sp->lcp.magic);
		sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REPLY, h->ident, len-4, h+1);
		break;
	case LCP_ECHO_REPLY:
		if (h->ident != sp->lcp.echoid)
			break;
		if (len < 8) {
			if (sp->pp_flags & PP_DEBUG)
				printk (KERN_WARNING "%s: invalid lcp echo reply packet length: %d bytes\n",
					dev->name, len);
			break;
		}
		if (ntohl(*(__be32 *)(h+1)) != sp->lcp.magic)
			sp->pp_alivecnt = 0;
		break;
	}
}

/*
 * Handle incoming Cisco keepalive protocol packets.
 */

static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
{
	struct cisco_packet *h;
	struct net_device *dev = sp->pp_if;

	if (!pskb_may_pull(skb, sizeof(struct cisco_packet))
	    || (skb->len != CISCO_PACKET_LEN
		&& skb->len != CISCO_BIG_PACKET_LEN)) {
		if (sp->pp_flags & PP_DEBUG)
			printk (KERN_WARNING "%s: invalid cisco packet length: %d bytes\n",
				dev->name, skb->len);
		return;
	}
	h = (struct cisco_packet *)skb->data;
	skb_pull(skb, sizeof(struct cisco_packet));
	if (sp->pp_flags & PP_DEBUG)
		printk (KERN_WARNING "%s: cisco input: %d bytes <%xh %xh %xh %xh %xh-%xh>\n",
			dev->name, skb->len,
			ntohl (h->type), h->par1, h->par2, h->rel,
			h->time0, h->time1);
	switch (ntohl (h->type)) {
	default:
		if (sp->pp_flags & PP_DEBUG)
			printk (KERN_WARNING "%s: unknown cisco packet type: 0x%x\n",
				dev->name, ntohl (h->type));
		break;
	case CISCO_ADDR_REPLY:
		/* Reply on address request, ignore */
		break;
	case CISCO_KEEPALIVE_REQ:
		sp->pp_alivecnt = 0;
		sp->pp_rseq = ntohl (h->par1);
		if (sp->pp_seq == sp->pp_rseq) {
			/* Local and remote sequence numbers are equal.
			 * Probably, the line is in loopback mode. */
			int newseq;
			if (sp->pp_loopcnt >= MAXALIVECNT) {
				printk (KERN_WARNING "%s: loopback\n",
					dev->name);
				sp->pp_loopcnt = 0;
				if (dev->flags & IFF_UP) {
					if_down (dev);
				}
			}
			++sp->pp_loopcnt;

			/* Generate new local sequence number */
			get_random_bytes(&newseq, sizeof(newseq));
			sp->pp_seq ^= newseq;
			break;
		}
		sp->pp_loopcnt = 0;
		if (sp->pp_link_state == SPPP_LINK_DOWN &&
		    (dev->flags & IFF_UP)) {
			sp->pp_link_state = SPPP_LINK_UP;
			printk (KERN_INFO "%s: protocol up\n", dev->name);
		}
		break;
	case CISCO_ADDR_REQ:
		/* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
	{
		struct in_device *in_dev;
		struct in_ifaddr *ifa;
		__be32 addr = 0, mask = htonl(~0U);	/* FIXME: is the mask correct? */
#ifdef CONFIG_INET
		rcu_read_lock();
		if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
		{
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				if (strcmp(dev->name, ifa->ifa_label) == 0)
				{
					addr = ifa->ifa_local;
					mask = ifa->ifa_mask;
					break;
				}
			}
		}
		rcu_read_unlock();
#endif
		sppp_cisco_send (sp, CISCO_ADDR_REPLY, ntohl(addr), ntohl(mask));
		break;
	}
	}
}

/*
 * Send PPP LCP packet.
 */

static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
	u8 ident, u16 len, void *data)
{
	struct ppp_header *h;
	struct lcp_header *lh;
	struct sk_buff *skb;
	struct net_device *dev = sp->pp_if;

	skb = alloc_skb(dev->hard_header_len + PPP_HEADER_LEN + LCP_HEADER_LEN + len,
			GFP_ATOMIC);
	if (skb == NULL)
		return;

	skb_reserve(skb, dev->hard_header_len);

	h = (struct ppp_header *)skb_put(skb, sizeof(struct ppp_header));
	h->address = PPP_ALLSTATIONS;		/* broadcast address */
	h->control = PPP_UI;			/* Unnumbered Info */
	h->protocol = htons (proto);		/* Link Control Protocol */

	lh = (struct lcp_header *)skb_put(skb, sizeof(struct lcp_header));
	lh->type = type;
	lh->ident = ident;
	lh->len = htons (LCP_HEADER_LEN + len);

	if (len)
		memcpy(skb_put(skb, len), data, len);

	if (sp->pp_flags & PP_DEBUG) {
		printk (KERN_WARNING "%s: %s output <%s id=%xh len=%xh",
			dev->name,
			proto == PPP_LCP ? "lcp" : "ipcp",
			proto == PPP_LCP ? sppp_lcp_type_name (lh->type) :
			sppp_ipcp_type_name (lh->type), lh->ident,
			ntohs (lh->len));
		if (len)
			sppp_print_bytes ((u8*) (lh+1), len);
		printk (">\n");
	}
	sp->obytes += skb->len;
	/* Control is high priority so it doesn't get queued behind data */
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_queue_tail(&tx_queue, skb);
}

/*
 * Send Cisco keepalive packet.
 */

static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2)
{
	struct ppp_header *h;
	struct cisco_packet *ch;
	struct sk_buff *skb;
	struct net_device *dev = sp->pp_if;
	u32 t = jiffies * 1000/HZ;

	skb = alloc_skb(dev->hard_header_len + PPP_HEADER_LEN + CISCO_PACKET_LEN,
			GFP_ATOMIC);

	if (skb == NULL)
		return;

	skb_reserve(skb, dev->hard_header_len);
	h = (struct ppp_header *)skb_put (skb, sizeof(struct ppp_header));
	h->address = CISCO_MULTICAST;
	h->control = 0;
	h->protocol = htons (CISCO_KEEPALIVE);

	ch = (struct cisco_packet*)skb_put(skb, CISCO_PACKET_LEN);
	ch->type = htonl (type);
	ch->par1 = htonl (par1);
	ch->par2 = htonl (par2);
	ch->rel = htons(0xffff);
	ch->time0 = htons ((u16) (t >> 16));
	ch->time1 = htons ((u16) t);

	if (sp->pp_flags & PP_DEBUG)
		printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
			dev->name, ntohl (ch->type), ch->par1,
			ch->par2, ch->rel, ch->time0, ch->time1);
	sp->obytes += skb->len;
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_queue_tail(&tx_queue, skb);
}
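
/*
 * Illustrative only (not part of the original file): the frame that
 * sppp_cp_send() queues for an LCP Configure-Request carrying the 6-byte
 * Magic-Number option (the request sppp_lcp_open() below builds).  The
 * ident value and the magic bytes m3..m0 are placeholders.
 */
#if 0	/* example sketch of the on-the-wire layout */
static const u8 example_lcp_conf_req[] = {
	0xff, 0x03,		/* ppp_header: PPP_ALLSTATIONS, PPP_UI */
	0xc0, 0x21,		/* ppp_header.protocol: PPP_LCP */
	0x01, 0x2a,		/* lcp_header: LCP_CONF_REQ, ident (example) */
	0x00, 0x0a,		/* lcp_header.len: 4-byte header + 6-byte option */
	0x05, 0x06,		/* option: LCP_OPT_MAGIC, length 6 */
	/* m3, m2, m1, m0 -- the 32-bit magic, most significant byte first */
};
#endif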

/**
 *	sppp_close - close down a synchronous PPP or Cisco HDLC link
 *	@dev: The network device to drop the link of
 *
 *	This drops the logical interface to the channel. It is not
 *	done politely as we assume we will also be dropping DTR. Any
 *	timeouts are killed.
 */

int sppp_close (struct net_device *dev)
{
	struct sppp *sp = (struct sppp *)sppp_of(dev);
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);
	sp->pp_link_state = SPPP_LINK_DOWN;
	sp->lcp.state = LCP_STATE_CLOSED;
	sp->ipcp.state = IPCP_STATE_CLOSED;
	sppp_clear_timeout (sp);
	spin_unlock_irqrestore(&sp->lock, flags);

	return 0;
}

EXPORT_SYMBOL(sppp_close);

/**
 *	sppp_open - open a synchronous PPP or Cisco HDLC link
 *	@dev: Network device to activate
 *
 *	Close down any existing synchronous session and commence
 *	from scratch. In the PPP case this means negotiating LCP/IPCP
 *	and friends, while for Cisco HDLC we simply need to start sending
 *	keepalives.
 */

int sppp_open (struct net_device *dev)
{
	struct sppp *sp = (struct sppp *)sppp_of(dev);
	unsigned long flags;

	sppp_close(dev);

	spin_lock_irqsave(&sp->lock, flags);
	if (!(sp->pp_flags & PP_CISCO)) {
		sppp_lcp_open (sp);
	}
	sp->pp_link_state = SPPP_LINK_DOWN;
	spin_unlock_irqrestore(&sp->lock, flags);
	sppp_flush_xmit();

	return 0;
}

EXPORT_SYMBOL(sppp_open);

/**
 *	sppp_reopen - notify of physical link loss
 *	@dev: Device that lost the link
 *
 *	This function informs the synchronous protocol code that
 *	the underlying link died (for example a carrier drop on X.21).
 *
 *	We increment the magic numbers to ensure that if the other end
 *	failed to notice we will correctly start a new session. Due to
 *	the nature of telco circuits it can happen that you lose carrier
 *	on one end only.
 *
 *	Having done this we go back to negotiating. This function may
 *	be called from an interrupt context.
 */

int sppp_reopen (struct net_device *dev)
{
	struct sppp *sp = (struct sppp *)sppp_of(dev);
	unsigned long flags;

	sppp_close(dev);

	spin_lock_irqsave(&sp->lock, flags);
	if (!(sp->pp_flags & PP_CISCO))
	{
		sp->lcp.magic = jiffies;
		++sp->pp_seq;
		sp->lcp.state = LCP_STATE_CLOSED;
		sp->ipcp.state = IPCP_STATE_CLOSED;
		/* Give it a moment for the line to settle then go */
		sppp_set_timeout (sp, 1);
	}
	sp->pp_link_state = SPPP_LINK_DOWN;
	spin_unlock_irqrestore(&sp->lock, flags);

	return 0;
}

EXPORT_SYMBOL(sppp_reopen);

/**
 *	sppp_change_mtu - Change the link MTU
 *	@dev:	Device to change MTU on
 *	@new_mtu: New MTU
 *
 *	Change the MTU on the link. This can only be called with
 *	the link down. It returns an error if the link is up or
 *	the mtu is out of range.
 */

static int sppp_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 128 || new_mtu > PPP_MTU || (dev->flags & IFF_UP))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
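
/*
 * Illustrative only (not part of the original file): a hedged sketch of how
 * a card driver typically layers its own open/stop callbacks on top of
 * sppp_open()/sppp_close(), as the #if 0 block in sppp_attach() below hints.
 * The example_* functions are hypothetical.
 */
#if 0	/* example sketch */
static int example_dev_open(struct net_device *dev)
{
	int err = example_start_hardware(dev);	/* hypothetical: raise DTR, enable rx/tx */

	if (err)
		return err;
	netif_start_queue(dev);
	return sppp_open(dev);			/* kick off LCP/IPCP or Cisco keepalives */
}

static int example_dev_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	sppp_close(dev);			/* drop protocol state and pending timers */
	example_stop_hardware(dev);		/* hypothetical */
	return 0;
}
#endif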

/**
 *	sppp_do_ioctl - Ioctl handler for ppp/hdlc
 *	@dev: Device subject to ioctl
 *	@ifr: Interface request block from the user
 *	@cmd: Command that is being issued
 *
 *	This function handles the ioctls that may be issued by the user
 *	to control the settings of a PPP/HDLC link. It does both busy
 *	and security checks. This function is intended to be wrapped by
 *	callers who wish to add additional ioctl calls of their own.
 */

int sppp_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sppp *sp = (struct sppp *)sppp_of(dev);

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd)
	{
		case SPPPIOCCISCO:
			sp->pp_flags |= PP_CISCO;
			dev->type = ARPHRD_HDLC;
			break;
		case SPPPIOCPPP:
			sp->pp_flags &= ~PP_CISCO;
			dev->type = ARPHRD_PPP;
			break;
		case SPPPIOCDEBUG:
			sp->pp_flags &= ~PP_DEBUG;
			if (ifr->ifr_flags)
				sp->pp_flags |= PP_DEBUG;
			break;
		case SPPPIOCGFLAGS:
			if (copy_to_user(ifr->ifr_data, &sp->pp_flags, sizeof(sp->pp_flags)))
				return -EFAULT;
			break;
		case SPPPIOCSFLAGS:
			if (copy_from_user(&sp->pp_flags, ifr->ifr_data, sizeof(sp->pp_flags)))
				return -EFAULT;
			break;
		default:
			return -EINVAL;
	}
	return 0;
}

EXPORT_SYMBOL(sppp_do_ioctl);

/**
 *	sppp_attach - attach synchronous PPP/HDLC to a device
 *	@pd:	PPP device to initialise
 *
 *	This initialises the PPP/HDLC support on an interface. At the
 *	time of calling the dev element must point to the network device
 *	that this interface is attached to. The interface should not yet
 *	be registered.
 */

void sppp_attach(struct ppp_device *pd)
{
	struct net_device *dev = pd->dev;
	struct sppp *sp = &pd->sppp;
	unsigned long flags;

	/* Make sure embedding is safe for sppp_of */
	BUG_ON(sppp_of(dev) != sp);

	spin_lock_irqsave(&spppq_lock, flags);
	/* Initialize keepalive handler. */
	if (! spppq)
	{
		init_timer(&sppp_keepalive_timer);
		sppp_keepalive_timer.expires = jiffies + 10*HZ;
		sppp_keepalive_timer.function = sppp_keepalive;
		add_timer(&sppp_keepalive_timer);
	}
	/* Insert new entry into the keepalive list. */
	sp->pp_next = spppq;
	spppq = sp;
	spin_unlock_irqrestore(&spppq_lock, flags);

	sp->pp_loopcnt = 0;
	sp->pp_alivecnt = 0;
	sp->pp_seq = 0;
	sp->pp_rseq = 0;
	sp->pp_flags = PP_KEEPALIVE|PP_CISCO|debug;	/*PP_DEBUG;*/
	sp->lcp.magic = 0;
	sp->lcp.state = LCP_STATE_CLOSED;
	sp->ipcp.state = IPCP_STATE_CLOSED;
	sp->pp_if = dev;
	spin_lock_init(&sp->lock);

	/*
	 * Device specific setup. All but interrupt handler and
	 * hard_start_xmit.
	 */

	dev->header_ops = &sppp_header_ops;

	dev->tx_queue_len = 10;
	dev->type = ARPHRD_HDLC;
	dev->addr_len = 0;
	dev->hard_header_len = sizeof(struct ppp_header);
	dev->mtu = PPP_MTU;
	/*
	 * These 4 are callers but MUST also call sppp_ functions
	 */
	dev->do_ioctl = sppp_do_ioctl;
#if 0
	dev->get_stats = NULL;		/* Let the driver override these */
	dev->open = sppp_open;
	dev->stop = sppp_close;
#endif
	dev->change_mtu = sppp_change_mtu;
	dev->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
}

EXPORT_SYMBOL(sppp_attach);
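
/*
 * Illustrative only (not part of the original file): a hedged sketch of the
 * attach ordering a port driver might follow.  How dev->priv must be wired
 * up so that sppp_of(dev) resolves to &pd->sppp is defined by net/syncppp.h
 * and is only hinted at here; the example_* names are hypothetical.
 */
#if 0	/* example sketch */
static int example_register_port(struct net_device *dev, struct ppp_device *pd)
{
	pd->dev = dev;			/* must point at the netdev before attach */
	/* ... set dev->priv so that sppp_of(dev) == &pd->sppp ... */
	sppp_attach(pd);		/* per the kerneldoc: before register_netdev() */
	dev->open = sppp_open;		/* or driver wrappers that call these */
	dev->stop = sppp_close;
	return register_netdev(dev);
}
#endif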

/**
 *	sppp_detach - release PPP resources from a device
 *	@dev:	Network device to release
 *
 *	Stop and free up any PPP/HDLC resources used by this
 *	interface. This must be called before the device is
 *	freed.
 */

void sppp_detach (struct net_device *dev)
{
	struct sppp **q, *p, *sp = (struct sppp *)sppp_of(dev);
	unsigned long flags;

	spin_lock_irqsave(&spppq_lock, flags);
	/* Remove the entry from the keepalive list. */
	for (q = &spppq; (p = *q); q = &p->pp_next)
		if (p == sp) {
			*q = p->pp_next;
			break;
		}

	/* Stop keepalive handler. */
	if (! spppq)
		del_timer(&sppp_keepalive_timer);
	sppp_clear_timeout (sp);
	spin_unlock_irqrestore(&spppq_lock, flags);
}

EXPORT_SYMBOL(sppp_detach);

/*
 * Analyze the LCP Configure-Request options list
 * for the presence of unknown options.
 * If the request contains unknown options, build and
 * send Configure-reject packet, containing only unknown options.
 */
static int
sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
			     int len, u32 *magic)
{
	u8 *buf, *r, *p;
	int rlen;

	len -= 4;
	buf = r = kmalloc (len, GFP_ATOMIC);
	if (! buf)
		return (0);

	p = (void*) (h+1);
	for (rlen = 0; len > 1 && p[1]; len -= p[1], p += p[1]) {
		switch (*p) {
		case LCP_OPT_MAGIC:
			/* Magic number -- extract. */
			if (len >= 6 && p[1] == 6) {
				*magic = (u32)p[2] << 24 |
					(u32)p[3] << 16 | p[4] << 8 | p[5];
				continue;
			}
			break;
		case LCP_OPT_ASYNC_MAP:
			/* Async control character map -- check to be zero. */
			if (len >= 6 && p[1] == 6 && ! p[2] && ! p[3] &&
			    ! p[4] && ! p[5])
				continue;
			break;
		case LCP_OPT_MRU:
			/* Maximum receive unit -- always OK. */
			continue;
		default:
			/* Others not supported. */
			break;
		}
		/* Add the option to rejected list. */
		memcpy(r, p, p[1]);
		r += p[1];
		rlen += p[1];
	}
	if (rlen)
		sppp_cp_send (sp, PPP_LCP, LCP_CONF_REJ, h->ident, rlen, buf);
	kfree(buf);
	return (rlen == 0);
}
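
/*
 * Illustrative only (not part of the original file): a worked example of how
 * sppp_lcp_conf_parse_options() above treats a received Configure-Request
 * options list.  The option bytes are hypothetical.
 */
#if 0	/* example sketch */
static const u8 example_lcp_conf_req_opts[] = {
	LCP_OPT_MRU,        4, 0x05, 0xdc,	/* MRU 1500      -- accepted as-is        */
	LCP_OPT_MAGIC,      6, 0x12, 0x34,
			       0x56, 0x78,	/* magic number  -- extracted into *magic */
	LCP_OPT_AUTH_PROTO, 4, 0xc0, 0x23,	/* PAP requested -- unsupported, so it is */
						/* copied into the Configure-Reject      */
};
#endif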

static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *skb)
{
	struct lcp_header *h;
	struct net_device *dev = sp->pp_if;
	int len = skb->len;

	if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
		if (sp->pp_flags & PP_DEBUG)
			printk (KERN_WARNING "%s: invalid ipcp packet length: %d bytes\n",
				dev->name, len);
		return;
	}
	h = (struct lcp_header *)skb->data;
	skb_pull(skb, sizeof(struct lcp_header));
	if (sp->pp_flags & PP_DEBUG) {
		printk (KERN_WARNING "%s: ipcp input: %d bytes <%s id=%xh len=%xh",
			dev->name, len,
			sppp_ipcp_type_name (h->type), h->ident, ntohs (h->len));
		if (len > 4)
			sppp_print_bytes ((u8*) (h+1), len-4);
		printk (">\n");
	}
	if (len > ntohs (h->len))
		len = ntohs (h->len);
	switch (h->type) {
	default:
		/* Unknown packet type -- send Code-Reject packet. */
		sppp_cp_send (sp, PPP_IPCP, IPCP_CODE_REJ, ++sp->pp_seq, len, h);
		break;
	case IPCP_CONF_REQ:
		if (len < 4) {
			if (sp->pp_flags & PP_DEBUG)
				printk (KERN_WARNING "%s: invalid ipcp configure request packet length: %d bytes\n",
					dev->name, len);
			return;
		}
		if (len > 4) {
			sppp_cp_send (sp, PPP_IPCP, LCP_CONF_REJ, h->ident,
				len-4, h+1);

			switch (sp->ipcp.state) {
			case IPCP_STATE_OPENED:
				/* Initiate renegotiation. */
				sppp_ipcp_open (sp);
				/* fall through... */
			case IPCP_STATE_ACK_SENT:
				/* Go to closed state. */
				sp->ipcp.state = IPCP_STATE_CLOSED;
			}
		} else {
			/* Send Configure-Ack packet. */
			sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_ACK, h->ident,
				0, NULL);
			/* Change the state. */
			if (sp->ipcp.state == IPCP_STATE_ACK_RCVD)
				sp->ipcp.state = IPCP_STATE_OPENED;
			else
				sp->ipcp.state = IPCP_STATE_ACK_SENT;
		}
		break;
	case IPCP_CONF_ACK:
		if (h->ident != sp->ipcp.confid)
			break;
		sppp_clear_timeout (sp);
		switch (sp->ipcp.state) {
		case IPCP_STATE_CLOSED:
			sp->ipcp.state = IPCP_STATE_ACK_RCVD;
			sppp_set_timeout (sp, 5);
			break;
		case IPCP_STATE_ACK_SENT:
			sp->ipcp.state = IPCP_STATE_OPENED;
			break;
		}
		break;
	case IPCP_CONF_NAK:
	case IPCP_CONF_REJ:
		if (h->ident != sp->ipcp.confid)
			break;
		sppp_clear_timeout (sp);
		/* Initiate renegotiation. */
		sppp_ipcp_open (sp);
		if (sp->ipcp.state != IPCP_STATE_ACK_SENT)
			/* Go to closed state. */
			sp->ipcp.state = IPCP_STATE_CLOSED;
		break;
	case IPCP_TERM_REQ:
		/* Send Terminate-Ack packet. */
		sppp_cp_send (sp, PPP_IPCP, IPCP_TERM_ACK, h->ident, 0, NULL);
		/* Go to closed state. */
		sp->ipcp.state = IPCP_STATE_CLOSED;
		/* Initiate renegotiation. */
		sppp_ipcp_open (sp);
		break;
	case IPCP_TERM_ACK:
		/* Ignore for now. */
	case IPCP_CODE_REJ:
		/* Ignore for now. */
		break;
	}
}

static void sppp_lcp_open (struct sppp *sp)
{
	char opt[6];

	if (! sp->lcp.magic)
		sp->lcp.magic = jiffies;
	opt[0] = LCP_OPT_MAGIC;
	opt[1] = sizeof (opt);
	opt[2] = sp->lcp.magic >> 24;
	opt[3] = sp->lcp.magic >> 16;
	opt[4] = sp->lcp.magic >> 8;
	opt[5] = sp->lcp.magic;
	sp->lcp.confid = ++sp->pp_seq;
	sppp_cp_send (sp, PPP_LCP, LCP_CONF_REQ, sp->lcp.confid,
		sizeof (opt), &opt);
	sppp_set_timeout (sp, 2);
}

static void sppp_ipcp_open (struct sppp *sp)
{
	sp->ipcp.confid = ++sp->pp_seq;
	sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_REQ, sp->ipcp.confid, 0, NULL);
	sppp_set_timeout (sp, 2);
}

/*
 * Process PPP control protocol timeouts.
 */

static void sppp_cp_timeout (unsigned long arg)
{
	struct sppp *sp = (struct sppp*) arg;
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);

	sp->pp_flags &= ~PP_TIMO;
	if (! (sp->pp_if->flags & IFF_UP) || (sp->pp_flags & PP_CISCO)) {
		spin_unlock_irqrestore(&sp->lock, flags);
		return;
	}
	switch (sp->lcp.state) {
	case LCP_STATE_CLOSED:
		/* No ACK for Configure-Request, retry. */
		sppp_lcp_open (sp);
		break;
	case LCP_STATE_ACK_RCVD:
		/* ACK got, but no Configure-Request for peer, retry. */
		sppp_lcp_open (sp);
		sp->lcp.state = LCP_STATE_CLOSED;
		break;
	case LCP_STATE_ACK_SENT:
		/* ACK sent but no ACK for Configure-Request, retry. */
		sppp_lcp_open (sp);
		break;
	case LCP_STATE_OPENED:
		/* LCP is already OK, try IPCP. */
		switch (sp->ipcp.state) {
		case IPCP_STATE_CLOSED:
			/* No ACK for Configure-Request, retry. */
			sppp_ipcp_open (sp);
			break;
		case IPCP_STATE_ACK_RCVD:
			/* ACK got, but no Configure-Request for peer, retry. */
			sppp_ipcp_open (sp);
			sp->ipcp.state = IPCP_STATE_CLOSED;
			break;
		case IPCP_STATE_ACK_SENT:
			/* ACK sent but no ACK for Configure-Request, retry. */
			sppp_ipcp_open (sp);
			break;
		case IPCP_STATE_OPENED:
			/* IPCP is OK. */
			break;
		}
		break;
	}
	spin_unlock_irqrestore(&sp->lock, flags);
	sppp_flush_xmit();
}

static char *sppp_lcp_type_name (u8 type)
{
	static char buf[8];
	switch (type) {
	case LCP_CONF_REQ:   return ("conf-req");
	case LCP_CONF_ACK:   return ("conf-ack");
	case LCP_CONF_NAK:   return ("conf-nack");
	case LCP_CONF_REJ:   return ("conf-rej");
	case LCP_TERM_REQ:   return ("term-req");
	case LCP_TERM_ACK:   return ("term-ack");
	case LCP_CODE_REJ:   return ("code-rej");
	case LCP_PROTO_REJ:  return ("proto-rej");
	case LCP_ECHO_REQ:   return ("echo-req");
	case LCP_ECHO_REPLY: return ("echo-reply");
	case LCP_DISC_REQ:   return ("discard-req");
	}
	sprintf (buf, "%xh", type);
	return (buf);
}

static char *sppp_ipcp_type_name (u8 type)
{
	static char buf[8];
	switch (type) {
	case IPCP_CONF_REQ: return ("conf-req");
	case IPCP_CONF_ACK: return ("conf-ack");
	case IPCP_CONF_NAK: return ("conf-nack");
	case IPCP_CONF_REJ: return ("conf-rej");
	case IPCP_TERM_REQ: return ("term-req");
	case IPCP_TERM_ACK: return ("term-ack");
	case IPCP_CODE_REJ: return ("code-rej");
	}
	sprintf (buf, "%xh", type);
	return (buf);
}

static void sppp_print_bytes (u_char *p, u16 len)
{
	printk (" %x", *p++);
	while (--len > 0)
		printk ("-%x", *p++);
}

/**
 *	sppp_rcv -	receive and process a WAN PPP frame
 *	@skb:	The buffer to process
 *	@dev:	The device it arrived on
 *	@p: Unused
 *	@orig_dev: Unused
 *
 *	Protocol glue. This drives the deferred processing mode the poorer
 *	cards use. This can be called directly by cards that do not have
 *	timing constraints but is normally called from the network layer
 *	after interrupt servicing to process frames queued via netif_rx.
 */

static int sppp_rcv(struct sk_buff *skb, struct net_device *dev,
		    struct packet_type *p, struct net_device *orig_dev)
{
	if (dev_net(dev) != &init_net) {
		kfree_skb(skb);
		return 0;
	}

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;
	sppp_input(dev, skb);
	return 0;
}

static struct packet_type sppp_packet_type = {
	.type	= __constant_htons(ETH_P_WAN_PPP),
	.func	= sppp_rcv,
};

static char banner[] __initdata =
	KERN_INFO "Cronyx Ltd, Synchronous PPP and CISCO HDLC (c) 1994\n"
	KERN_INFO "Linux port (c) 1998 Building Number Three Ltd & "
		  "Jan \"Yenya\" Kasprzak.\n";

static int __init sync_ppp_init(void)
{
	if (debug)
		debug = PP_DEBUG;
	printk(banner);
	skb_queue_head_init(&tx_queue);
	dev_add_pack(&sppp_packet_type);
	return 0;
}

static void __exit sync_ppp_cleanup(void)
{
	dev_remove_pack(&sppp_packet_type);
}

module_init(sync_ppp_init);
module_exit(sync_ppp_cleanup);
module_param(debug, int, 0);
MODULE_LICENSE("GPL");
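
/*
 * Illustrative only (not part of the original file): loading the module with
 * keepalive debugging enabled.  "debug" is the module_param() declared above;
 * a non-zero value is mapped to PP_DEBUG in sync_ppp_init().
 *
 *	modprobe syncppp debug=1
 *
 * Switching a port between PPP and Cisco HDLC framing goes through the
 * SPPPIOCPPP/SPPPIOCCISCO ioctls handled by sppp_do_ioctl(); the userspace
 * tool that issues them is driver-specific and not shown here.
 */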