/*********************************************************************
 *
 * sir_dev.c:	irda sir network device
 *
 *	Copyright (c) 2002 Martin Diehl
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "sir-dev.h"

/***************************************************************************/

void sirdev_enable_rx(struct sir_dev *dev)
{
	if (unlikely(atomic_read(&dev->enable_rx)))
		return;

	/* flush rx-buffer - should also help in case of problems with echo cancellation */
	dev->rx_buff.data = dev->rx_buff.head;
	dev->rx_buff.len = 0;
	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	atomic_set(&dev->enable_rx, 1);
}

static int sirdev_is_receiving(struct sir_dev *dev)
{
	if (!atomic_read(&dev->enable_rx))
		return 0;

	return (dev->rx_buff.state != OUTSIDE_FRAME);
}

int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
	int err;

	IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type);

	err = sirdev_schedule_dongle_open(dev, type);
	if (unlikely(err))
		return err;
	down(&dev->fsm.sem);		/* block until config change completed */
	err = dev->fsm.result;
	up(&dev->fsm.sem);
	return err;
}

/* used by dongle drivers for dongle programming */

int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
	unsigned long flags;
	int ret;

	if (unlikely(len > dev->tx_buff.truesize))
		return -ENOSPC;

	spin_lock_irqsave(&dev->tx_lock, flags);	/* serialize with other tx operations */
	while (dev->tx_buff.len > 0) {			/* wait until tx idle */
		spin_unlock_irqrestore(&dev->tx_lock, flags);
		msleep(10);
		spin_lock_irqsave(&dev->tx_lock, flags);
	}

	dev->tx_buff.data = dev->tx_buff.head;
	memcpy(dev->tx_buff.data, buf, len);
	dev->tx_buff.len = len;

	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
	if (ret > 0) {
		IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__);

		dev->tx_buff.data += ret;
		dev->tx_buff.len -= ret;
		dev->raw_tx = 1;
		ret = len;		/* all data is going to be sent */
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
	return ret;
}
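/* Illustrative only: a minimal sketch of how a dongle driver might use
 * sirdev_raw_write() for dongle programming. The function name and the
 * command bytes are hypothetical, not taken from any real dongle driver.
 * Such writes happen while the irda-thread has the netif queue stopped;
 * sirdev_write_complete() below takes care not to wake the queue while
 * raw_tx is set.
 */
#if 0
static int example_dongle_program(struct sir_dev *dev)
{
	static const char cmd[] = { 0x01, 0x02 };	/* hypothetical command */
	int ret;

	ret = sirdev_raw_write(dev, cmd, sizeof(cmd));
	if (ret != sizeof(cmd))
		return (ret < 0) ? ret : -EIO;
	return 0;
}
#endif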
/* seems some dongle drivers may need this */

int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
	int count;

	if (atomic_read(&dev->enable_rx))
		return -EIO;		/* fail if we expect irda-frames */

	count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;

	if (count > 0) {
		memcpy(buf, dev->rx_buff.data, count);
		dev->rx_buff.data += count;
		dev->rx_buff.len -= count;
	}

	/* remaining stuff gets flushed when re-enabling normal rx */

	return count;
}

int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
	int ret = -ENXIO;
	if (dev->drv->set_dtr_rts != 0)
		ret = dev->drv->set_dtr_rts(dev, dtr, rts);
	return ret;
}
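/* Illustrative only: a common dongle reset pattern built on
 * sirdev_set_dtr_rts(). Pulse width and line polarity are hypothetical;
 * each dongle documents its own reset timing. msleep() is acceptable
 * here because dongle programming runs under the irda-thread, not in
 * irq context.
 */
#if 0
static int example_dongle_reset(struct sir_dev *dev)
{
	int ret;

	ret = sirdev_set_dtr_rts(dev, FALSE, FALSE);	/* drop both lines */
	if (ret < 0)
		return ret;
	msleep(50);					/* hypothetical pulse width */
	return sirdev_set_dtr_rts(dev, TRUE, TRUE);	/* power the dongle again */
}
#endif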
/**********************************************************************/

/* called from client driver - likely with bh-context - to indicate
 * it made some progress with transmission. Hence we send the next
 * chunk, if any, or complete the skb otherwise
 */

void sirdev_write_complete(struct sir_dev *dev)
{
	unsigned long flags;
	struct sk_buff *skb;
	int actual = 0;
	int err;

	spin_lock_irqsave(&dev->tx_lock, flags);

	IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
		   __FUNCTION__, dev->tx_buff.len);

	if (likely(dev->tx_buff.len > 0)) {
		/* Write data left in transmit buffer */
		actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

		if (likely(actual > 0)) {
			dev->tx_buff.data += actual;
			dev->tx_buff.len -= actual;
		}
		else if (unlikely(actual < 0)) {
			/* could be dropped later when we have tx_timeout to recover */
			IRDA_ERROR("%s: drv->do_write failed (%d)\n",
				   __FUNCTION__, actual);
			if ((skb = dev->tx_skb) != NULL) {
				dev->tx_skb = NULL;
				dev_kfree_skb_any(skb);
				dev->stats.tx_errors++;
				dev->stats.tx_dropped++;
			}
			dev->tx_buff.len = 0;
		}
		if (dev->tx_buff.len > 0)
			goto done;	/* more data to send later */
	}

	if (unlikely(dev->raw_tx != 0)) {
		/* in raw mode we are just done now after the buffer was sent
		 * completely. Since this was requested by some dongle driver
		 * running under the control of the irda-thread we must take
		 * care here not to re-enable the queue. The queue will be
		 * restarted when the irda-thread has completed the request.
		 */

		IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__);
		dev->raw_tx = 0;
		goto done;	/* no post-frame handling in raw mode */
	}

	/* we have finished now sending this skb.
	 * update statistics and free the skb.
	 * finally we check and trigger a pending speed change, if any.
	 * if not we switch to rx mode and wake the queue for further
	 * packets.
	 * note the scheduled speed request blocks until the lower
	 * client driver and the corresponding hardware have really
	 * finished sending all data (xmit fifo drained f.e.)
	 * before the speed change gets finally done and the queue
	 * re-activated.
	 */

	IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__);

	if ((skb = dev->tx_skb) != NULL) {
		dev->tx_skb = NULL;
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	if (unlikely(dev->new_speed > 0)) {
		IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
		err = sirdev_schedule_speed(dev, dev->new_speed);
		if (unlikely(err)) {
			/* should never happen
			 * forget the speed change and hope the stack recovers
			 */
			IRDA_ERROR("%s - schedule speed change failed: %d\n",
				   __FUNCTION__, err);
			netif_wake_queue(dev->netdev);
		}
		/* else: success
		 *	speed change in progress now
		 *	on completion dev->new_speed gets cleared,
		 *	rx-reenabled and the queue restarted
		 */
	}
	else {
		sirdev_enable_rx(dev);
		netif_wake_queue(dev->netdev);
	}

done:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

/* called from client driver - likely with bh-context - to give us
 * some more received bytes. We put them into the rx-buffer,
 * normally unwrapping and building LAP-skb's (unless rx disabled)
 */

int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
	if (!dev || !dev->netdev) {
		IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
		return -1;
	}

	if (!dev->irlap) {
		IRDA_WARNING("%s - too early: %p / %zd!\n",
			     __FUNCTION__, cp, count);
		return -1;
	}

	if (cp == NULL) {
		/* error already at lower level receive
		 * just update stats and set media busy
		 */
		irda_device_set_media_busy(dev->netdev, TRUE);
		dev->stats.rx_dropped++;
		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
		return 0;
	}

	/* Read the characters into the buffer */
	if (likely(atomic_read(&dev->enable_rx))) {
		while (count--)
			/* Unwrap and destuff one byte */
			async_unwrap_char(dev->netdev, &dev->stats,
					  &dev->rx_buff, *cp++);
	} else {
		while (count--) {
			/* rx not enabled: save the raw bytes and never
			 * trigger any netif_rx. The received bytes are flushed
			 * later when we re-enable rx but might be read meanwhile
			 * by the dongle driver.
			 */
			dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

			/* What should we do when the buffer is full? */
			if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
				dev->rx_buff.len = 0;
		}
	}

	return 0;
}
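/* Illustrative only: how a lower-level client driver typically feeds
 * received bytes into sirdev_receive(). The callback below is a
 * hypothetical name; real users (e.g. irtty-sir) call sirdev_receive()
 * from their own receive paths and pass cp == NULL to report a
 * low-level receive error.
 */
#if 0
static void example_driver_rx(struct sir_dev *dev,
			      const unsigned char *data, size_t n)
{
	if (data == NULL)
		sirdev_receive(dev, NULL, 0);	/* signal rx error upward */
	else
		sirdev_receive(dev, data, n);
}
#endif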
/**********************************************************************/

/* callbacks from network layer */

static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;

	return (dev) ? &dev->stats : NULL;
}

static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;
	unsigned long flags;
	int actual = 0;
	int err;
	s32 speed;

	IRDA_ASSERT(dev != NULL, return 0;);

	netif_stop_queue(ndev);

	IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len);

	speed = irda_get_next_speed(skb);
	if ((speed != dev->speed) && (speed != -1)) {
		if (!skb->len) {
			err = sirdev_schedule_speed(dev, speed);
			if (unlikely(err == -EWOULDBLOCK)) {
				/* Failed to initiate the speed change, likely the fsm
				 * is still busy (pretty unlikely, but...)
				 * We refuse to accept the skb and return with the queue
				 * stopped so the network layer will retry after the
				 * fsm completes and wakes the queue.
				 */
				return 1;
			}
			else if (unlikely(err)) {
				/* other fatal error - forget the speed change and
				 * hope the stack will recover somehow
				 */
				netif_start_queue(ndev);
			}
			/* else: success
			 *	speed change in progress now
			 *	on completion the queue gets restarted
			 */

			dev_kfree_skb_any(skb);
			return 0;
		} else
			dev->new_speed = speed;
	}

	/* Init tx buffer */
	dev->tx_buff.data = dev->tx_buff.head;

	/* Check problems */
	if (spin_is_locked(&dev->tx_lock)) {
		IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__);
	}

	/* serialize with write completion */
	spin_lock_irqsave(&dev->tx_lock, flags);

	/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
	dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);

	/* transmission will start now - disable receive.
	 * if we are just in the middle of an incoming frame,
	 * treat it as a collision. probably it's a good idea to
	 * reset the rx_buff to OUTSIDE_FRAME in this case too?
	 */
	atomic_set(&dev->enable_rx, 0);
	if (unlikely(sirdev_is_receiving(dev)))
		dev->stats.collisions++;

	actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

	if (likely(actual > 0)) {
		dev->tx_skb = skb;
		ndev->trans_start = jiffies;
		dev->tx_buff.data += actual;
		dev->tx_buff.len -= actual;
	}
	else if (unlikely(actual < 0)) {
		/* could be dropped later when we have tx_timeout to recover */
		IRDA_ERROR("%s: drv->do_write failed (%d)\n",
			   __FUNCTION__, actual);
		dev_kfree_skb_any(skb);
		dev->stats.tx_errors++;
		dev->stats.tx_dropped++;
		netif_wake_queue(ndev);
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return 0;
}
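/* Illustrative only: the do_write() contract that sirdev_hard_xmit()
 * and sirdev_write_complete() rely on. do_write() may accept fewer
 * bytes than offered and returns the number actually taken (or a
 * negative errno); the client driver calls sirdev_write_complete()
 * once that chunk has drained. The fifo size below is hypothetical.
 */
#if 0
static int example_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t len)
{
	size_t room = 16;	/* hypothetical free space in the tx fifo */

	if (len > room)
		len = room;
	/* ...push 'len' bytes into the hardware here... */
	return len;		/* partial writes are fine */
}
#endif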
/* called from network layer with rtnl hold */

static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct sir_dev *dev = ndev->priv;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSDONGLE: /* Set dongle */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			irda_device_set_media_busy(dev->netdev, TRUE);
		break;

	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = sirdev_is_receiving(dev);
		break;

	case SIOCSDTRRTS:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSMODE:
#if 0
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_mode(dev, irq->ifr_mode);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;
#endif
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

/* ----------------------------------------------------------------------------- */

#define SIRBUF_ALLOCSIZE 4269	/* worst case size of a wrapped IrLAP frame */

static int sirdev_alloc_buffers(struct sir_dev *dev)
{
	dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
	dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;

	/* Bootstrap ZeroCopy Rx */
	dev->rx_buff.skb = __dev_alloc_skb(dev->rx_buff.truesize, GFP_KERNEL);
	if (dev->rx_buff.skb == NULL)
		return -ENOMEM;
	skb_reserve(dev->rx_buff.skb, 1);
	dev->rx_buff.head = dev->rx_buff.skb->data;

	dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
	if (dev->tx_buff.head == NULL) {
		kfree_skb(dev->rx_buff.skb);
		dev->rx_buff.skb = NULL;
		dev->rx_buff.head = NULL;
		return -ENOMEM;
	}

	dev->tx_buff.data = dev->tx_buff.head;
	dev->rx_buff.data = dev->rx_buff.head;
	dev->tx_buff.len = 0;
	dev->rx_buff.len = 0;

	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	return 0;
}

static void sirdev_free_buffers(struct sir_dev *dev)
{
	if (dev->rx_buff.skb)
		kfree_skb(dev->rx_buff.skb);
	if (dev->tx_buff.head)
		kfree(dev->tx_buff.head);
	dev->rx_buff.head = dev->tx_buff.head = NULL;
	dev->rx_buff.skb = NULL;
}

static int sirdev_open(struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;
	const struct sir_driver *drv = dev->drv;

	if (!drv)
		return -ENODEV;

	/* increase the reference count of the driver module before doing serious stuff */
	if (!try_module_get(drv->owner))
		return -ESTALE;

	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);

	if (sirdev_alloc_buffers(dev))
		goto errout_dec;

	if (!dev->drv->start_dev || dev->drv->start_dev(dev))
		goto errout_free;

	sirdev_enable_rx(dev);
	dev->raw_tx = 0;

	netif_start_queue(ndev);
	dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
	if (!dev->irlap)
		goto errout_stop;

	netif_wake_queue(ndev);

	IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed);

	return 0;

errout_stop:
	atomic_set(&dev->enable_rx, 0);
	if (dev->drv->stop_dev)
		dev->drv->stop_dev(dev);
errout_free:
	sirdev_free_buffers(dev);
errout_dec:
	module_put(drv->owner);
	return -EAGAIN;
}

static int sirdev_close(struct net_device *ndev)
{
	struct sir_dev *dev = ndev->priv;
	const struct sir_driver *drv;

//	IRDA_DEBUG(0, "%s\n", __FUNCTION__);

	netif_stop_queue(ndev);

	down(&dev->fsm.sem);		/* block on pending config completion */

	atomic_set(&dev->enable_rx, 0);

	if (unlikely(!dev->irlap))
		goto out;
	irlap_close(dev->irlap);
	dev->irlap = NULL;

	drv = dev->drv;
	if (unlikely(!drv || !dev->priv))
		goto out;

	if (drv->stop_dev)
		drv->stop_dev(dev);

	sirdev_free_buffers(dev);
	module_put(drv->owner);

out:
	dev->speed = 0;
	up(&dev->fsm.sem);
	return 0;
}
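/* Illustrative only: the start_dev/stop_dev hooks that sirdev_open()
 * and sirdev_close() invoke on the client driver. Both function names
 * are hypothetical; a real driver claims and releases its low-level
 * resources here.
 */
#if 0
static int example_start_dev(struct sir_dev *dev)
{
	/* enable the uart / usb endpoint, default to 9600 baud, etc. */
	return 0;	/* non-zero makes sirdev_open() fail with -EAGAIN */
}

static int example_stop_dev(struct sir_dev *dev)
{
	/* quiesce the hardware; rx/tx buffers are freed by sirdev_close() */
	return 0;
}
#endif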
/* ----------------------------------------------------------------------------- */

struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
{
	struct net_device *ndev;
	struct sir_dev *dev;

	IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name);

	/* instead of adding tests to protect against drv->do_write==NULL
	 * at several places we refuse to create a sir_dev instance for
	 * drivers which don't implement do_write.
	 */
	if (!drv || !drv->do_write)
		return NULL;

	/*
	 * Allocate new instance of the device
	 */
	ndev = alloc_irdadev(sizeof(*dev));
	if (ndev == NULL) {
		IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__);
		goto out;
	}
	dev = ndev->priv;

	irda_init_max_qos_capabilies(&dev->qos);
	dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
	irda_qos_bits_to_value(&dev->qos);

	strncpy(dev->hwname, name, sizeof(dev->hwname)-1);

	atomic_set(&dev->enable_rx, 0);
	dev->tx_skb = NULL;

	spin_lock_init(&dev->tx_lock);
	init_MUTEX(&dev->fsm.sem);

	INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
	dev->fsm.rq.pending = 0;
	init_timer(&dev->fsm.rq.timer);

	dev->drv = drv;
	dev->netdev = ndev;

	SET_MODULE_OWNER(ndev);

	/* Override the network functions we need to use */
	ndev->hard_start_xmit = sirdev_hard_xmit;
	ndev->open = sirdev_open;
	ndev->stop = sirdev_close;
	ndev->get_stats = sirdev_get_stats;
	ndev->do_ioctl = sirdev_ioctl;

	if (register_netdev(ndev)) {
		IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
		goto out_freenetdev;
	}

	return dev;

out_freenetdev:
	free_netdev(ndev);
out:
	return NULL;
}

int sirdev_put_instance(struct sir_dev *dev)
{
	int err = 0;

	IRDA_DEBUG(0, "%s\n", __FUNCTION__);

	atomic_set(&dev->enable_rx, 0);

	netif_carrier_off(dev->netdev);
	netif_device_detach(dev->netdev);

	if (dev->dongle_drv)
		err = sirdev_schedule_dongle_close(dev);
	if (err)
		IRDA_ERROR("%s - error %d\n", __FUNCTION__, err);

	sirdev_close(dev->netdev);

	down(&dev->fsm.sem);
	dev->fsm.state = SIRDEV_STATE_DEAD;	/* mark stale */
	dev->dongle_drv = NULL;
	dev->priv = NULL;
	up(&dev->fsm.sem);

	/* Remove netdevice */
	unregister_netdev(dev->netdev);

	free_netdev(dev->netdev);

	return 0;
}
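/* Illustrative only: the life cycle a client driver goes through with
 * this core, tying together the hypothetical example_* sketches above.
 * The driver name, mtt bits and probe pairing are assumptions; real
 * users (e.g. irtty-sir) fill in struct sir_driver the same way and
 * keep the returned sir_dev for the matching sirdev_put_instance().
 */
#if 0
static struct sir_driver example_sir_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "example-sir",
	.qos_mtt_bits	= 0x07,			/* hypothetical min-turn-time */
	.do_write	= example_do_write,
	.start_dev	= example_start_dev,
	.stop_dev	= example_stop_dev,
};

static int example_probe(void)
{
	struct sir_dev *dev;

	dev = sirdev_get_instance(&example_sir_driver, "example");
	return dev ? 0 : -ENOMEM;	/* keep dev for sirdev_put_instance() */
}
#endif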