/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
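/*
 * Typical call sequence for a function driver using this layer -- a
 * minimal sketch only; the bind/unbind and set_alt plumbing around it,
 * and the my_port/my_ethaddr names, are assumptions for illustration,
 * not part of this file:
 *
 *	u8 my_ethaddr[ETH_ALEN];
 *	struct gether my_port;
 *
 *	once, at gadget bind time:
 *		status = gether_setup(cdev->gadget, my_ethaddr);
 *	when the host selects the data interface's altsetting:
 *		net = gether_connect(&my_port);
 *		if (IS_ERR(net))
 *			return PTR_ERR(net);
 *	when that altsetting is deselected, or on disconnect:
 *		gether_disconnect(&my_port);
 *	once, at gadget unbind time:
 *		gether_cleanup();
 */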
#define UETH__VERSION	"29-May-2008"

struct eth_dev {
	/* lock is held while accessing port_usb
	 * or updating its backlink port_usb->ioport
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
					struct sk_buff *skb,
					struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */


#ifdef CONFIG_USB_GADGET_DUALSPEED

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");

#else	/* full speed (low speed doesn't do bulk) */
#define qmult		1
#endif

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
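/* So with the default qmult of 5, a dual-speed device enumerating at
 * high or super speed preallocates 5 * 2 = 10 requests per direction
 * (see alloc_requests() below), while at full speed it keeps just the
 * double-buffered pair.  (Illustrative arithmetic, not new policy.)
 */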
/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof p->driver);
	strlcpy(p->version, UETH__VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
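	/* Worked example (illustrative numbers, not from this file):
	 * with the usual 1500-byte MTU, no function-specific header
	 * (header_len == 0), and 512-byte high-speed bulk packets,
	 * size = 14 + 1500 + 20 = 1534, then rounded up to the next
	 * maxpacket multiple: 1536 bytes (3 * 512).
	 */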
	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
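/* The unwrap() hook consumes one USB transfer's worth of data and queues
 * zero or more Ethernet frames onto the list it is given.  A minimal
 * sketch for a framing that just prepends a fixed header -- the struct
 * my_header name is hypothetical, and real implementations (RNDIS, EEM)
 * also validate lengths and may batch several frames per transfer:
 *
 *	static int my_unwrap(struct gether *port, struct sk_buff *skb,
 *			struct sk_buff_head *list)
 *	{
 *		if (!skb_pull(skb, sizeof(struct my_header))) {
 *			dev_kfree_skb_any(skb);
 *			return -EINVAL;
 *		}
 *		skb_queue_tail(list, skb);
 *		return 0;
 *	}
 */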
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}
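/* cdc_filter is the bitmap the host set with SET_ETHERNET_PACKET_FILTER;
 * the USB_CDC_PACKET_TYPE_* bits come from <linux/usb/cdc.h>.  For
 * example (illustrative): a host that enabled only
 * USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST would
 * have its non-broadcast multicast frames dropped by the test below.
 */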
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it,
	 * the hardware can't use skb buffers, or there's not enough
	 * space for the extra headers we need.
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding,
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;
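	/* Example (illustrative numbers): with 512-byte bulk packets and
	 * a 1024-byte frame, req->zero is normally set so the transfer
	 * ends with a zlp; on a UDC where dev->zlp is false, the length
	 * bump above sends 1025 bytes instead, so the host still sees a
	 * short packet terminating the transfer.
	 */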
	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}
static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	random_ether_addr(dev_addr);
	return 1;
}
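/* The parser above accepts six hex octets, optionally separated by ':'
 * or '.', so (for example) loading the legacy g_ether module with
 *
 *	modprobe g_ether dev_addr=0a:12:34:56:78:9c host_addr=0a:12:34:56:78:9d
 *
 * pins both link-level addresses instead of randomizing them.  Locally
 * administered unicast addresses, as in this illustration, are the safe
 * choice.
 */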
static struct eth_dev *the_dev;

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	strcpy(net->name, "usb%d");

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	/* two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	netif_carrier_off(net);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		the_dev = dev;
	}

	return status;
}

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(void)
{
	if (!the_dev)
		return;

	unregister_netdev(the_dev->net);
	flush_work_sync(&the_dev->work);
	free_netdev(the_dev->net);

	the_dev = NULL;
}
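/* The host_mac copied out through gether_setup()'s ethaddr parameter is
 * what CDC function drivers expose to the host, typically as the twelve
 * hex digits of the iMACAddress string descriptor.  A minimal sketch,
 * with hypothetical names (the real code lives in the function drivers):
 *
 *	u8 addr[ETH_ALEN];
 *	char imac[ETH_ALEN * 2 + 1];
 *	int i;
 *
 *	if (gether_setup(gadget, addr) < 0)
 *		goto fail;
 *	for (i = 0; i < ETH_ALEN; i++)
 *		snprintf(&imac[i * 2], 3, "%02X", addr[i]);
 */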
/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, with descriptors matching
 *	the current device speed, and with any framing wrapper(s) installed
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		link->ioport = dev;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
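/* gether_connect() and gether_disconnect() pair up naturally in a
 * function driver's set_alt()/disable() callbacks.  A sketch, assuming
 * the usual "reconnect on altsetting change" idiom and omitting the
 * endpoint descriptor plumbing (the my_* names are hypothetical):
 *
 *	static int my_set_alt(struct usb_function *f, unsigned intf,
 *			unsigned alt)
 *	{
 *		struct net_device *net;
 *
 *		if (my_port.ioport)
 *			gether_disconnect(&my_port);
 *		if (alt == 1) {
 *			net = gether_connect(&my_port);
 *			if (IS_ERR(net))
 *				return PTR_ERR(net);
 *		}
 *		return 0;
 *	}
 */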
/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->ioport = NULL;
	spin_unlock(&dev->lock);
}