Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
sunvnet.c at v2.6.31-rc9 (1297 lines, 29 kB)
/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet.h"

#define DRV_MODULE_NAME		"sunvnet"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
	{ .major = 1, .minor = 0 },
};

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vnet_send_attr(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = port->vp->dev;
	struct vio_net_attr_info pkt;
	int i;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	pkt.mtu = ETH_FRAME_LEN;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long) pkt.addr,
	       pkt.ack_freq,
	       (unsigned long long) pkt.mtu);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}

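/* For illustration: the loop above packs the 6-byte Ethernet address
 * into the low 48 bits of the u64 attribute field, most significant
 * byte first.  Assuming a MAC of 00:14:4f:fa:00:01, the shifts are
 * 40, 32, 24, 16, 8 and 0 bits respectively, so:
 *
 *	pkt.addr == 0x00144ffa0001ULL
 *
 * vnet_new() further down performs the inverse unpacking when it
 * consumes the "local-mac-address" property from the machine
 * description.
 */
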
static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long) pkt->addr,
	       pkt->ack_freq,
	       (unsigned long long) pkt->mtu);

	pkt->tag.sid = vio_send_sid(vio);

	if (pkt->xfer_mode != VIO_DRING_MODE ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != ETH_FRAME_LEN) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void) vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	} else {
		viodbg(HS, "SEND NET ATTR ACK\n");

		pkt->tag.stype = VIO_SUBTYPE_ACK;

		return vio_ldc_send(vio, pkt, sizeof(*pkt));
	}

}

static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}

static void vnet_handshake_complete(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;
}

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
	unsigned long addr, off;

	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long) skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}

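/* Worked example of the alignment rules above (illustrative only,
 * assuming the implied 6-byte VNET_PACKET_SKIP offset):
 *
 *	len = 60 (a minimum-size frame)
 *	allocation = 60 + 6 + 8 + 8 = 82 bytes, so that after any
 *	alignment reserve there is still room for the skip bytes and
 *	the length round-up.
 *
 *	If netdev_alloc_skb() returns skb->data at ...0x1004, then
 *	off = ((0x1004 + 7) & ~7) - 0x1004 = 4, and skb_reserve(skb, 4)
 *	moves skb->data to the 8-byte boundary 0x1008.
 *
 *	vnet_rx_one() then copies copy_len = (60 + 6 + 7) & ~7 = 72
 *	bytes, and trims back down to the real 60-byte frame after
 *	pulling the 6 skip bytes.
 */
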
static int vnet_rx_one(struct vnet_port *port, unsigned int len,
		       struct ldc_trans_cookie *cookies, int ncookies)
{
	struct net_device *dev = port->vp->dev;
	unsigned int copy_len;
	struct sk_buff *skb;
	int err;

	err = -EMSGSIZE;
	if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       cookies, ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	netif_rx(skb);

	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}

static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_ACK,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident	= dr->ident,
		.start_idx	= start,
		.end_idx	= end,
		.state		= vio_dring_state,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

static u32 next_idx(u32 idx, struct vio_dring_state *dr)
{
	if (++idx == dr->num_entries)
		idx = 0;
	return idx;
}

static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
{
	if (idx == 0)
		idx = dr->num_entries - 1;
	else
		idx--;

	return idx;
}

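/* Illustrative only: next_idx() and prev_idx() walk a descriptor ring
 * with explicit wraparound, e.g. with num_entries == 4:
 *
 *	next_idx(3, dr) == 0	prev_idx(0, dr) == 3
 *	next_idx(1, dr) == 2	prev_idx(2, dr) == 1
 *
 * The compare-based wrap (rather than a power-of-two mask) suits the
 * RX ring, whose size is dictated by the peer.  The retry loop in
 * vnet_send_ack() above also shows the LDC send convention used
 * throughout this file: back off exponentially (1, 2, 4, ... capped
 * at 128 microseconds) for as long as the channel returns -EAGAIN.
 */
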
static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	if (IS_ERR(desc))
		return PTR_ERR(desc);

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;
	err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
	if (err == -ECONNRESET)
		return err;
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}

static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;

	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = next_idx(start, dr);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
	}
	if (unlikely(ack_start == -1))
		ack_start = ack_end = prev_idx(start, dr);
	return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
}

static int vnet_rx(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		printk(KERN_ERR PFX "RX out of sequence seq[0x%llx] "
		       "rcv_nxt[0x%llx]\n", pkt->seq, dr->rcv_nxt);
		return 0;
	}

	dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
}

static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = next_idx(idx, dr);
	}
	return found;
}

static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	struct vnet *vp;
	u32 end;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	if (unlikely(!idx_is_pending(dr, end)))
		return 0;

	dr->cons = next_idx(end, dr);

	vp = port->vp;
	dev = vp->dev;
	if (unlikely(netif_queue_stopped(dev) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		printk(KERN_ERR PFX "%s: Got unexpected MCAST reply "
		       "[%02x:%02x:%04x:%08x]\n",
		       port->vp->dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

static void maybe_tx_wakeup(struct vnet *vp)
{
	struct net_device *dev = vp->dev;

	netif_tx_lock(dev);
	if (likely(netif_queue_stopped(dev))) {
		struct vnet_port *port;
		int wake = 1;

		list_for_each_entry(port, &vp->port_list, list) {
			struct vio_dring_state *dr;

			dr = &port->vio.drings[VIO_DRIVER_TX_RING];
			if (vnet_tx_dring_avail(dr) <
			    VNET_TX_WAKEUP_THRESH(dr)) {
				wake = 0;
				break;
			}
		}
		if (wake)
			netif_wake_queue(dev);
	}
	netif_tx_unlock(dev);
}

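/* Sketch of the event flow handled below: the LDC layer invokes
 * vnet_event() for three cases.  LDC_EVENT_UP and LDC_EVENT_RESET
 * feed the generic VIO handshake machinery via
 * vio_link_state_change(), with a reset additionally re-arming the
 * port through vio_port_up().  Only LDC_EVENT_DATA_READY drains the
 * channel: ldc_read() is called in a loop until it returns 0 (no more
 * messages) or an error, and each message is dispatched on its tag
 * type, DATA to vnet_rx()/vnet_ack()/vnet_nack(), CTRL to the
 * multicast handler or vio_control_pkt_engine(), and anything else to
 * vnet_handle_unknown().  A positive return from vnet_ack() latches
 * tx_wakeup so maybe_tx_wakeup() can run once the lock is dropped.
 */
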
static void vnet_event(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int tx_wakeup, err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);

		if (event == LDC_EVENT_RESET)
			vio_port_up(vio);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	tx_wakeup = err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				err = vnet_rx(port, &msgbuf);
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	spin_unlock(&vio->lock);
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port->vp);
	local_irq_restore(flags);
}

static int __vnet_tx_trigger(struct vnet_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident	= dr->ident,
		.start_idx	= dr->prod,
		.end_idx	= (u32) -1,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	unsigned int hash = vnet_hashfn(skb->data);
	struct hlist_head *hp = &vp->port_hash[hash];
	struct hlist_node *n;
	struct vnet_port *port;

	hlist_for_each_entry(port, n, hp, hash) {
		if (!compare_ether_addr(port->raddr, skb->data))
			return port;
	}
	port = NULL;
	if (!list_empty(&vp->port_list))
		port = list_entry(vp->port_list.next, struct vnet_port, list);

	return port;
}

struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	struct vnet_port *ret;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	ret = __tx_port_find(vp, skb);
	spin_unlock_irqrestore(&vp->lock, flags);

	return ret;
}

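/* Note: __tx_port_find() keys the hash on the destination MAC at
 * skb->data, i.e. the start of the Ethernet header.  If no port's
 * remote MAC matches, the frame falls back to the first port on
 * vp->port_list; vnet_port_probe() below uses list_add() for switch
 * ports and list_add_tail() for the rest, so the fallback is the
 * switch port whenever one exists, which is how traffic for unknown
 * destinations reaches the external network.
 */
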
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = tx_port_find(vp, skb);
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned long flags;
	unsigned int len;
	void *tx_buf;
	int i, err;

	if (unlikely(!port))
		goto out_dropped;

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
			dev->stats.tx_errors++;
		}
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	tx_buf = port->tx_bufs[dr->prod].buf;
	skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);

	len = skb->len;
	if (len < ETH_ZLEN) {
		len = ETH_ZLEN;
		memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
	}

	d->hdr.ack = VIO_ACK_ENABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[dr->prod].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	d->hdr.state = VIO_DESC_READY;

	err = __vnet_tx_trigger(port);
	if (unlikely(err < 0)) {
		printk(KERN_INFO PFX "%s: TX trigger error %d\n",
		       dev->name, err);
		d->hdr.state = VIO_DESC_FREE;
		dev->stats.tx_carrier_errors++;
		goto out_dropped_unlock;
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		netif_stop_queue(dev);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);

	dev_kfree_skb(skb);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;

out_dropped_unlock:
	spin_unlock_irqrestore(&port->vio.lock, flags);

out_dropped:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}

static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;
}

static int vnet_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}

static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (!memcmp(m->addr, addr, ETH_ALEN))
			return m;
	}
	return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct dev_addr_list *p;

	for (p = dev->mc_list; p; p = p->next) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, p->dmi_addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, p->dmi_addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}

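/* Note: the multicast list is reconciled with two flags per entry.
 * __update_mc_list() above sets ->hit on every address still present
 * in the device's mc_list and allocates fresh entries for new ones;
 * __send_mc_list() below announces the not-yet-sent entries
 * (->sent == 0) as "set" requests, then walks the list a second time,
 * unlinking and reporting as "unset" every entry whose ->hit flag was
 * not refreshed on this pass.
 */
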
static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}

	info.set = 0;

	n_addrs = 0;
	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}

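/* Illustrative only: addresses are batched VNET_NUM_MCAST at a time
 * (the batch size and the mcast_addr array are defined in the VIO
 * headers).  If the guest joined, say, 9 groups and VNET_NUM_MCAST
 * were 7, the loop above would emit one full "set" message with
 * count == 7 followed by a partial message with count == 2; the same
 * chunking applies to the "unset" pass.
 */
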
static void vnet_set_rx_mode(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);

		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
		}
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}

static int vnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu != ETH_DATA_LEN)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static int vnet_set_mac_addr(struct net_device *dev, void *p)
{
	return -EINVAL;
}

static void vnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static u32 vnet_get_msglevel(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	return vp->msg_enable;
}

static void vnet_set_msglevel(struct net_device *dev, u32 value)
{
	struct vnet *vp = netdev_priv(dev);
	vp->msg_enable = value;
}

static const struct ethtool_ops vnet_ethtool_ops = {
	.get_drvinfo		= vnet_get_drvinfo,
	.get_msglevel		= vnet_get_msglevel,
	.set_msglevel		= vnet_set_msglevel,
	.get_link		= ethtool_op_get_link,
};

static void vnet_port_free_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = port->tx_bufs[i].buf;

		if (!buf)
			continue;

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);

		kfree(buf);
		port->tx_bufs[i].buf = NULL;
	}
}

static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len;
	int i, err, ncookies;
	void *dring;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
		int map_len = (ETH_FRAME_LEN + 7) & ~7;

		err = -ENOMEM;
		if (!buf) {
			printk(KERN_ERR "TX buffer allocation failure\n");
			goto err_out;
		}
		err = -EFAULT;
		if ((unsigned long)buf & (8UL - 1)) {
			printk(KERN_ERR "TX buffer misaligned\n");
			kfree(buf);
			goto err_out;
		}

		err = ldc_map_single(port->vio.lp, buf, map_len,
				     port->tx_bufs[i].cookies, 2,
				     (LDC_MAP_SHADOW |
				      LDC_MAP_DIRECT |
				      LDC_MAP_RW));
		if (err < 0) {
			kfree(buf);
			goto err_out;
		}
		port->tx_bufs[i].buf = buf;
		port->tx_bufs[i].ncookies = err;
	}

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	len = (VNET_TX_RING_SIZE *
	       (sizeof(struct vio_net_desc) +
		(sizeof(struct ldc_trans_cookie) * 2)));

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = (sizeof(struct vio_net_desc) +
			  (sizeof(struct ldc_trans_cookie) * 2));
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;

err_out:
	vnet_port_free_tx_bufs(port);

	return err;
}

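/* Worked numbers for the allocation above (illustrative only): with
 * ETH_FRAME_LEN == 1514, each TX buffer is kzalloc'd at 1514 + 8 =
 * 1522 bytes but mapped over map_len = (1514 + 7) & ~7 = 1520 bytes,
 * satisfying the 8-byte copy-length rule while leaving room for the
 * VNET_PACKET_SKIP offset and the ETH_ZLEN padding performed in
 * vnet_start_xmit().  Each descriptor carries up to two transfer
 * cookies, which is why both the ldc_map_single() call and the
 * entry_size computation use 2.
 */
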
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);

static const struct net_device_ops vnet_ops = {
	.ndo_open		= vnet_open,
	.ndo_stop		= vnet_close,
	.ndo_set_multicast_list	= vnet_set_rx_mode,
	.ndo_set_mac_address	= vnet_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= vnet_tx_timeout,
	.ndo_change_mtu		= vnet_change_mtu,
	.ndo_start_xmit		= vnet_start_xmit,
};

static struct vnet * __devinit vnet_new(const u64 *local_mac)
{
	struct net_device *dev;
	struct vnet *vp;
	int err, i;

	dev = alloc_etherdev(sizeof(*vp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;

	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	vp = netdev_priv(dev);

	spin_lock_init(&vp->lock);
	vp->dev = dev;

	INIT_LIST_HEAD(&vp->port_list);
	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&vp->port_hash[i]);
	INIT_LIST_HEAD(&vp->list);
	vp->local_mac = *local_mac;

	dev->netdev_ops = &vnet_ops;
	dev->ethtool_ops = &vnet_ethtool_ops;
	dev->watchdog_timeo = VNET_TX_TIMEOUT;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_free_dev;
	}

	printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');

	list_add(&vp->list, &vnet_list);

	return vp;

err_out_free_dev:
	free_netdev(dev);

	return ERR_PTR(err);
}

static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
{
	struct vnet *iter, *vp;

	mutex_lock(&vnet_list_mutex);
	vp = NULL;
	list_for_each_entry(iter, &vnet_list, list) {
		if (iter->local_mac == *local_mac) {
			vp = iter;
			break;
		}
	}
	if (!vp)
		vp = vnet_new(local_mac);
	mutex_unlock(&vnet_list_mutex);

	return vp;
}

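/* Note: several vnet-port devices can hang off one logical network
 * interface.  vnet_new() registers the netdev and links the vnet onto
 * vnet_list keyed by the 48-bit "local-mac-address" value, so
 * vnet_find_or_create() returns the existing vnet when a second or
 * later port probes with the same MAC, and each such port merely
 * joins vp->port_list.
 */
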
static const char *local_mac_prop = "local-mac-address";

static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
						u64 port_node)
{
	const u64 *local_mac = NULL;
	u64 a;

	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(hp, a);
		const char *name;

		name = mdesc_get_property(hp, target, "name", NULL);
		if (!name || strcmp(name, "network"))
			continue;

		local_mac = mdesc_get_property(hp, target,
					       local_mac_prop, NULL);
		if (local_mac)
			break;
	}
	if (!local_mac)
		return ERR_PTR(-ENODEV);

	return vnet_find_or_create(local_mac);
}

static struct ldc_channel_config vnet_ldc_cfg = {
	.event		= vnet_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vnet_vio_ops = {
	.send_attr		= vnet_send_attr,
	.handle_attr		= vnet_handle_attr,
	.handshake_complete	= vnet_handshake_complete,
};

static void __devinit print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

const char *remote_macaddr_prop = "remote-mac-address";

static int __devinit vnet_port_probe(struct vio_dev *vdev,
				     const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vnet_port *port;
	unsigned long flags;
	struct vnet *vp;
	const u64 *rmac;
	int len, i, err, switch_port;

	print_version();

	hp = mdesc_grab();

	vp = vnet_find_parent(hp, vdev->mp);
	if (IS_ERR(vp)) {
		printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
		err = PTR_ERR(vp);
		goto err_out_put_mdesc;
	}

	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
	err = -ENODEV;
	if (!rmac) {
		printk(KERN_ERR PFX "Port lacks %s property.\n",
		       remote_macaddr_prop);
		goto err_out_put_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vnet_port.\n");
		goto err_out_put_mdesc;
	}

	for (i = 0; i < ETH_ALEN; i++)
		port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;

	port->vp = vp;

	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
			      vnet_versions, ARRAY_SIZE(vnet_versions),
			      &vnet_vio_ops, vp->dev->name);
	if (err)
		goto err_out_free_port;

	err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vnet_port_alloc_tx_bufs(port);
	if (err)
		goto err_out_free_ldc;

	INIT_HLIST_NODE(&port->hash);
	INIT_LIST_HEAD(&port->list);

	switch_port = 0;
	if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
		switch_port = 1;
	port->switch_port = switch_port;

	spin_lock_irqsave(&vp->lock, flags);
	if (switch_port)
		list_add(&port->list, &vp->port_list);
	else
		list_add_tail(&port->list, &vp->port_list);
	hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
	spin_unlock_irqrestore(&vp->lock, flags);

	dev_set_drvdata(&vdev->dev, port);

	printk(KERN_INFO "%s: PORT ( remote-mac %pM%s )\n",
	       vp->dev->name, port->raddr,
	       switch_port ? " switch-port" : "");

	vio_port_up(&port->vio);

	mdesc_release(hp);

	return 0;

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_put_mdesc:
	mdesc_release(hp);
	return err;
}

static int vnet_port_remove(struct vio_dev *vdev)
{
	struct vnet_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		struct vnet *vp = port->vp;
		unsigned long flags;

		del_timer_sync(&port->vio.timer);

		spin_lock_irqsave(&vp->lock, flags);
		list_del(&port->list);
		hlist_del(&port->hash);
		spin_unlock_irqrestore(&vp->lock, flags);

		vnet_port_free_tx_bufs(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}

static const struct vio_device_id vnet_port_match[] = {
	{
		.type = "vnet-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vnet_port_match);

static struct vio_driver vnet_port_driver = {
	.id_table	= vnet_port_match,
	.probe		= vnet_port_probe,
	.remove		= vnet_port_remove,
	.driver		= {
		.name	= "vnet_port",
		.owner	= THIS_MODULE,
	}
};

static int __init vnet_init(void)
{
	return vio_register_driver(&vnet_port_driver);
}

static void __exit vnet_exit(void)
{
	vio_unregister_driver(&vnet_port_driver);
}

module_init(vnet_init);
module_exit(vnet_exit);