Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.34, 2152 lines, 53 kB
/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "enic_res.h"
#include "enic.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET	0x0043	/* ethernet vnic */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
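/* The .offset values are in units of u64 words (offsetof / 8, i.e.
 * offsetof / sizeof(u64)); enic_get_ethtool_stats() below indexes the
 * hardware stats block as a flat u64 array:
 *
 *	((u64 *)&vstats->tx)[enic_tx_stats[i].offset]
 */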
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ecmd->speed = vnic_dev_port_speed(enic->vdev);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_fw_info(enic->vdev, &fw_info);
	spin_unlock(&enic->devcmd_lock);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &vstats);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}
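/* The string table filled in by enic_get_strings() and the u64 array
 * filled in by enic_get_ethtool_stats() are consumed pairwise by
 * userspace (e.g. "ethtool -S ethN"), so both must walk
 * enic_tx_stats[]/enic_rx_stats[] in the same order.
 */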
static u32 enic_get_rx_csum(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->csum_rx_enabled;
}

static int enic_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, RXCSUM))
		return -EINVAL;

	enic->csum_rx_enabled = !!data;

	return 0;
}

static int enic_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TXCSUM))
		return -EINVAL;

	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}

static int enic_set_tso(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TSO))
		return -EINVAL;

	if (data)
		netdev->features |=
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	else
		netdev->features &=
			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);

	return 0;
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	tx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->tx_coalesce_usecs);
	rx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->rx_coalesce_usecs);

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
			INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
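/* Coalescing timers are programmed in hardware ticks: requested usecs
 * are clamped to INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX) and
 * converted with INTR_COALESCE_USEC_TO_HW(). INTx and MSI modes share
 * one interrupt (and hence one timer) between WQ and RQ, which is why
 * tx == rx is required above.
 */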
static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_rx_csum = enic_get_rx_csum,
	.set_rx_csum = enic_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = enic_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = enic_set_tso,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
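/* MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS above is the worst-case
 * descriptor count for a single skb, matching the stop threshold in
 * enic_hard_start_xmit(); waking the queue only when that many
 * descriptors are free avoids an immediate re-stop.
 */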
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
		netif_carrier_off(enic->netdev);
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (mtu < enic->netdev->mtu)
			printk(KERN_WARNING PFX
				"%s: interface MTU (%d) set higher "
				"than switch port MTU (%d)\n",
				enic->netdev->name, enic->netdev->mtu, mtu);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
			enic->netdev->name, enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
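/* Legacy INTx is one shared PCI interrupt for everything; the ISR
 * below reads the PBA (pending bits array) and uses ENIC_TEST_INTR()
 * to demultiplex notify, error, and WQ/RQ work.
 */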
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (napi_schedule_prep(&enic->napi))
			__napi_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct enic *enic = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}
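/* TX descriptor chains: the skb head is posted as a start-of-packet
 * descriptor, page fragments follow as continuation descriptors
 * (enic_queue_wq_desc_cont), and the last buffer carries the EOP flag.
 */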
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0));	/* EOP? */
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_transport_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}
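/* TSO sends may exceed WQ_ENET_MAX_DESC_LEN per buffer, so each
 * fragment is carved into WQ_ENET_MAX_DESC_LEN-sized pieces below;
 * ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) bounds
 * the number of pieces any one fragment can produce.
 */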
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left));
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left));	/* EOP? */
			frag_len_left -= len;
			offset += len;
		}
	}
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;

	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
}
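/* Non-TSO sends are limited to ENIC_NON_TSO_MAX_DESC descriptors by
 * the hardware, so an over-fragmented non-TSO skb is linearized
 * (copied into a single buffer) below rather than rejected.
 */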
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	struct vnic_stats *stats;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &stats);
	spin_unlock(&enic->devcmd_lock);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

static void enic_reset_mcaddrs(struct enic *enic)
{
	enic->mc_count = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}
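/* The device implements a perfect multicast filter of
 * ENIC_MULTICAST_PERFECT_FILTERS entries; larger lists fall back to
 * IFF_ALLMULTI in the flags computation below.
 */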
/* netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct dev_mc_list *list;
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
	unsigned int mc_count = netdev_mc_count(netdev);
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;

	spin_lock(&enic->devcmd_lock);

	if (enic->flags != flags) {
		enic->flags = flags;
		vnic_dev_packet_filter(enic->vdev, directed,
			multicast, broadcast, promisc, allmulti);
	}

	/* Is there an easier way? Trying to minimize the
	 * calls to add/del multicast addrs. We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(list, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_del_multicast_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_add_multicast_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;

	spin_unlock(&enic->devcmd_lock);
}

/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);
	enic->vlan_group = vlan_group;
}

/* rtnl lock is held */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_add_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}

/* rtnl lock is held */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_del_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}
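/* RX buffers are sized from the current netdev MTU plus the Ethernet
 * header; netdev_alloc_skb_ip_align() reserves NET_IP_ALIGN bytes of
 * headroom so the IP header ends up aligned.
 */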
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
{
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);

	if (vnic_rq_posting_soon(rq)) {

		/* SW workaround for A0 HW erratum: if we're just about
		 * to write posted_index, insert a dummy desc
		 * of type resvd
		 */

		rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
		vnic_rq_post(rq, 0, 0, 0, 0);
	} else {
		return enic_rq_alloc_buf(rq);
	}

	return 0;
}

static int enic_set_rq_alloc_buf(struct enic *enic)
{
	enum vnic_dev_hw_version hw_ver;
	int err;

	err = vnic_dev_hw_version(enic->vdev, &hw_ver);
	if (err)
		return err;

	switch (hw_ver) {
	case VNIC_DEV_HW_VER_A1:
		enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
		break;
	case VNIC_DEV_HW_VER_A2:
	case VNIC_DEV_HW_VER_UNKNOWN:
		enic->rq_alloc_buf = enic_rq_alloc_buf;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
	void **tcph, u64 *hdr_flags, void *priv)
{
	struct cq_enet_rq_desc *cq_desc = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	cq_enet_rq_desc_dec(cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (!(ipv4 && tcp && !ipv4_fragment))
		return -1;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*tcph = tcp_hdr(skb);
	*iphdr = iph;

	return 0;
}
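/* RX completion: cq_enet_rq_desc_dec() unpacks the CQ descriptor. On
 * packet_error, bytes_written > 0 with a bad FCS counts as a CRC error
 * and bytes_written == 0 as a truncated frame (reported via
 * rx_crc_errors/rx_over_errors in enic_get_stats()).
 */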
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (enic->vlan_group && vlan_stripped) {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);

		} else {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);

		}

	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}
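/* NAPI contract: returning the full budget keeps this instance in
 * polling mode; completing with less re-enables the interrupt. A ring
 * re-fill failure therefore reports rq_work_done = rq_work_to_do so
 * another poll cycle retries the allocation.
 */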
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return rq_work_done;
}

static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return work_done;
}
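/* MSI mode has no dedicated notify interrupt, so a periodic timer
 * (ENIC_NOTIFY_TIMER_PERIOD, 2*HZ) polls for link/MTU/msglvl changes
 * instead; see enic_notify_timer_start().
 */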
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
			"%.11s-rx-0", netdev->name);
		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
		enic->msix[ENIC_MSIX_RQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
			"%.11s-tx-0", netdev->name);
		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
		enic->msix[ENIC_MSIX_WQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
			"%.11s-err", netdev->name);
		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
		enic->msix[ENIC_MSIX_ERR].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
			"%.11s-notify", netdev->name);
		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static int enic_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
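/* Bring-up order: request IRQs, set up the notify area, fill and
 * enable the rings, add the station MAC, enable the port, and unmask
 * interrupts last. enic_stop() unwinds in roughly reverse order.
 */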
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
			netdev->name);
		return err;
	}

	err = enic_notify_set(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to alloc notify buffer, aborting.\n",
			netdev->name);
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			printk(KERN_ERR PFX
				"%s: Unable to alloc receive buffers.\n",
				netdev->name);
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	spin_lock(&enic->devcmd_lock);
	enic_add_station_addr(enic);
	spin_unlock(&enic->devcmd_lock);
	enic_set_multicast_list(netdev);

	netif_wake_queue(netdev);
	napi_enable(&enic->napi);
	spin_lock(&enic->devcmd_lock);
	vnic_dev_enable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_mask(&enic->intr[i]);

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	spin_lock(&enic->devcmd_lock);
	vnic_dev_disable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	napi_disable(&enic->napi);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
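/* An MTU change takes a full stop/open cycle because RX buffers are
 * sized from netdev->mtu at ring-fill time (see enic_rq_alloc_buf()).
 */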
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		printk(KERN_WARNING PFX
			"%s: interface MTU (%d) set higher "
			"than port MTU (%d)\n",
			netdev->name, netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		enic_isr_msix_rq(enic->pdev->irq, enic);
		enic_isr_msix_wq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		printk(KERN_ERR PFX
			"vNIC device open failed, err %d.\n", err);

	return err;
}

static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
		vnic_dev_soft_reset_done, 0);
	if (err)
		printk(KERN_ERR PFX
			"vNIC soft reset failed, err %d.\n", err);

	return err;
}

static int enic_set_niccfg(struct enic *enic)
{
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = 0;
	const u8 rss_hash_bits = 0;
	const u8 rss_base_cpu = 0;
	const u8 rss_enable = 0;
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;

	/* Enable VLAN tag stripping. RSS not enabled (yet).
	 */

	return enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
}
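/* WQ/RQ error recovery, scheduled from the ISRs: notify the firmware
 * of the hang, stop the interface, soft-reset and re-init the vNIC,
 * then bring the interface back up.
 */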
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->devcmd_lock);
	vnic_dev_hang_notify(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	vnic_dev_init(enic->vdev, 0);
	enic_reset_mcaddrs(enic);
	enic_init_vnic_resources(enic);
	enic_set_niccfg(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}

static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = 1;
	unsigned int m = 1;
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending on
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
static const struct net_device_ops enic_netdev_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_set_multicast_list = enic_set_multicast_list,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_register = enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};

void enic_dev_deinit(struct enic *enic)
{
	netif_napi_del(&enic->napi);
	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

int enic_dev_init(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	int err;

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Get vNIC configuration failed, aborting.\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set intr mode based on resource "
			"counts and system capabilities, aborting.\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to alloc vNIC resources, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rq_alloc_buf(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set RQ buffer allocator, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	err = enic_set_niccfg(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to config nic, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
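/* Probe order: enable the PCI device, set the DMA mask (40-bit with a
 * 32-bit fallback), map the BARs, register/open/init the vNIC, then
 * register the net device; the error labels unwind in reverse.
 */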
static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	/* Allocate net device structure and initialize. Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot enable PCI device, aborting.\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot request PCI regions, aborting.\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 40-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX
				"No usable DMA configuration, aborting.\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 32-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 40-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			printk(KERN_ERR PFX
				"Cannot memory-map BAR %d, aborting.\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		printk(KERN_ERR PFX
			"vNIC registration failed, aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		printk(KERN_ERR PFX
			"vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		printk(KERN_ERR PFX
			"vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = enic_dev_init(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Device initialization failed, aborting.\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	spin_lock_init(&enic->devcmd_lock);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX
			"Invalid MAC address, aborting.\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	netdev->netdev_ops = &enic_netdev_ops;
	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, LRO))
		netdev->features |= NETIF_F_LRO;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
	enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
	enic->lro_mgr.lro_arr = enic->lro_desc;
	enic->lro_mgr.get_skb_header = enic_get_skb_header;
	enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	enic->lro_mgr.dev = netdev;
	enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
	enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot register net device, aborting.\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}

static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		flush_scheduled_work();
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};

static int __init enic_init_module(void)
{
	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);