drivers/net/vxge/vxge-main.c, from the Linux kernel mirror at
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (tag v2.6.33-rc2).
/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * vlan_tag_strip:
 *	Strip VLAN Tag enable/disable. Instructs the device to remove
 *	the VLAN tag from all received tagged frames that are not
 *	replicated at the internal L2 switch.
 *		0 - Do not strip the VLAN tag.
 *		1 - Strip the VLAN tag.
 *
 * addr_learn_en:
 *	Enable learning the mac address of the guest OS interface in
 *	a virtualization environment.
 *		0 - DISABLE
 *		1 - ENABLE
 *
 * max_config_port:
 *	Maximum number of ports to be supported.
 *		MIN - 1 and MAX - 2
 *
 * max_config_vpath:
 *	This configures the maximum number of VPATHs configured for each
 *	device function.
 *		MIN - 1 and MAX - 17
 *
 * max_config_dev:
 *	This configures the maximum number of device functions to be enabled.
 *		MIN - 1 and MAX - 17
 *
 ******************************************************************************/

#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "vxge-main.h"
#include "vxge-reg.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");

static struct pci_device_id vxge_id_table[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);

VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);

/*
 * vpath_selector[n - 1] is an all-ones bit mask wide enough to cover n
 * vpaths; vxge_get_vpath_no() below uses it to reduce a port-based hash
 * to a queue number (e.g. with 5 vpaths the mask is 7, and the result is
 * then clamped to the actual vpath count).
 */
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
		{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};

static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);

static struct vxge_drv_config *driver_config;

static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}

static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	unsigned long flags = 0;
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;
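
	/*
	 * Reap completed Tx descriptors in batches of up to
	 * NR_SKB_COMPLETED. spin_trylock_irqsave() is used rather than a
	 * blocking lock so this helper can run from paths that may already
	 * contend with vxge_xmit() on fifo->tx_lock; if the lock is busy,
	 * the skbs are simply picked up on a later pass.
	 */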
	do {
		more = 0;
		skb_ptr = completed;

		if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
				NR_SKB_COMPLETED, &more);
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}

static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}

/*
 * MultiQ manipulation helper functions
 */
void vxge_stop_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(dev);
}

void vxge_stop_tx_queue(struct vxge_fifo *fifo)
{
	struct net_device *dev = fifo->ndev;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
		txq = netdev_get_tx_queue(dev, fifo->driver_id);
	else {
		txq = netdev_get_tx_queue(dev, 0);
		fifo->queue_state = VPATH_QUEUE_STOP;
	}

	netif_tx_stop_queue(txq);
}

void vxge_start_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_start_all_queues(dev);
}

static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_wake_all_queues(dev);
}

void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
{
	struct net_device *dev = fifo->ndev;
	int vpath_no = fifo->driver_id;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
		txq = netdev_get_tx_queue(dev, vpath_no);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	} else {
		txq = netdev_get_tx_queue(dev, 0);
		if (fifo->queue_state == VPATH_QUEUE_STOP)
			if (netif_tx_queue_stopped(txq)) {
				fifo->queue_state = VPATH_QUEUE_START;
				netif_tx_wake_queue(txq);
			}
	}
}
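
/*
 * For non-multiqueue steering all vpaths share net queue 0, so
 * fifo->queue_state (VPATH_QUEUE_STOP/VPATH_QUEUE_START) tracks each
 * fifo's view of that single queue; the helpers above keep the per-fifo
 * state and the netdev queue state in step.
 */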

/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	vxge_wake_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	vxge_stop_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff*
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
		VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}

/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}
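
/*
 * Rx buffer lifecycle, as wired up below: vxge_rx_alloc() attaches a
 * fresh skb to the descriptor's private area, vxge_rx_map() maps it for
 * DMA and programs the RxD, and vxge_rx_initial_replenish() runs the
 * two of them for every descriptor when a ring is first brought up.
 */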

/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}

static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ring->gro_enable) {
		if (ring->vlgrp && ext_info->vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_gro_receive(ring->napi_p, ring->vlgrp,
					ext_info->vlan, skb);
		else
			napi_gro_receive(ring->napi_p, skb);
	} else {
		if (ring->vlgrp && vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
		else
			netif_receive_skb(skb);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}

static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}

static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;

	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}
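
/*
 * vxge_post() batches RxD returns: only every VXGE_HW_RXSYNC_FREQ_CNT-th
 * descriptor is posted with a write memory barrier (the *first_dtr that
 * the completion loop flushes at the end); the rest are posted without
 * one, keeping the ordering cost off the per-packet path.
 */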

/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	ring->pkts_processed = 0;

	vxge_hw_ring_replenish(ringh, 0);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {
			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {
				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
			/* large frame: hand the current skb up the stack and
			 * try to replace it with a freshly mapped buffer */
			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					break;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				break;
			}
		} else {
			/* small frame: copy-break into a new skb and recycle
			 * the original buffer */
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb_up);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    ring->rx_csum && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...",
		__func__, __LINE__);
	return VXGE_HW_OK;
}

/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NIC's
 * internal memory.
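 *
 * At most nr_skb skbs are handed back per call through *skb_ptr; when
 * further completed descriptors remain, *more is set so the caller can
 * make another pass (a sketch of the contract as it is used by
 * VXGE_COMPLETE_VPATH_TX() above).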
 */
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					frag->size, PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	vxge_wake_tx_queue(fifo, skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}

/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
			     int *do_lock)
{
	u16 queue_len, counter = 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;

			if (ip->protocol == IPPROTO_UDP) {
#ifdef NETIF_F_LLTX
				*do_lock = 0;
#endif
			}
		}
	}
	return counter;
}

static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}
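
/*
 * vxge_learn_mac() below places a newly seen source MAC either in some
 * vpath's hardware DA table or, when every table is full, in the
 * driver-side list of vpath 0 after switching that vpath into
 * "catch-basin" mode so it receives the otherwise unmatched frames
 * (terminology from the code below; the exact semantics are the
 * firmware's).
 */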
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}
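
/*
 * Note the return convention above: a non-negative value is the vpath
 * index the address now maps to, while -EPERM reports failure;
 * vxge_xmit() below relies on this when addr_learn_en is set.
 */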

/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when the device can't queue the pkt, just the trans_start variable
 * will not be updated.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	unsigned long flags = 0;
	int vpath_no = 0;
	int do_spin_tx_lock = 1;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = (struct vxgedev *)netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (do_spin_tx_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
		if (netif_subqueue_stopped(dev, skb)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		vxge_stop_tx_queue(fifo);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		vxge_stop_tx_queue(fifo);
		fifo->stats.pci_map_fail++;
		goto _exit2;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d skb = %p txdl_priv = %p "
		"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
		__func__, __LINE__, skb, txdl_priv,
		frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!frag->size)
			continue;

		dma_pointer =
			(u64)pci_map_page(fifo->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE);

		if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
			goto _exit0;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
			dev->name, __func__, __LINE__, i,
			(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
			frag->size);
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		int mss = vxge_tcp_mss(skb);

		if (mss) {
			vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
			VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);
#ifdef NETIF_F_LLTX
	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
#endif
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	VXGE_COMPLETE_VPATH_TX(fifo);
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit0:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);

_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}
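
	/* every buffer mapped so far has been unmapped; return the
	 * descriptor to the fifo and fall through to drop the skb */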
	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit2:
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	VXGE_COMPLETE_VPATH_TX(fifo);

	return NETDEV_TX_OK;
}

/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}

/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
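
/*
 * The two _term callbacks above are registered with the hw layer (see
 * vxge_open_vpaths() at the end of this file) and run only for
 * descriptors still in the POSTED state, i.e. buffers the hardware will
 * never complete; they just undo the DMA mapping and drop the skb.
 */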

/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine whether multicast addresses must be enabled or whether
 * promiscuous mode is to be disabled, etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *mclist;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
					vdev->vpaths[i].handle);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_disable(
					vdev->vpaths[i].handle);
			vdev->all_multi_flg = 0;
		}
	}

	if (status != VXGE_HW_OK)
		vxge_debug_init(VXGE_ERR,
			"failed to %s multicast, status %d",
			dev->flags & IFF_ALLMULTI ?
			"enable" : "disable", status);

	if (!vdev->config.addr_learn_en) {
		if (dev->flags & IFF_PROMISC) {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_enable(
						vdev->vpaths[i].handle);
			}
		} else {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_disable(
						vdev->vpaths[i].handle);
			}
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && dev->mc_count) {
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((dev->mc_count +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			if (!list_empty(list_head))
				mac_entry = (struct vxge_mac_addrs *)
					list_first_entry(list_head,
						struct vxge_mac_addrs,
						item);

			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0]) {
					for (vpath_idx = 0; vpath_idx <
							vdev->no_of_vpath;
							vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}
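
		/*
		 * The capacity test above already guaranteed that vpath 0's
		 * DA table can hold the whole new list; when it cannot, we
		 * jump to _set_all_mcast and fall back to hardware
		 * all-multicast instead.
		 */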
failed", 1233 __func__, __LINE__); 1234 goto _set_all_mcast; 1235 } 1236 } 1237 } 1238 1239 return; 1240_set_all_mcast: 1241 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; 1242 /* Delete previous MC's */ 1243 for (i = 0; i < mcast_cnt; i++) { 1244 1245 list_for_each_safe(entry, next, list_head) { 1246 1247 mac_entry = (struct vxge_mac_addrs *) entry; 1248 /* Copy the mac address to delete */ 1249 mac_address = (u8 *)&mac_entry->macaddr; 1250 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1251 1252 /* Is this a multicast address */ 1253 if (0x01 & mac_info.macaddr[0]) 1254 break; 1255 } 1256 1257 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; 1258 vpath_idx++) { 1259 mac_info.vpath_no = vpath_idx; 1260 status = vxge_del_mac_addr(vdev, &mac_info); 1261 } 1262 } 1263 1264 /* Enable all multicast */ 1265 for (i = 0; i < vdev->no_of_vpath; i++) { 1266 vxge_assert(vdev->vpaths[i].is_open); 1267 status = vxge_hw_vpath_mcast_enable( 1268 vdev->vpaths[i].handle); 1269 if (status != VXGE_HW_OK) { 1270 vxge_debug_init(VXGE_ERR, 1271 "%s:%d Enabling all multicasts failed", 1272 __func__, __LINE__); 1273 } 1274 vdev->all_multi_flg = 1; 1275 } 1276 dev->flags |= IFF_ALLMULTI; 1277 } 1278 1279 vxge_debug_entryexit(VXGE_TRACE, 1280 "%s:%d Exiting...", __func__, __LINE__); 1281} 1282 1283/** 1284 * vxge_set_mac_addr 1285 * @dev: pointer to the device structure 1286 * 1287 * Update entry "0" (default MAC addr) 1288 */ 1289static int vxge_set_mac_addr(struct net_device *dev, void *p) 1290{ 1291 struct sockaddr *addr = p; 1292 struct vxgedev *vdev; 1293 struct __vxge_hw_device *hldev; 1294 enum vxge_hw_status status = VXGE_HW_OK; 1295 struct macInfo mac_info_new, mac_info_old; 1296 int vpath_idx = 0; 1297 1298 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1299 1300 vdev = (struct vxgedev *)netdev_priv(dev); 1301 hldev = vdev->devh; 1302 1303 if (!is_valid_ether_addr(addr->sa_data)) 1304 return -EINVAL; 1305 1306 memset(&mac_info_new, 0, sizeof(struct macInfo)); 1307 memset(&mac_info_old, 0, sizeof(struct macInfo)); 1308 1309 vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", 1310 __func__, __LINE__); 1311 1312 /* Get the old address */ 1313 memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len); 1314 1315 /* Copy the new address */ 1316 memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len); 1317 1318 /* First delete the old mac address from all the vpaths 1319 as we can't specify the index while adding new mac address */ 1320 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { 1321 struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx]; 1322 if (!vpath->is_open) { 1323 /* This can happen when this interface is added/removed 1324 to the bonding interface. 
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];

		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}

/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id, alarm_msix_id;
	int tim_msix_id[4] = {[0 ...3] = 0};

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		alarm_msix_id =
			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;

		tim_msix_id[0] = msix_id;
		tim_msix_id[1] = msix_id + 1;
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
	}
}

/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id;

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}
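
/*
 * MSI-X vector layout implied by the arithmetic above: each vpath owns
 * a block of VXGE_HW_VPATH_MSIX_ACTIVE vectors starting at
 * vp_id * VXGE_HW_VPATH_MSIX_ACTIVE (the first two carry the Tx/Rx TIM
 * events), and one shared alarm vector sits near the end of the range.
 * E.g., if VXGE_HW_VPATH_MSIX_ACTIVE were 4 with two vpaths, vpath 1
 * would use vectors 4 and 5 and the alarm vector would be 4 * 2 - 2 = 6.
 */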

/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vdev->vpaths[vp_id].handle) {
		if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
				== VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[vp_id].handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
	vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
	vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);

	return ret;
}

/*
 * do_vxge_reset() drives the chip reset sequence; 'event' selects either
 * a VXGE_LL_FULL_RESET in one call or the two-stage VXGE_LL_START_RESET /
 * VXGE_LL_COMPL_RESET pair, and each step below is gated on which event
 * is being handled.
 */
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			vxge_stop_all_tx_queue(vdev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		vxge_stop_all_tx_queue(vdev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		vxge_wake_all_tx_queue(vdev);
	}

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
	return ret;
}

/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
int vxge_reset(struct vxgedev *vdev)
{
	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
	return 0;
}

/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @napi: pointer to the napi structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into the picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
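 *
 * Per the NAPI contract as implemented below, the handler returns the
 * number of packets it processed and calls napi_complete() plus
 * re-enables the interrupt source only when it consumed less than its
 * budget.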
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring =
		container_of(napi, struct vxge_ring, napi);
	int budget_org = budget;

	ring->budget = budget;

	vxge_hw_vpath_poll_rx(ring->handle);

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
				(struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);
	}

	return ring->pkts_processed;
}

static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
		pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *	This function will be called by upper layer to check for events on the
 *	interface in situations where interrupts are disabled. It is used for
 *	specific in-kernel networking tasks, such as remote consoles and kernel
 *	debugging over the network (example netdump in RedHat).
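 *	Note that the handler below briefly disables the device IRQ and
 *	reaps any pending Tx/Rx completions synchronously.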
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);
	vxge_hw_device_clear_tx_rx(hldev);

	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif

/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
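 *
 * As a worked example of the fill loop above: with rth_bkt_sz == 2 and
 * three vpaths there are 1 << 2 = 4 buckets, so itable = {0, 1, 2, 3}
 * and mtable = {0, 1, 2, 0}.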
 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);

		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}

int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	/* Is this a multicast address */
	if (0x01 & mac->macaddr[0])
		vpath->mcast_addr_cnt++;

	return TRUE;
}

/* Add a mac address to DA table */
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (0x01 & mac->macaddr[0]) /* multicast address */
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}

int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			/* Is this a multicast address */
			if (0x01 & mac->macaddr[0])
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}

/* delete a mac address from DA table */
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}
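
/*
 * The driver keeps a software copy of every address it programs (the
 * mac_addr_list walked by vxge_mac_list_add/del above); the helper
 * below instead walks the hardware DA table itself via the
 * _mac_addr_get/_mac_addr_get_next pair, stopping on a match or when
 * the hw layer reports no more entries.
 */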
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, 1940 struct macInfo *mac) 1941{ 1942 enum vxge_hw_status status = VXGE_HW_OK; 1943 unsigned char macmask[ETH_ALEN]; 1944 unsigned char macaddr[ETH_ALEN]; 1945 1946 status = vxge_hw_vpath_mac_addr_get(vpath->handle, 1947 macaddr, macmask); 1948 if (status != VXGE_HW_OK) { 1949 vxge_debug_init(VXGE_ERR, 1950 "DA config list entry failed for vpath:%d", 1951 vpath->device_id); 1952 return status; 1953 } 1954 1955 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) { 1956 1957 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, 1958 macaddr, macmask); 1959 if (status != VXGE_HW_OK) 1960 break; 1961 } 1962 1963 return status; 1964} 1965 1966/* Store all vlan ids from the vlan group to the vid table */ 1967enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) 1968{ 1969 enum vxge_hw_status status = VXGE_HW_OK; 1970 struct vxgedev *vdev = vpath->vdev; 1971 u16 vid; 1972 1973 if (vdev->vlgrp && vpath->is_open) { 1974 1975 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1976 if (!vlan_group_get_device(vdev->vlgrp, vid)) 1977 continue; 1978 /* Add this vlan to the vid table */ 1979 status = vxge_hw_vpath_vid_add(vpath->handle, vid); 1980 } 1981 } 1982 1983 return status; 1984} 1985 1986/* Store all mac addresses from the list to the DA table */ 1987enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) 1988{ 1989 enum vxge_hw_status status = VXGE_HW_OK; 1990 struct macInfo mac_info; 1991 u8 *mac_address = NULL; 1992 struct list_head *entry, *next; 1993 1994 memset(&mac_info, 0, sizeof(struct macInfo)); 1995 1996 if (vpath->is_open) { 1997 1998 list_for_each_safe(entry, next, &vpath->mac_addr_list) { 1999 mac_address = 2000 (u8 *)& 2001 ((struct vxge_mac_addrs *)entry)->macaddr; 2002 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 2003 ((struct vxge_mac_addrs *)entry)->state = 2004 VXGE_LL_MAC_ADDR_IN_DA_TABLE; 2005 /* does this mac address already exist in da table?
*/ 2006 status = vxge_search_mac_addr_in_da_table(vpath, 2007 &mac_info); 2008 if (status != VXGE_HW_OK) { 2009 /* Add this mac address to the DA table */ 2010 status = vxge_hw_vpath_mac_addr_add( 2011 vpath->handle, mac_info.macaddr, 2012 mac_info.macmask, 2013 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); 2014 if (status != VXGE_HW_OK) { 2015 vxge_debug_init(VXGE_ERR, 2016 "DA add entry failed for vpath:%d", 2017 vpath->device_id); 2018 ((struct vxge_mac_addrs *)entry)->state 2019 = VXGE_LL_MAC_ADDR_IN_LIST; 2020 } 2021 } 2022 } 2023 } 2024 2025 return status; 2026} 2027 2028/* reset vpaths */ 2029enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) 2030{ 2031 int i; 2032 enum vxge_hw_status status = VXGE_HW_OK; 2033 2034 for (i = 0; i < vdev->no_of_vpath; i++) 2035 if (vdev->vpaths[i].handle) { 2036 if (vxge_hw_vpath_reset(vdev->vpaths[i].handle) 2037 == VXGE_HW_OK) { 2038 if (is_vxge_card_up(vdev) && 2039 vxge_hw_vpath_recover_from_reset( 2040 vdev->vpaths[i].handle) 2041 != VXGE_HW_OK) { 2042 vxge_debug_init(VXGE_ERR, 2043 "vxge_hw_vpath_recover_" 2044 "from_reset failed for vpath: " 2045 "%d", i); 2046 return VXGE_HW_FAIL; 2047 } 2048 } else { 2049 vxge_debug_init(VXGE_ERR, 2050 "vxge_hw_vpath_reset failed for " 2051 "vpath:%d", i); 2052 return VXGE_HW_FAIL; 2053 } 2054 } 2055 return status; 2056} 2057 2058/* close vpaths */ 2059void vxge_close_vpaths(struct vxgedev *vdev, int index) 2060{ 2061 int i; 2062 for (i = index; i < vdev->no_of_vpath; i++) { 2063 if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) { 2064 vxge_hw_vpath_close(vdev->vpaths[i].handle); 2065 vdev->stats.vpaths_open--; 2066 } 2067 vdev->vpaths[i].is_open = 0; 2068 vdev->vpaths[i].handle = NULL; 2069 } 2070} 2071 2072/* open vpaths */ 2073int vxge_open_vpaths(struct vxgedev *vdev) 2074{ 2075 enum vxge_hw_status status; 2076 int i; 2077 u32 vp_id = 0; 2078 struct vxge_hw_vpath_attr attr; 2079 2080 for (i = 0; i < vdev->no_of_vpath; i++) { 2081 vxge_assert(vdev->vpaths[i].is_configured); 2082 attr.vp_id = vdev->vpaths[i].device_id; 2083 attr.fifo_attr.callback = vxge_xmit_compl; 2084 attr.fifo_attr.txdl_term = vxge_tx_term; 2085 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv); 2086 attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo; 2087 2088 attr.ring_attr.callback = vxge_rx_1b_compl; 2089 attr.ring_attr.rxd_init = vxge_rx_initial_replenish; 2090 attr.ring_attr.rxd_term = vxge_rx_term; 2091 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv); 2092 attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring; 2093 2094 vdev->vpaths[i].ring.ndev = vdev->ndev; 2095 vdev->vpaths[i].ring.pdev = vdev->pdev; 2096 status = vxge_hw_vpath_open(vdev->devh, &attr, 2097 &(vdev->vpaths[i].handle)); 2098 if (status == VXGE_HW_OK) { 2099 vdev->vpaths[i].fifo.handle = 2100 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata; 2101 vdev->vpaths[i].ring.handle = 2102 (struct __vxge_hw_ring *)attr.ring_attr.userdata; 2103 vdev->vpaths[i].fifo.tx_steering_type = 2104 vdev->config.tx_steering_type; 2105 vdev->vpaths[i].fifo.ndev = vdev->ndev; 2106 vdev->vpaths[i].fifo.pdev = vdev->pdev; 2107 vdev->vpaths[i].fifo.indicate_max_pkts = 2108 vdev->config.fifo_indicate_max_pkts; 2109 vdev->vpaths[i].ring.rx_vector_no = 0; 2110 vdev->vpaths[i].ring.rx_csum = vdev->rx_csum; 2111 vdev->vpaths[i].is_open = 1; 2112 vdev->vp_handles[i] = vdev->vpaths[i].handle; 2113 vdev->vpaths[i].ring.gro_enable = 2114 vdev->config.gro_enable; 2115 vdev->vpaths[i].ring.vlan_tag_strip = 2116 vdev->vlan_tag_strip; 2117 vdev->stats.vpaths_open++; 2118 }
else { 2119 vdev->stats.vpath_open_fail++; 2120 vxge_debug_init(VXGE_ERR, 2121 "%s: vpath: %d failed to open " 2122 "with status: %d", 2123 vdev->ndev->name, vdev->vpaths[i].device_id, 2124 status); 2125 vxge_close_vpaths(vdev, 0); 2126 return -EPERM; 2127 } 2128 2129 vp_id = 2130 ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)-> 2131 vpath->vp_id; 2132 vdev->vpaths_deployed |= vxge_mBIT(vp_id); 2133 } 2134 return VXGE_HW_OK; 2135} 2136 2137/* 2138 * vxge_isr_napi 2139 * @irq: the irq of the device. 2140 * @dev_id: a void pointer to the vxgedev structure of the Titan device 2141 * 2142 * 2143 * This function is the ISR handler of the device when napi is enabled. It 2144 * identifies the reason for the interrupt and calls the relevant service 2145 * routines. 2146 */ 2147static irqreturn_t vxge_isr_napi(int irq, void *dev_id) 2148{ 2149 struct net_device *dev; 2150 struct __vxge_hw_device *hldev; 2151 u64 reason; 2152 enum vxge_hw_status status; 2153 struct vxgedev *vdev = (struct vxgedev *) dev_id; 2154 2155 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); 2156 2157 dev = vdev->ndev; 2158 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); 2159 2160 if (pci_channel_offline(vdev->pdev)) 2161 return IRQ_NONE; 2162 2163 if (unlikely(!is_vxge_card_up(vdev))) 2164 return IRQ_NONE; 2165 2166 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, 2167 &reason); 2168 if (status == VXGE_HW_OK) { 2169 vxge_hw_device_mask_all(hldev); 2170 2171 if (reason & 2172 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT( 2173 vdev->vpaths_deployed >> 2174 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) { 2175 2176 vxge_hw_device_clear_tx_rx(hldev); 2177 napi_schedule(&vdev->napi); 2178 vxge_debug_intr(VXGE_TRACE, 2179 "%s:%d Exiting...", __func__, __LINE__); 2180 return IRQ_HANDLED; 2181 } else 2182 vxge_hw_device_unmask_all(hldev); 2183 } else if (unlikely((status == VXGE_HW_ERR_VPATH) || 2184 (status == VXGE_HW_ERR_CRITICAL) || 2185 (status == VXGE_HW_ERR_FIFO))) { 2186 vxge_hw_device_mask_all(hldev); 2187 vxge_hw_device_flush_io(hldev); 2188 return IRQ_HANDLED; 2189 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE)) 2190 return IRQ_HANDLED; 2191 2192 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); 2193 return IRQ_NONE; 2194} 2195 2196#ifdef CONFIG_PCI_MSI 2197 2198static irqreturn_t 2199vxge_tx_msix_handle(int irq, void *dev_id) 2200{ 2201 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; 2202 2203 VXGE_COMPLETE_VPATH_TX(fifo); 2204 2205 return IRQ_HANDLED; 2206} 2207 2208static irqreturn_t 2209vxge_rx_msix_napi_handle(int irq, void *dev_id) 2210{ 2211 struct vxge_ring *ring = (struct vxge_ring *)dev_id; 2212 2213 /* MSIX_IDX for Rx is 1 */ 2214 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, 2215 ring->rx_vector_no); 2216 2217 napi_schedule(&ring->napi); 2218 return IRQ_HANDLED; 2219} 2220 2221static irqreturn_t 2222vxge_alarm_msix_handle(int irq, void *dev_id) 2223{ 2224 int i; 2225 enum vxge_hw_status status; 2226 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id; 2227 struct vxgedev *vdev = vpath->vdev; 2228 int alarm_msix_id = 2229 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; 2230 2231 for (i = 0; i < vdev->no_of_vpath; i++) { 2232 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, 2233 alarm_msix_id); 2234 2235 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, 2236 vdev->exec_mode); 2237 if (status == VXGE_HW_OK) { 2238 2239
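/* The alarm vector shares the last hardware MSI-X slot; e.g., assuming
 * VXGE_HW_VPATH_MSIX_ACTIVE is 4 and four vpaths are open, alarm_msix_id
 * above works out to 4 * 4 - 2 = 14, after the per-vpath Tx/Rx slots.
 */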
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, 2240 alarm_msix_id); 2241 continue; 2242 } 2243 vxge_debug_intr(VXGE_ERR, 2244 "%s: vxge_hw_vpath_alarm_process failed %x ", 2245 VXGE_DRIVER_NAME, status); 2246 } 2247 return IRQ_HANDLED; 2248} 2249 2250static int vxge_alloc_msix(struct vxgedev *vdev) 2251{ 2252 int j, i, ret = 0; 2253 int intr_cnt = 0; 2254 int alarm_msix_id = 0, msix_intr_vect = 0; 2255 vdev->intr_cnt = 0; 2256 2257 /* Tx/Rx MSIX Vectors count */ 2258 vdev->intr_cnt = vdev->no_of_vpath * 2; 2259 2260 /* Alarm MSIX Vectors count */ 2261 vdev->intr_cnt++; 2262 2263 intr_cnt = (vdev->max_vpath_supported * 2) + 1; 2264 vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry), 2265 GFP_KERNEL); 2266 if (!vdev->entries) { 2267 vxge_debug_init(VXGE_ERR, 2268 "%s: memory allocation failed", 2269 VXGE_DRIVER_NAME); 2270 return -ENOMEM; 2271 } 2272 2273 vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry), 2274 GFP_KERNEL); 2275 if (!vdev->vxge_entries) { 2276 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", 2277 VXGE_DRIVER_NAME); 2278 kfree(vdev->entries); 2279 return -ENOMEM; 2280 } 2281 2282 /* Last vector in the list is used for alarm */ 2283 alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; 2284 for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) { 2285 2286 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; 2287 2288 /* Initialize the fifo vector */ 2289 vdev->entries[j].entry = msix_intr_vect; 2290 vdev->vxge_entries[j].entry = msix_intr_vect; 2291 vdev->vxge_entries[j].in_use = 0; 2292 j++; 2293 2294 /* Initialize the ring vector */ 2295 vdev->entries[j].entry = msix_intr_vect + 1; 2296 vdev->vxge_entries[j].entry = msix_intr_vect + 1; 2297 vdev->vxge_entries[j].in_use = 0; 2298 j++; 2299 } 2300 2301 /* Initialize the alarm vector */ 2302 vdev->entries[j].entry = alarm_msix_id; 2303 vdev->vxge_entries[j].entry = alarm_msix_id; 2304 vdev->vxge_entries[j].in_use = 0; 2305 2306 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt); 2307 /* if the driver request exceeds the available IRQs, retry with a 2308 * smaller number.
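 * (In this kernel, pci_enable_msix() returns 0 on success, a negative
 * errno on error, or a positive count of the vectors actually available
 * when the request cannot be met; the block below then retries with a
 * request sized for the open vpaths only.)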
2309 */ 2310 if (ret > 0) { 2311 vxge_debug_init(VXGE_ERR, 2312 "%s: MSI-X enable failed for %d vectors, available: %d", 2313 VXGE_DRIVER_NAME, intr_cnt, ret); 2314 vdev->max_vpath_supported = vdev->no_of_vpath; 2315 intr_cnt = (vdev->max_vpath_supported * 2) + 1; 2316 2317 /* Reset the alarm vector setting */ 2318 vdev->entries[j].entry = 0; 2319 vdev->vxge_entries[j].entry = 0; 2320 2321 /* Initialize the alarm vector with new setting */ 2322 vdev->entries[intr_cnt - 1].entry = alarm_msix_id; 2323 vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id; 2324 vdev->vxge_entries[intr_cnt - 1].in_use = 0; 2325 2326 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt); 2327 if (!ret) 2328 vxge_debug_init(VXGE_ERR, 2329 "%s: MSI-X enabled for %d vectors", 2330 VXGE_DRIVER_NAME, intr_cnt); 2331 } 2332 2333 if (ret) { 2334 vxge_debug_init(VXGE_ERR, 2335 "%s: MSI-X enable failed for %d vectors, ret: %d", 2336 VXGE_DRIVER_NAME, intr_cnt, ret); 2337 kfree(vdev->entries); 2338 kfree(vdev->vxge_entries); 2339 vdev->entries = NULL; 2340 vdev->vxge_entries = NULL; 2341 return -ENODEV; 2342 } 2343 return 0; 2344} 2345 2346static int vxge_enable_msix(struct vxgedev *vdev) 2347{ 2348 2349 int i, ret = 0; 2350 enum vxge_hw_status status; 2351 /* 0 - Tx, 1 - Rx */ 2352 int tim_msix_id[4]; 2353 int alarm_msix_id = 0, msix_intr_vect = 0; 2354 vdev->intr_cnt = 0; 2355 2356 /* allocate msix vectors */ 2357 ret = vxge_alloc_msix(vdev); 2358 if (!ret) { 2359 /* Last vector in the list is used for alarm */ 2360 alarm_msix_id = 2361 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; 2362 for (i = 0; i < vdev->no_of_vpath; i++) { 2363 2364 /* If the fifo or ring is not enabled, 2365 the MSI-X vector for it should be set to 0. 2366 Hence initializing this array to all 0s. 2367 */ 2368 memset(tim_msix_id, 0, sizeof(tim_msix_id)); 2369 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; 2370 tim_msix_id[0] = msix_intr_vect; 2371 2372 tim_msix_id[1] = msix_intr_vect + 1; 2373 vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1]; 2374 2375 status = vxge_hw_vpath_msix_set( 2376 vdev->vpaths[i].handle, 2377 tim_msix_id, alarm_msix_id); 2378 if (status != VXGE_HW_OK) { 2379 vxge_debug_init(VXGE_ERR, 2380 "vxge_hw_vpath_msix_set " 2381 "failed with status : %x", status); 2382 kfree(vdev->entries); 2383 kfree(vdev->vxge_entries); 2384 pci_disable_msix(vdev->pdev); 2385 return -ENODEV; 2386 } 2387 } 2388 } 2389 2390 return ret; 2391} 2392 2393static void vxge_rem_msix_isr(struct vxgedev *vdev) 2394{ 2395 int intr_cnt; 2396 2397 for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1); 2398 intr_cnt++) { 2399 if (vdev->vxge_entries[intr_cnt].in_use) { 2400 synchronize_irq(vdev->entries[intr_cnt].vector); 2401 free_irq(vdev->entries[intr_cnt].vector, 2402 vdev->vxge_entries[intr_cnt].arg); 2403 vdev->vxge_entries[intr_cnt].in_use = 0; 2404 } 2405 } 2406 2407 kfree(vdev->entries); 2408 kfree(vdev->vxge_entries); 2409 vdev->entries = NULL; 2410 vdev->vxge_entries = NULL; 2411 2412 if (vdev->config.intr_type == MSI_X) 2413 pci_disable_msix(vdev->pdev); 2414} 2415#endif 2416 2417static void vxge_rem_isr(struct vxgedev *vdev) 2418{ 2419 struct __vxge_hw_device *hldev; 2420 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2421 2422#ifdef CONFIG_PCI_MSI 2423 if (vdev->config.intr_type == MSI_X) { 2424 vxge_rem_msix_isr(vdev); 2425 } else 2426#endif 2427 if (vdev->config.intr_type == INTA) { 2428 synchronize_irq(vdev->pdev->irq); 2429 free_irq(vdev->pdev->irq, vdev); 2430 } 2431} 2432 2433static int
vxge_add_isr(struct vxgedev *vdev) 2434{ 2435 int ret = 0; 2436#ifdef CONFIG_PCI_MSI 2437 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; 2438 int pci_fun = PCI_FUNC(vdev->pdev->devfn); 2439 2440 if (vdev->config.intr_type == MSI_X) 2441 ret = vxge_enable_msix(vdev); 2442 2443 if (ret) { 2444 vxge_debug_init(VXGE_ERR, 2445 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME); 2446 vxge_debug_init(VXGE_ERR, 2447 "%s: Defaulting to INTA", VXGE_DRIVER_NAME); 2448 vdev->config.intr_type = INTA; 2449 } 2450 2451 if (vdev->config.intr_type == MSI_X) { 2452 for (intr_idx = 0; 2453 intr_idx < (vdev->no_of_vpath * 2454 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { 2455 2456 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE; 2457 irq_req = 0; 2458 2459 switch (msix_idx) { 2460 case 0: 2461 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2462 "%s:vxge fn: %d vpath: %d Tx MSI-X: %d", 2463 vdev->ndev->name, pci_fun, vp_idx, 2464 vdev->entries[intr_cnt].entry); 2465 ret = request_irq( 2466 vdev->entries[intr_cnt].vector, 2467 vxge_tx_msix_handle, 0, 2468 vdev->desc[intr_cnt], 2469 &vdev->vpaths[vp_idx].fifo); 2470 vdev->vxge_entries[intr_cnt].arg = 2471 &vdev->vpaths[vp_idx].fifo; 2472 irq_req = 1; 2473 break; 2474 case 1: 2475 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2476 "%s:vxge fn: %d vpath: %d Rx MSI-X: %d", 2477 vdev->ndev->name, pci_fun, vp_idx, 2478 vdev->entries[intr_cnt].entry); 2479 ret = request_irq( 2480 vdev->entries[intr_cnt].vector, 2481 vxge_rx_msix_napi_handle, 2482 0, 2483 vdev->desc[intr_cnt], 2484 &vdev->vpaths[vp_idx].ring); 2485 vdev->vxge_entries[intr_cnt].arg = 2486 &vdev->vpaths[vp_idx].ring; 2487 irq_req = 1; 2488 break; 2489 } 2490 2491 if (ret) { 2492 vxge_debug_init(VXGE_ERR, 2493 "%s: MSIX - %d Registration failed", 2494 vdev->ndev->name, intr_cnt); 2495 vxge_rem_msix_isr(vdev); 2496 vdev->config.intr_type = INTA; 2497 vxge_debug_init(VXGE_ERR, 2498 "%s: Defaulting to INTA" 2499 , vdev->ndev->name); 2500 goto INTA_MODE; 2501 } 2502 2503 if (irq_req) { 2504 /* We requested this msix interrupt */ 2505 vdev->vxge_entries[intr_cnt].in_use = 1; 2506 vxge_hw_vpath_msix_unmask( 2507 vdev->vpaths[vp_idx].handle, 2508 intr_idx); 2509 intr_cnt++; 2510 } 2511 2512 /* Point to next vpath handler */ 2513 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) && 2514 (vp_idx < (vdev->no_of_vpath - 1))) 2515 vp_idx++; 2516 } 2517 2518 intr_cnt = vdev->max_vpath_supported * 2; 2519 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2520 "%s:vxge Alarm fn: %d MSI-X: %d", 2521 vdev->ndev->name, pci_fun, 2522 vdev->entries[intr_cnt].entry); 2523 /* For Alarm interrupts */ 2524 ret = request_irq(vdev->entries[intr_cnt].vector, 2525 vxge_alarm_msix_handle, 0, 2526 vdev->desc[intr_cnt], 2527 &vdev->vpaths[vp_idx]); 2528 if (ret) { 2529 vxge_debug_init(VXGE_ERR, 2530 "%s: MSIX - %d Registration failed", 2531 vdev->ndev->name, intr_cnt); 2532 vxge_rem_msix_isr(vdev); 2533 vdev->config.intr_type = INTA; 2534 vxge_debug_init(VXGE_ERR, 2535 "%s: Defaulting to INTA", 2536 vdev->ndev->name); 2537 goto INTA_MODE; 2538 } 2539 2540 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, 2541 intr_idx - 2); 2542 vdev->vxge_entries[intr_cnt].in_use = 1; 2543 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx]; 2544 } 2545INTA_MODE: 2546#endif 2547 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name); 2548 2549 if (vdev->config.intr_type == INTA) { 2550 vxge_hw_device_set_intr_type(vdev->devh, 2551 VXGE_HW_INTR_MODE_IRQLINE); 2552
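/* With a single shared INTA line there is no per-fifo Tx vector, so the
 * TTI CI bit is turned on for the first vpath here; in the MSI-X case
 * vxge_config_vpaths() leaves tti.timer_ci_en disabled instead, since
 * each vector has a handler of its own.
 */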
vxge_hw_vpath_tti_ci_set(vdev->devh, 2553 vdev->vpaths[0].device_id); 2554 ret = request_irq((int) vdev->pdev->irq, 2555 vxge_isr_napi, 2556 IRQF_SHARED, vdev->desc[0], vdev); 2557 if (ret) { 2558 vxge_debug_init(VXGE_ERR, 2559 "%s %s-%d: ISR registration failed", 2560 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq); 2561 return -ENODEV; 2562 } 2563 vxge_debug_init(VXGE_TRACE, 2564 "new %s-%d line allocated", 2565 "IRQ", vdev->pdev->irq); 2566 } 2567 2568 return VXGE_HW_OK; 2569} 2570 2571static void vxge_poll_vp_reset(unsigned long data) 2572{ 2573 struct vxgedev *vdev = (struct vxgedev *)data; 2574 int i, j = 0; 2575 2576 for (i = 0; i < vdev->no_of_vpath; i++) { 2577 if (test_bit(i, &vdev->vp_reset)) { 2578 vxge_reset_vpath(vdev, i); 2579 j++; 2580 } 2581 } 2582 if (j && (vdev->config.intr_type != MSI_X)) { 2583 vxge_hw_device_unmask_all(vdev->devh); 2584 vxge_hw_device_flush_io(vdev->devh); 2585 } 2586 2587 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2); 2588} 2589 2590static void vxge_poll_vp_lockup(unsigned long data) 2591{ 2592 struct vxgedev *vdev = (struct vxgedev *)data; 2593 int i; 2594 struct vxge_ring *ring; 2595 enum vxge_hw_status status = VXGE_HW_OK; 2596 2597 for (i = 0; i < vdev->no_of_vpath; i++) { 2598 ring = &vdev->vpaths[i].ring; 2599 /* Did this vpath receive any packets */ 2600 if (ring->stats.prev_rx_frms == ring->stats.rx_frms) { 2601 status = vxge_hw_vpath_check_leak(ring->handle); 2602 2603 /* Did it receive any packets last time */ 2604 if ((VXGE_HW_FAIL == status) && 2605 (VXGE_HW_FAIL == ring->last_status)) { 2606 2607 /* schedule vpath reset */ 2608 if (!test_and_set_bit(i, &vdev->vp_reset)) { 2609 2610 /* disable interrupts for this vpath */ 2611 vxge_vpath_intr_disable(vdev, i); 2612 2613 /* stop the queue for this vpath */ 2614 vxge_stop_tx_queue(&vdev->vpaths[i]. 2615 fifo); 2616 continue; 2617 } 2618 } 2619 } 2620 ring->stats.prev_rx_frms = ring->stats.rx_frms; 2621 ring->last_status = status; 2622 } 2623 2624 /* Check every millisecond */ 2625 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); 2626} 2627 2628/** 2629 * vxge_open 2630 * @dev: pointer to the device structure. 2631 * 2632 * This function is the open entry point of the driver. It mainly calls a 2633 * function to allocate Rx buffers and inserts them into the buffer 2634 * descriptors and then enables the Rx part of the NIC. 2635 * Return value: '0' on success and an appropriate (-)ve integer as 2636 * defined in errno.h file on failure.
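 *
 * Reached through vxge_netdev_ops.ndo_open when the interface is
 * brought up (e.g. "ip link set ethX up").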
2637 */ 2638int 2639vxge_open(struct net_device *dev) 2640{ 2641 enum vxge_hw_status status; 2642 struct vxgedev *vdev; 2643 struct __vxge_hw_device *hldev; 2644 int ret = 0; 2645 int i; 2646 u64 val64, function_mode; 2647 vxge_debug_entryexit(VXGE_TRACE, 2648 "%s: %s:%d", dev->name, __func__, __LINE__); 2649 2650 vdev = (struct vxgedev *)netdev_priv(dev); 2651 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2652 function_mode = vdev->config.device_hw_info.function_mode; 2653 2654 /* make sure the link is off by default every time the NIC is 2655 * initialized */ 2656 netif_carrier_off(dev); 2657 2658 /* Open VPATHs */ 2659 status = vxge_open_vpaths(vdev); 2660 if (status != VXGE_HW_OK) { 2661 vxge_debug_init(VXGE_ERR, 2662 "%s: fatal: Vpath open failed", vdev->ndev->name); 2663 ret = -EPERM; 2664 goto out0; 2665 } 2666 2667 vdev->mtu = dev->mtu; 2668 2669 status = vxge_add_isr(vdev); 2670 if (status != VXGE_HW_OK) { 2671 vxge_debug_init(VXGE_ERR, 2672 "%s: fatal: ISR add failed", dev->name); 2673 ret = -EPERM; 2674 goto out1; 2675 } 2676 2677 2678 if (vdev->config.intr_type != MSI_X) { 2679 netif_napi_add(dev, &vdev->napi, vxge_poll_inta, 2680 vdev->config.napi_weight); 2681 napi_enable(&vdev->napi); 2682 for (i = 0; i < vdev->no_of_vpath; i++) 2683 vdev->vpaths[i].ring.napi_p = &vdev->napi; 2684 } else { 2685 for (i = 0; i < vdev->no_of_vpath; i++) { 2686 netif_napi_add(dev, &vdev->vpaths[i].ring.napi, 2687 vxge_poll_msix, vdev->config.napi_weight); 2688 napi_enable(&vdev->vpaths[i].ring.napi); 2689 vdev->vpaths[i].ring.napi_p = 2690 &vdev->vpaths[i].ring.napi; 2691 } 2692 } 2693 2694 /* configure RTH */ 2695 if (vdev->config.rth_steering) { 2696 status = vxge_rth_configure(vdev); 2697 if (status != VXGE_HW_OK) { 2698 vxge_debug_init(VXGE_ERR, 2699 "%s: fatal: RTH configuration failed", 2700 dev->name); 2701 ret = -EPERM; 2702 goto out2; 2703 } 2704 } 2705 2706 for (i = 0; i < vdev->no_of_vpath; i++) { 2707 /* set initial mtu before enabling the device */ 2708 status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle, 2709 vdev->mtu); 2710 if (status != VXGE_HW_OK) { 2711 vxge_debug_init(VXGE_ERR, 2712 "%s: fatal: can not set new MTU", dev->name); 2713 ret = -EPERM; 2714 goto out2; 2715 } 2716 } 2717 2718 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev); 2719 vxge_debug_init(vdev->level_trace, 2720 "%s: MTU is %d", vdev->ndev->name, vdev->mtu); 2721 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev); 2722 2723 /* Reprogram the DA table with populated mac addresses */ 2724 for (i = 0; i < vdev->no_of_vpath; i++) { 2725 vxge_restore_vpath_mac_addr(&vdev->vpaths[i]); 2726 vxge_restore_vpath_vid_table(&vdev->vpaths[i]); 2727 } 2728 2729 /* Enable vpath to sniff all unicast/multicast traffic that is not 2730 * addressed to them.
We allow promiscuous mode for PF only 2731 */ 2732 2733 val64 = 0; 2734 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) 2735 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i); 2736 2737 vxge_hw_mgmt_reg_write(vdev->devh, 2738 vxge_hw_mgmt_reg_type_mrpcim, 2739 0, 2740 (ulong)offsetof(struct vxge_hw_mrpcim_reg, 2741 rxmac_authorize_all_addr), 2742 val64); 2743 2744 vxge_hw_mgmt_reg_write(vdev->devh, 2745 vxge_hw_mgmt_reg_type_mrpcim, 2746 0, 2747 (ulong)offsetof(struct vxge_hw_mrpcim_reg, 2748 rxmac_authorize_all_vid), 2749 val64); 2750 2751 vxge_set_multicast(dev); 2752 2753 /* Enabling Bcast and mcast for all vpaths */ 2754 for (i = 0; i < vdev->no_of_vpath; i++) { 2755 status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle); 2756 if (status != VXGE_HW_OK) 2757 vxge_debug_init(VXGE_ERR, 2758 "%s : Can not enable bcast for vpath " 2759 "id %d", dev->name, i); 2760 if (vdev->config.addr_learn_en) { 2761 status = 2762 vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle); 2763 if (status != VXGE_HW_OK) 2764 vxge_debug_init(VXGE_ERR, 2765 "%s : Can not enable mcast for vpath " 2766 "id %d", dev->name, i); 2767 } 2768 } 2769 2770 vxge_hw_device_setpause_data(vdev->devh, 0, 2771 vdev->config.tx_pause_enable, 2772 vdev->config.rx_pause_enable); 2773 2774 if (vdev->vp_reset_timer.function == NULL) 2775 vxge_os_timer(vdev->vp_reset_timer, 2776 vxge_poll_vp_reset, vdev, (HZ/2)); 2777 2778 if (vdev->vp_lockup_timer.function == NULL) 2779 vxge_os_timer(vdev->vp_lockup_timer, 2780 vxge_poll_vp_lockup, vdev, (HZ/2)); 2781 2782 set_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2783 2784 smp_wmb(); 2785 2786 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) { 2787 netif_carrier_on(vdev->ndev); 2788 printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name); 2789 vdev->stats.link_up++; 2790 } 2791 2792 vxge_hw_device_intr_enable(vdev->devh); 2793 2794 smp_wmb(); 2795 2796 for (i = 0; i < vdev->no_of_vpath; i++) { 2797 vxge_hw_vpath_enable(vdev->vpaths[i].handle); 2798 smp_wmb(); 2799 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle); 2800 } 2801 2802 vxge_start_all_tx_queue(vdev); 2803 goto out0; 2804 2805out2: 2806 vxge_rem_isr(vdev); 2807 2808 /* Disable napi */ 2809 if (vdev->config.intr_type != MSI_X) 2810 napi_disable(&vdev->napi); 2811 else { 2812 for (i = 0; i < vdev->no_of_vpath; i++) 2813 napi_disable(&vdev->vpaths[i].ring.napi); 2814 } 2815 2816out1: 2817 vxge_close_vpaths(vdev, 0); 2818out0: 2819 vxge_debug_entryexit(VXGE_TRACE, 2820 "%s: %s:%d Exiting...", 2821 dev->name, __func__, __LINE__); 2822 return ret; 2823} 2824 2825/* Loop through the mac address list and delete all the entries */ 2826void vxge_free_mac_add_list(struct vxge_vpath *vpath) 2827{ 2828 2829 struct list_head *entry, *next; 2830 if (list_empty(&vpath->mac_addr_list)) 2831 return; 2832 2833 list_for_each_safe(entry, next, &vpath->mac_addr_list) { 2834 list_del(entry); 2835 kfree((struct vxge_mac_addrs *)entry); 2836 } 2837} 2838 2839static void vxge_napi_del_all(struct vxgedev *vdev) 2840{ 2841 int i; 2842 if (vdev->config.intr_type != MSI_X) 2843 netif_napi_del(&vdev->napi); 2844 else { 2845 for (i = 0; i < vdev->no_of_vpath; i++) 2846 netif_napi_del(&vdev->vpaths[i].ring.napi); 2847 } 2848 return; 2849} 2850 2851int do_vxge_close(struct net_device *dev, int do_io) 2852{ 2853 enum vxge_hw_status status; 2854 struct vxgedev *vdev; 2855 struct __vxge_hw_device *hldev; 2856 int i; 2857 u64 val64, vpath_vector; 2858 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 2859 dev->name, __func__, __LINE__); 2860 2861 vdev =
(struct vxgedev *)netdev_priv(dev); 2862 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2863 2864 if (unlikely(!is_vxge_card_up(vdev))) 2865 return 0; 2866 2867 /* If vxge_handle_crit_err task is executing, 2868 * wait till it completes. */ 2869 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) 2870 msleep(50); 2871 2872 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2873 if (do_io) { 2874 /* Put the vpath back in normal mode */ 2875 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); 2876 status = vxge_hw_mgmt_reg_read(vdev->devh, 2877 vxge_hw_mgmt_reg_type_mrpcim, 2878 0, 2879 (ulong)offsetof( 2880 struct vxge_hw_mrpcim_reg, 2881 rts_mgr_cbasin_cfg), 2882 &val64); 2883 2884 if (status == VXGE_HW_OK) { 2885 val64 &= ~vpath_vector; 2886 status = vxge_hw_mgmt_reg_write(vdev->devh, 2887 vxge_hw_mgmt_reg_type_mrpcim, 2888 0, 2889 (ulong)offsetof( 2890 struct vxge_hw_mrpcim_reg, 2891 rts_mgr_cbasin_cfg), 2892 val64); 2893 } 2894 2895 /* Remove the function 0 from promiscuous mode */ 2896 vxge_hw_mgmt_reg_write(vdev->devh, 2897 vxge_hw_mgmt_reg_type_mrpcim, 2898 0, 2899 (ulong)offsetof(struct vxge_hw_mrpcim_reg, 2900 rxmac_authorize_all_addr), 2901 0); 2902 2903 vxge_hw_mgmt_reg_write(vdev->devh, 2904 vxge_hw_mgmt_reg_type_mrpcim, 2905 0, 2906 (ulong)offsetof(struct vxge_hw_mrpcim_reg, 2907 rxmac_authorize_all_vid), 2908 0); 2909 2910 smp_wmb(); 2911 } 2912 del_timer_sync(&vdev->vp_lockup_timer); 2913 2914 del_timer_sync(&vdev->vp_reset_timer); 2915 2916 /* Disable napi */ 2917 if (vdev->config.intr_type != MSI_X) 2918 napi_disable(&vdev->napi); 2919 else { 2920 for (i = 0; i < vdev->no_of_vpath; i++) 2921 napi_disable(&vdev->vpaths[i].ring.napi); 2922 } 2923 2924 netif_carrier_off(vdev->ndev); 2925 printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name); 2926 vxge_stop_all_tx_queue(vdev); 2927 2928 /* Note that at this point xmit() is stopped by upper layer */ 2929 if (do_io) 2930 vxge_hw_device_intr_disable(vdev->devh); 2931 2932 mdelay(1000); 2933 2934 vxge_rem_isr(vdev); 2935 2936 vxge_napi_del_all(vdev); 2937 2938 if (do_io) 2939 vxge_reset_all_vpaths(vdev); 2940 2941 vxge_close_vpaths(vdev, 0); 2942 2943 vxge_debug_entryexit(VXGE_TRACE, 2944 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__); 2945 2946 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); 2947 2948 return 0; 2949} 2950 2951/** 2952 * vxge_close 2953 * @dev: device pointer. 2954 * 2955 * This is the stop entry point of the driver. It needs to undo exactly 2956 * whatever was done by the open entry point, thus it's usually referred to 2957 * as the close function. Among other things this function mainly stops the 2958 * Rx side of the NIC and frees all the Rx buffers in the Rx rings. 2959 * Return value: '0' on success and an appropriate (-)ve integer as 2960 * defined in errno.h file on failure. 2961 */ 2962int 2963vxge_close(struct net_device *dev) 2964{ 2965 do_vxge_close(dev, 1); 2966 return 0; 2967} 2968 2969/** 2970 * vxge_change_mtu 2971 * @dev: net device pointer. 2972 * @new_mtu: the new MTU size for the device. 2973 * 2974 * A driver entry point to change MTU size for the device. Before changing 2975 * the MTU the device must be stopped.
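 * When the interface is up this is done by closing and re-opening the
 * device around the change (see vxge_close()/vxge_open() below); when it
 * is down the new value is only recorded for the next open.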
2976 */ 2977static int vxge_change_mtu(struct net_device *dev, int new_mtu) 2978{ 2979 struct vxgedev *vdev = netdev_priv(dev); 2980 2981 vxge_debug_entryexit(vdev->level_trace, 2982 "%s:%d", __func__, __LINE__); 2983 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) { 2984 vxge_debug_init(vdev->level_err, 2985 "%s: mtu size is invalid", dev->name); 2986 return -EPERM; 2987 } 2988 2989 /* check if device is down already */ 2990 if (unlikely(!is_vxge_card_up(vdev))) { 2991 /* just store the new value; it will be used later by open() */ 2992 dev->mtu = new_mtu; 2993 vxge_debug_init(vdev->level_err, 2994 "%s", "device is down on MTU change"); 2995 return 0; 2996 } 2997 2998 vxge_debug_init(vdev->level_trace, 2999 "trying to apply new MTU %d", new_mtu); 3000 3001 if (vxge_close(dev)) 3002 return -EIO; 3003 3004 dev->mtu = new_mtu; 3005 vdev->mtu = new_mtu; 3006 3007 if (vxge_open(dev)) 3008 return -EIO; 3009 3010 vxge_debug_init(vdev->level_trace, 3011 "%s: MTU changed to %d", vdev->ndev->name, new_mtu); 3012 3013 vxge_debug_entryexit(vdev->level_trace, 3014 "%s:%d Exiting...", __func__, __LINE__); 3015 3016 return 0; 3017} 3018 3019/** 3020 * vxge_get_stats 3021 * @dev: pointer to the device structure 3022 * 3023 * Updates the device statistics structure. This function updates the device 3024 * statistics structure in the net_device structure and returns a pointer 3025 * to the same. 3026 */ 3027static struct net_device_stats * 3028vxge_get_stats(struct net_device *dev) 3029{ 3030 struct vxgedev *vdev; 3031 struct net_device_stats *net_stats; 3032 int k; 3033 3034 vdev = netdev_priv(dev); 3035 3036 net_stats = &vdev->stats.net_stats; 3037 3038 memset(net_stats, 0, sizeof(struct net_device_stats)); 3039 3040 for (k = 0; k < vdev->no_of_vpath; k++) { 3041 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms; 3042 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; 3043 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; 3044 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; 3045 net_stats->rx_dropped += 3046 vdev->vpaths[k].ring.stats.rx_dropped; 3047 3048 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; 3049 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; 3050 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; 3051 } 3052 3053 return net_stats; 3054} 3055 3056/** 3057 * vxge_ioctl 3058 * @dev: Device pointer. 3059 * @rq: An IOCTL specific structure, that can contain a pointer to 3060 * a proprietary structure used to pass information to the driver. 3061 * @cmd: This is used to distinguish between the different commands that 3062 * can be passed to the IOCTL functions. 3063 * 3064 * Entry point for the Ioctl. 3065 */ 3066static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3067{ 3068 return -EOPNOTSUPP; 3069} 3070 3071/** 3072 * vxge_tx_watchdog 3073 * @dev: pointer to net device structure 3074 * 3075 * Watchdog for transmit side. 3076 * This function is triggered if the Tx Queue is stopped 3077 * for a pre-defined amount of time when the interface is still up.
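 * The pre-defined amount of time is ndev->watchdog_timeo, set to
 * VXGE_LL_WATCH_DOG_TIMEOUT in vxge_device_register(); the handler
 * below recovers by resetting the device (vxge_reset()).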
3078 */ 3079static void 3080vxge_tx_watchdog(struct net_device *dev) 3081{ 3082 struct vxgedev *vdev; 3083 3084 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3085 3086 vdev = (struct vxgedev *)netdev_priv(dev); 3087 3088 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; 3089 3090 vxge_reset(vdev); 3091 vxge_debug_entryexit(VXGE_TRACE, 3092 "%s:%d Exiting...", __func__, __LINE__); 3093} 3094 3095/** 3096 * vxge_vlan_rx_register 3097 * @dev: net device pointer. 3098 * @grp: vlan group 3099 * 3100 * Vlan group registration 3101 */ 3102static void 3103vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 3104{ 3105 struct vxgedev *vdev; 3106 struct vxge_vpath *vpath; 3107 int vp; 3108 u64 vid; 3109 enum vxge_hw_status status; 3110 int i; 3111 3112 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3113 3114 vdev = (struct vxgedev *)netdev_priv(dev); 3115 3116 vpath = &vdev->vpaths[0]; 3117 if ((NULL == grp) && (vpath->is_open)) { 3118 /* Get the first vlan */ 3119 status = vxge_hw_vpath_vid_get(vpath->handle, &vid); 3120 3121 while (status == VXGE_HW_OK) { 3122 3123 /* Delete this vlan from the vid table */ 3124 for (vp = 0; vp < vdev->no_of_vpath; vp++) { 3125 vpath = &vdev->vpaths[vp]; 3126 if (!vpath->is_open) 3127 continue; 3128 3129 vxge_hw_vpath_vid_delete(vpath->handle, vid); 3130 } 3131 3132 /* Get the next vlan to be deleted */ 3133 vpath = &vdev->vpaths[0]; 3134 status = vxge_hw_vpath_vid_get(vpath->handle, &vid); 3135 } 3136 } 3137 3138 vdev->vlgrp = grp; 3139 3140 for (i = 0; i < vdev->no_of_vpath; i++) { 3141 if (vdev->vpaths[i].is_configured) 3142 vdev->vpaths[i].ring.vlgrp = grp; 3143 } 3144 3145 vxge_debug_entryexit(VXGE_TRACE, 3146 "%s:%d Exiting...", __func__, __LINE__); 3147} 3148 3149/** 3150 * vxge_vlan_rx_add_vid 3151 * @dev: net device pointer. 3152 * @vid: vid 3153 * 3154 * Add the vlan id to the device's vlan id table 3155 */ 3156static void 3157vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 3158{ 3159 struct vxgedev *vdev; 3160 struct vxge_vpath *vpath; 3161 int vp_id; 3162 3163 vdev = (struct vxgedev *)netdev_priv(dev); 3164 3165 /* Add this vlan to the vid table */ 3166 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 3167 vpath = &vdev->vpaths[vp_id]; 3168 if (!vpath->is_open) 3169 continue; 3170 vxge_hw_vpath_vid_add(vpath->handle, vid); 3171 } 3172} 3173 3174/** 3175 * vxge_vlan_rx_kill_vid 3176 * @dev: net device pointer.
3177 * @vid: vid 3178 * 3179 * Remove the vlan id from the device's vlan id table 3180 */ 3181static void 3182vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 3183{ 3184 struct vxgedev *vdev; 3185 struct vxge_vpath *vpath; 3186 int vp_id; 3187 3188 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3189 3190 vdev = (struct vxgedev *)netdev_priv(dev); 3191 3192 vlan_group_set_device(vdev->vlgrp, vid, NULL); 3193 3194 /* Delete this vlan from the vid table */ 3195 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 3196 vpath = &vdev->vpaths[vp_id]; 3197 if (!vpath->is_open) 3198 continue; 3199 vxge_hw_vpath_vid_delete(vpath->handle, vid); 3200 } 3201 vxge_debug_entryexit(VXGE_TRACE, 3202 "%s:%d Exiting...", __func__, __LINE__); 3203} 3204 3205static const struct net_device_ops vxge_netdev_ops = { 3206 .ndo_open = vxge_open, 3207 .ndo_stop = vxge_close, 3208 .ndo_get_stats = vxge_get_stats, 3209 .ndo_start_xmit = vxge_xmit, 3210 .ndo_validate_addr = eth_validate_addr, 3211 .ndo_set_multicast_list = vxge_set_multicast, 3212 3213 .ndo_do_ioctl = vxge_ioctl, 3214 3215 .ndo_set_mac_address = vxge_set_mac_addr, 3216 .ndo_change_mtu = vxge_change_mtu, 3217 .ndo_vlan_rx_register = vxge_vlan_rx_register, 3218 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, 3219 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, 3220 3221 .ndo_tx_timeout = vxge_tx_watchdog, 3222#ifdef CONFIG_NET_POLL_CONTROLLER 3223 .ndo_poll_controller = vxge_netpoll, 3224#endif 3225}; 3226 3227int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3228 struct vxge_config *config, 3229 int high_dma, int no_of_vpath, 3230 struct vxgedev **vdev_out) 3231{ 3232 struct net_device *ndev; 3233 enum vxge_hw_status status = VXGE_HW_OK; 3234 struct vxgedev *vdev; 3235 int i, ret = 0, no_of_queue = 1; 3236 u64 stat; 3237 3238 *vdev_out = NULL; 3239 if (config->tx_steering_type == TX_MULTIQ_STEERING) 3240 no_of_queue = no_of_vpath; 3241 3242 ndev = alloc_etherdev_mq(sizeof(struct vxgedev), 3243 no_of_queue); 3244 if (ndev == NULL) { 3245 vxge_debug_init( 3246 vxge_hw_device_trace_level_get(hldev), 3247 "%s : device allocation failed", __func__); 3248 ret = -ENODEV; 3249 goto _out0; 3250 } 3251 3252 vxge_debug_entryexit( 3253 vxge_hw_device_trace_level_get(hldev), 3254 "%s: %s:%d Entering...", 3255 ndev->name, __func__, __LINE__); 3256 3257 vdev = netdev_priv(ndev); 3258 memset(vdev, 0, sizeof(struct vxgedev)); 3259 3260 vdev->ndev = ndev; 3261 vdev->devh = hldev; 3262 vdev->pdev = hldev->pdev; 3263 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3264 vdev->rx_csum = 1; /* Enable Rx CSUM by default. 
*/ 3265 3266 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3267 3268 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 3269 NETIF_F_HW_VLAN_FILTER; 3270 /* Driver entry points */ 3271 ndev->irq = vdev->pdev->irq; 3272 ndev->base_addr = (unsigned long) hldev->bar0; 3273 3274 ndev->netdev_ops = &vxge_netdev_ops; 3275 3276 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; 3277 3278 initialize_ethtool_ops(ndev); 3279 3280 /* Allocate memory for vpath */ 3281 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3282 no_of_vpath, GFP_KERNEL); 3283 if (!vdev->vpaths) { 3284 vxge_debug_init(VXGE_ERR, 3285 "%s: vpath memory allocation failed", 3286 vdev->ndev->name); 3287 ret = -ENODEV; 3288 goto _out1; 3289 } 3290 3291 ndev->features |= NETIF_F_SG; 3292 3293 ndev->features |= NETIF_F_HW_CSUM; 3294 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3295 "%s : checksumming enabled", __func__); 3296 3297 if (high_dma) { 3298 ndev->features |= NETIF_F_HIGHDMA; 3299 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3300 "%s : using High DMA", __func__); 3301 } 3302 3303 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; 3304 3305 if (vdev->config.gro_enable) 3306 ndev->features |= NETIF_F_GRO; 3307 3308 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) 3309 ndev->real_num_tx_queues = no_of_vpath; 3310 3311#ifdef NETIF_F_LLTX 3312 ndev->features |= NETIF_F_LLTX; 3313#endif 3314 3315 for (i = 0; i < no_of_vpath; i++) 3316 spin_lock_init(&vdev->vpaths[i].fifo.tx_lock); 3317 3318 if (register_netdev(ndev)) { 3319 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3320 "%s: %s : device registration failed!", 3321 ndev->name, __func__); 3322 ret = -ENODEV; 3323 goto _out2; 3324 } 3325 3326 /* Set the factory defined MAC address initially */ 3327 ndev->addr_len = ETH_ALEN; 3328 3329 /* Make the link state off at this point; when the link change 3330 * interrupt comes, the state will be automatically changed to 3331 * the right state.
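 * (vxge_open() likewise starts with netif_carrier_off() and turns the
 * carrier on only once vxge_hw_device_link_state_get() reports
 * VXGE_HW_LINK_UP.)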
3332 */ 3333 netif_carrier_off(ndev); 3334 3335 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3336 "%s: Ethernet device registered", 3337 ndev->name); 3338 3339 *vdev_out = vdev; 3340 3341 /* Resetting the Device stats */ 3342 status = vxge_hw_mrpcim_stats_access( 3343 hldev, 3344 VXGE_HW_STATS_OP_CLEAR_ALL_STATS, 3345 0, 3346 0, 3347 &stat); 3348 3349 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION) 3350 vxge_debug_init( 3351 vxge_hw_device_trace_level_get(hldev), 3352 "%s: device stats clear returns " 3353 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name); 3354 3355 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev), 3356 "%s: %s:%d Exiting...", 3357 ndev->name, __func__, __LINE__); 3358 3359 return ret; 3360_out2: 3361 kfree(vdev->vpaths); 3362_out1: 3363 free_netdev(ndev); 3364_out0: 3365 return ret; 3366} 3367 3368/* 3369 * vxge_device_unregister 3370 * 3371 * This function will unregister and free the network device 3372 */ 3373void 3374vxge_device_unregister(struct __vxge_hw_device *hldev) 3375{ 3376 struct vxgedev *vdev; 3377 struct net_device *dev; 3378 char buf[IFNAMSIZ]; 3379#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ 3380 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) 3381 u32 level_trace; 3382#endif 3383 3384 dev = hldev->ndev; 3385 vdev = netdev_priv(dev); 3386#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ 3387 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) 3388 level_trace = vdev->level_trace; 3389#endif 3390 vxge_debug_entryexit(level_trace, 3391 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); 3392 3393 memcpy(buf, vdev->ndev->name, IFNAMSIZ); 3394 3395 /* in 2.6 will call stop() if device is up */ 3396 unregister_netdev(dev); 3397 3398 flush_scheduled_work(); 3399 3400 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf); 3401 vxge_debug_entryexit(level_trace, 3402 "%s: %s:%d Exiting...", buf, __func__, __LINE__); 3403} 3404 3405/* 3406 * vxge_callback_crit_err 3407 * 3408 * This function is called by the alarm handler in interrupt context. 3409 * Driver must analyze it based on the event type.
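 * Events handled below: VXGE_HW_EVENT_SLOT_FREEZE, VXGE_HW_EVENT_SERR
 * and VXGE_HW_EVENT_CRITICAL_ERR are treated as device wide, while
 * VXGE_HW_EVENT_FIFO_ERR / VXGE_HW_EVENT_VPATH_ERR only schedule a
 * reset of the affected vpath.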
3410 */ 3411static void 3412vxge_callback_crit_err(struct __vxge_hw_device *hldev, 3413 enum vxge_hw_event type, u64 vp_id) 3414{ 3415 struct net_device *dev = hldev->ndev; 3416 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 3417 int vpath_idx; 3418 3419 vxge_debug_entryexit(vdev->level_trace, 3420 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); 3421 3422 /* Note: This event type should be used for device wide 3423 * indications only - Serious errors, Slot freeze and critical errors 3424 */ 3425 vdev->cric_err_event = type; 3426 3427 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) 3428 if (vdev->vpaths[vpath_idx].device_id == vp_id) 3429 break; 3430 3431 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) { 3432 if (type == VXGE_HW_EVENT_SLOT_FREEZE) { 3433 vxge_debug_init(VXGE_ERR, 3434 "%s: Slot is frozen", vdev->ndev->name); 3435 } else if (type == VXGE_HW_EVENT_SERR) { 3436 vxge_debug_init(VXGE_ERR, 3437 "%s: Encountered Serious Error", 3438 vdev->ndev->name); 3439 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) 3440 vxge_debug_init(VXGE_ERR, 3441 "%s: Encountered Critical Error", 3442 vdev->ndev->name); 3443 } 3444 3445 if ((type == VXGE_HW_EVENT_SERR) || 3446 (type == VXGE_HW_EVENT_SLOT_FREEZE)) { 3447 if (unlikely(vdev->exec_mode)) 3448 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); 3449 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) { 3450 vxge_hw_device_mask_all(hldev); 3451 if (unlikely(vdev->exec_mode)) 3452 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); 3453 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) || 3454 (type == VXGE_HW_EVENT_VPATH_ERR)) { 3455 3456 if (unlikely(vdev->exec_mode)) 3457 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); 3458 else { 3459 /* check if this vpath is already set for reset */ 3460 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) { 3461 3462 /* disable interrupts for this vpath */ 3463 vxge_vpath_intr_disable(vdev, vpath_idx); 3464 3465 /* stop the queue for this vpath */ 3466 vxge_stop_tx_queue(&vdev->vpaths[vpath_idx]. 3467 fifo); 3468 } 3469 } 3470 } 3471 3472 vxge_debug_entryexit(vdev->level_trace, 3473 "%s: %s:%d Exiting...", 3474 vdev->ndev->name, __func__, __LINE__); 3475} 3476 3477static void verify_bandwidth(void) 3478{ 3479 int i, band_width, total = 0, equal_priority = 0; 3480 3481 /* 1. If user enters 0 for some fifo, give equal priority to all */ 3482 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3483 if (bw_percentage[i] == 0) { 3484 equal_priority = 1; 3485 break; 3486 } 3487 } 3488 3489 if (!equal_priority) { 3490 /* 2. If sum exceeds 100, give equal priority to all */ 3491 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3492 if (bw_percentage[i] == 0xFF) 3493 break; 3494 3495 total += bw_percentage[i]; 3496 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) { 3497 equal_priority = 1; 3498 break; 3499 } 3500 } 3501 } 3502 3503 if (!equal_priority) { 3504 /* Is all the bandwidth consumed? 
*/ 3505 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) { 3506 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) { 3507 /* Split rest of bw equally among next VPs */ 3508 band_width = 3509 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) / 3510 (VXGE_HW_MAX_VIRTUAL_PATHS - i); 3511 if (band_width < 2) /* min of 2% */ 3512 equal_priority = 1; 3513 else { 3514 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS; 3515 i++) 3516 bw_percentage[i] = 3517 band_width; 3518 } 3519 } 3520 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS) 3521 equal_priority = 1; 3522 } 3523 3524 if (equal_priority) { 3525 vxge_debug_init(VXGE_ERR, 3526 "%s: Assigning equal bandwidth to all the vpaths", 3527 VXGE_DRIVER_NAME); 3528 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX / 3529 VXGE_HW_MAX_VIRTUAL_PATHS; 3530 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) 3531 bw_percentage[i] = bw_percentage[0]; 3532 } 3533 3534 return; 3535} 3536 3537/* 3538 * Vpath configuration 3539 */ 3540static int __devinit vxge_config_vpaths( 3541 struct vxge_hw_device_config *device_config, 3542 u64 vpath_mask, struct vxge_config *config_param) 3543{ 3544 int i, no_of_vpaths = 0, default_no_vpath = 0, temp; 3545 u32 txdl_size, txdl_per_memblock; 3546 3547 temp = driver_config->vpath_per_dev; 3548 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) && 3549 (max_config_dev == VXGE_MAX_CONFIG_DEV)) { 3550 /* No more CPUs left. Return vpath number as zero. */ 3551 if (driver_config->g_no_cpus == -1) 3552 return 0; 3553 3554 if (!driver_config->g_no_cpus) 3555 driver_config->g_no_cpus = num_online_cpus(); 3556 3557 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1; 3558 if (!driver_config->vpath_per_dev) 3559 driver_config->vpath_per_dev = 1; 3560 3561 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) 3562 if (vxge_bVALn(vpath_mask, i, 1)) 3563 default_no_vpath++; 3564 3565 3566 if (default_no_vpath < driver_config->vpath_per_dev) 3567 driver_config->vpath_per_dev = default_no_vpath; 3568 3569 driver_config->g_no_cpus = driver_config->g_no_cpus - 3570 (driver_config->vpath_per_dev * 2); 3571 if (driver_config->g_no_cpus <= 0) 3572 driver_config->g_no_cpus = -1; 3573 } 3574 3575 if (driver_config->vpath_per_dev == 1) { 3576 vxge_debug_ll_config(VXGE_TRACE, 3577 "%s: Disable tx and rx steering, " 3578 "as single vpath is configured", VXGE_DRIVER_NAME); 3579 config_param->rth_steering = NO_STEERING; 3580 config_param->tx_steering_type = NO_STEERING; 3581 device_config->rth_en = 0; 3582 } 3583 3584 /* configure bandwidth */ 3585 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) 3586 device_config->vp_config[i].min_bandwidth = bw_percentage[i]; 3587 3588 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3589 device_config->vp_config[i].vp_id = i; 3590 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU; 3591 if (no_of_vpaths < driver_config->vpath_per_dev) { 3592 if (!vxge_bVALn(vpath_mask, i, 1)) { 3593 vxge_debug_ll_config(VXGE_TRACE, 3594 "%s: vpath: %d is not available", 3595 VXGE_DRIVER_NAME, i); 3596 continue; 3597 } else { 3598 vxge_debug_ll_config(VXGE_TRACE, 3599 "%s: vpath: %d available", 3600 VXGE_DRIVER_NAME, i); 3601 no_of_vpaths++; 3602 } 3603 } else { 3604 vxge_debug_ll_config(VXGE_TRACE, 3605 "%s: vpath: %d is not configured, " 3606 "max_config_vpath exceeded", 3607 VXGE_DRIVER_NAME, i); 3608 break; 3609 } 3610 3611 /* Configure Tx fifos */ 3612 device_config->vp_config[i].fifo.enable = 3613 VXGE_HW_FIFO_ENABLE; 3614 device_config->vp_config[i].fifo.max_frags = 3615 MAX_SKB_FRAGS + 1; 3616 device_config->vp_config[i].fifo.memblock_size = 3617
VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE; 3618 3619 txdl_size = device_config->vp_config[i].fifo.max_frags * 3620 sizeof(struct vxge_hw_fifo_txd); 3621 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size; 3622 3623 device_config->vp_config[i].fifo.fifo_blocks = 3624 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1; 3625 3626 device_config->vp_config[i].fifo.intr = 3627 VXGE_HW_FIFO_QUEUE_INTR_DISABLE; 3628 3629 /* Configure tti properties */ 3630 device_config->vp_config[i].tti.intr_enable = 3631 VXGE_HW_TIM_INTR_ENABLE; 3632 3633 device_config->vp_config[i].tti.btimer_val = 3634 (VXGE_TTI_BTIMER_VAL * 1000) / 272; 3635 3636 device_config->vp_config[i].tti.timer_ac_en = 3637 VXGE_HW_TIM_TIMER_AC_ENABLE; 3638 3639 /* For msi-x with napi (each vector 3640 has a handler of its own) - 3641 Set CI to OFF for all vpaths */ 3642 device_config->vp_config[i].tti.timer_ci_en = 3643 VXGE_HW_TIM_TIMER_CI_DISABLE; 3644 3645 device_config->vp_config[i].tti.timer_ri_en = 3646 VXGE_HW_TIM_TIMER_RI_DISABLE; 3647 3648 device_config->vp_config[i].tti.util_sel = 3649 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL; 3650 3651 device_config->vp_config[i].tti.ltimer_val = 3652 (VXGE_TTI_LTIMER_VAL * 1000) / 272; 3653 3654 device_config->vp_config[i].tti.rtimer_val = 3655 (VXGE_TTI_RTIMER_VAL * 1000) / 272; 3656 3657 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A; 3658 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B; 3659 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C; 3660 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A; 3661 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B; 3662 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C; 3663 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D; 3664 3665 /* Configure Rx rings */ 3666 device_config->vp_config[i].ring.enable = 3667 VXGE_HW_RING_ENABLE; 3668 3669 device_config->vp_config[i].ring.ring_blocks = 3670 VXGE_HW_DEF_RING_BLOCKS; 3671 device_config->vp_config[i].ring.buffer_mode = 3672 VXGE_HW_RING_RXD_BUFFER_MODE_1; 3673 device_config->vp_config[i].ring.rxds_limit = 3674 VXGE_HW_DEF_RING_RXDS_LIMIT; 3675 device_config->vp_config[i].ring.scatter_mode = 3676 VXGE_HW_RING_SCATTER_MODE_A; 3677 3678 /* Configure rti properties */ 3679 device_config->vp_config[i].rti.intr_enable = 3680 VXGE_HW_TIM_INTR_ENABLE; 3681 3682 device_config->vp_config[i].rti.btimer_val = 3683 (VXGE_RTI_BTIMER_VAL * 1000)/272; 3684 3685 device_config->vp_config[i].rti.timer_ac_en = 3686 VXGE_HW_TIM_TIMER_AC_ENABLE; 3687 3688 device_config->vp_config[i].rti.timer_ci_en = 3689 VXGE_HW_TIM_TIMER_CI_DISABLE; 3690 3691 device_config->vp_config[i].rti.timer_ri_en = 3692 VXGE_HW_TIM_TIMER_RI_DISABLE; 3693 3694 device_config->vp_config[i].rti.util_sel = 3695 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL; 3696 3697 device_config->vp_config[i].rti.urange_a = 3698 RTI_RX_URANGE_A; 3699 device_config->vp_config[i].rti.urange_b = 3700 RTI_RX_URANGE_B; 3701 device_config->vp_config[i].rti.urange_c = 3702 RTI_RX_URANGE_C; 3703 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A; 3704 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B; 3705 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C; 3706 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D; 3707 3708 device_config->vp_config[i].rti.rtimer_val = 3709 (VXGE_RTI_RTIMER_VAL * 1000) / 272; 3710 3711 device_config->vp_config[i].rti.ltimer_val = 3712 (VXGE_RTI_LTIMER_VAL * 1000) / 272; 3713 3714 device_config->vp_config[i].rpa_strip_vlan_tag = 3715 vlan_tag_strip; 3716 } 3717 3718 driver_config->vpath_per_dev = 
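/* The tti/rti *_VAL constants used above are in microseconds; the
 * "multiply by 1000, divide by 272" arithmetic converts them to device
 * timer units, i.e. ticks of roughly 272 ns each (inferred from the
 * conversion itself).
 */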
temp; 3719 return no_of_vpaths; 3720} 3721 3722/* initialize device configurations */ 3723static void __devinit vxge_device_config_init( 3724 struct vxge_hw_device_config *device_config, 3725 int *intr_type) 3726{ 3727 /* Used for CQRQ/SRQ. */ 3728 device_config->dma_blockpool_initial = 3729 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; 3730 3731 device_config->dma_blockpool_max = 3732 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; 3733 3734 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT) 3735 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT; 3736 3737#ifndef CONFIG_PCI_MSI 3738 vxge_debug_init(VXGE_ERR, 3739 "%s: This Kernel does not support " 3740 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); 3741 *intr_type = INTA; 3742#endif 3743 3744 /* Configure whether MSI-X or IRQ line (INTA). */ 3745 switch (*intr_type) { 3746 case INTA: 3747 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE; 3748 break; 3749 3750 case MSI_X: 3751 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; 3752 break; 3753 } 3754 /* Timer period between device poll */ 3755 device_config->device_poll_millis = VXGE_TIMER_DELAY; 3756 3757 /* Configure mac based steering. */ 3758 device_config->rts_mac_en = addr_learn_en; 3759 3760 /* Configure Vpaths */ 3761 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT; 3762 3763 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", 3764 __func__); 3765 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d", 3766 device_config->dma_blockpool_initial); 3767 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d", 3768 device_config->dma_blockpool_max); 3769 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", 3770 device_config->intr_mode); 3771 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", 3772 device_config->device_poll_millis); 3773 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d", 3774 device_config->rts_mac_en); 3775 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", 3776 device_config->rth_en); 3777 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", 3778 device_config->rth_it_type); 3779} 3780 3781static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) 3782{ 3783 int i; 3784 3785 vxge_debug_init(VXGE_TRACE, 3786 "%s: %d Vpath(s) opened", 3787 vdev->ndev->name, vdev->no_of_vpath); 3788 3789 switch (vdev->config.intr_type) { 3790 case INTA: 3791 vxge_debug_init(VXGE_TRACE, 3792 "%s: Interrupt type INTA", vdev->ndev->name); 3793 break; 3794 3795 case MSI_X: 3796 vxge_debug_init(VXGE_TRACE, 3797 "%s: Interrupt type MSI-X", vdev->ndev->name); 3798 break; 3799 } 3800 3801 if (vdev->config.rth_steering) { 3802 vxge_debug_init(VXGE_TRACE, 3803 "%s: RTH steering enabled for TCP_IPV4", 3804 vdev->ndev->name); 3805 } else { 3806 vxge_debug_init(VXGE_TRACE, 3807 "%s: RTH steering disabled", vdev->ndev->name); 3808 } 3809 3810 switch (vdev->config.tx_steering_type) { 3811 case NO_STEERING: 3812 vxge_debug_init(VXGE_TRACE, 3813 "%s: Tx steering disabled", vdev->ndev->name); 3814 break; 3815 case TX_PRIORITY_STEERING: 3816 vxge_debug_init(VXGE_TRACE, 3817 "%s: Unsupported tx steering option", 3818 vdev->ndev->name); 3819 vxge_debug_init(VXGE_TRACE, 3820 "%s: Tx steering disabled", vdev->ndev->name); 3821 vdev->config.tx_steering_type = 0; 3822 break; 3823 case TX_VLAN_STEERING: 3824 vxge_debug_init(VXGE_TRACE, 3825 "%s: Unsupported tx steering option", 3826 vdev->ndev->name); 3827 vxge_debug_init(VXGE_TRACE, 3828 "%s: Tx steering disabled", vdev->ndev->name); 3829 vdev->config.tx_steering_type = 0; 3830 break; 3831 case TX_MULTIQ_STEERING: 3832 vxge_debug_init(VXGE_TRACE, 3833
"%s: Tx multiqueue steering enabled", 3834 vdev->ndev->name); 3835 break; 3836 case TX_PORT_STEERING: 3837 vxge_debug_init(VXGE_TRACE, 3838 "%s: Tx port steering enabled", 3839 vdev->ndev->name); 3840 break; 3841 default: 3842 vxge_debug_init(VXGE_ERR, 3843 "%s: Unsupported tx steering type", 3844 vdev->ndev->name); 3845 vxge_debug_init(VXGE_TRACE, 3846 "%s: Tx steering disabled", vdev->ndev->name); 3847 vdev->config.tx_steering_type = 0; 3848 } 3849 3850 if (vdev->config.gro_enable) { 3851 vxge_debug_init(VXGE_ERR, 3852 "%s: Generic receive offload enabled", 3853 vdev->ndev->name); 3854 } else 3855 vxge_debug_init(VXGE_TRACE, 3856 "%s: Generic receive offload disabled", 3857 vdev->ndev->name); 3858 3859 if (vdev->config.addr_learn_en) 3860 vxge_debug_init(VXGE_TRACE, 3861 "%s: MAC Address learning enabled", vdev->ndev->name); 3862 3863 vxge_debug_init(VXGE_TRACE, 3864 "%s: Rx doorbell mode enabled", vdev->ndev->name); 3865 3866 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3867 if (!vxge_bVALn(vpath_mask, i, 1)) 3868 continue; 3869 vxge_debug_ll_config(VXGE_TRACE, 3870 "%s: MTU size - %d", vdev->ndev->name, 3871 ((struct __vxge_hw_device *)(vdev->devh))-> 3872 config.vp_config[i].mtu); 3873 vxge_debug_init(VXGE_TRACE, 3874 "%s: VLAN tag stripping %s", vdev->ndev->name, 3875 ((struct __vxge_hw_device *)(vdev->devh))-> 3876 config.vp_config[i].rpa_strip_vlan_tag 3877 ? "Enabled" : "Disabled"); 3878 vxge_debug_init(VXGE_TRACE, 3879 "%s: Ring blocks : %d", vdev->ndev->name, 3880 ((struct __vxge_hw_device *)(vdev->devh))-> 3881 config.vp_config[i].ring.ring_blocks); 3882 vxge_debug_init(VXGE_TRACE, 3883 "%s: Fifo blocks : %d", vdev->ndev->name, 3884 ((struct __vxge_hw_device *)(vdev->devh))-> 3885 config.vp_config[i].fifo.fifo_blocks); 3886 vxge_debug_ll_config(VXGE_TRACE, 3887 "%s: Max frags : %d", vdev->ndev->name, 3888 ((struct __vxge_hw_device *)(vdev->devh))-> 3889 config.vp_config[i].fifo.max_frags); 3890 break; 3891 } 3892} 3893 3894#ifdef CONFIG_PM 3895/** 3896 * vxge_pm_suspend - vxge power management suspend entry point 3897 * 3898 */ 3899static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state) 3900{ 3901 return -ENOSYS; 3902} 3903/** 3904 * vxge_pm_resume - vxge power management resume entry point 3905 * 3906 */ 3907static int vxge_pm_resume(struct pci_dev *pdev) 3908{ 3909 return -ENOSYS; 3910} 3911 3912#endif 3913 3914/** 3915 * vxge_io_error_detected - called when PCI error is detected 3916 * @pdev: Pointer to PCI device 3917 * @state: The current pci connection state 3918 * 3919 * This function is called after a PCI bus error affecting 3920 * this device has been detected. 3921 */ 3922static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, 3923 pci_channel_state_t state) 3924{ 3925 struct __vxge_hw_device *hldev = 3926 (struct __vxge_hw_device *) pci_get_drvdata(pdev); 3927 struct net_device *netdev = hldev->ndev; 3928 3929 netif_device_detach(netdev); 3930 3931 if (state == pci_channel_io_perm_failure) 3932 return PCI_ERS_RESULT_DISCONNECT; 3933 3934 if (netif_running(netdev)) { 3935 /* Bring down the card, while avoiding PCI I/O */ 3936 do_vxge_close(netdev, 0); 3937 } 3938 3939 pci_disable_device(pdev); 3940 3941 return PCI_ERS_RESULT_NEED_RESET; 3942} 3943 3944/** 3945 * vxge_io_slot_reset - called after the pci bus has been reset. 3946 * @pdev: Pointer to PCI device 3947 * 3948 * Restart the card from scratch, as if from a cold-boot. 
/**
 * vxge_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	struct vxgedev *vdev = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "%s: "
			"Cannot re-enable device after reset\n",
			VXGE_DRIVER_NAME);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	vxge_reset(vdev);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * vxge_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void vxge_io_resume(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	if (netif_running(netdev)) {
		if (vxge_open(netdev)) {
			printk(KERN_ERR "%s: "
				"Can't bring device back up after reset\n",
				VXGE_DRIVER_NAME);
			return;
		}
	}

	netif_device_attach(netdev);
}

/**
 * vxge_probe
 * @pdev : structure containing the PCI related information of the device.
 * @pre: the PCI device ID entry, from vxge_id_table, that matched this device.
 * Description:
 * This function is called when a new PCI device gets detected and initializes
 * it.
 * Return value:
 * returns 0 on success and negative on failure.
 *
 */
static int __devinit
vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status;
	int ret;
	int high_dma = 0;
	u64 vpath_mask = 0;
	struct vxgedev *vdev;
	struct vxge_config ll_config;
	struct vxge_hw_device_config *device_config = NULL;
	struct vxge_hw_device_attr attr;
	int i, j, no_of_vpath = 0, max_vpath_supported = 0;
	u8 *macaddr;
	struct vxge_mac_addrs *entry;
	static int bus = -1, device = -1;
	u8 new_device = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
	attr.pdev = pdev;

	if (bus != pdev->bus->number)
		new_device = 1;
	if (device != PCI_SLOT(pdev->devfn))
		new_device = 1;

	bus = pdev->bus->number;
	device = PCI_SLOT(pdev->devfn);

	if (new_device) {
		if (driver_config->config_dev_cnt &&
		   (driver_config->config_dev_cnt !=
			driver_config->total_dev_cnt))
			vxge_debug_init(VXGE_ERR,
				"%s: Configured %d of %d devices",
				VXGE_DRIVER_NAME,
				driver_config->config_dev_cnt,
				driver_config->total_dev_cnt);
		driver_config->config_dev_cnt = 0;
		driver_config->total_dev_cnt = 0;
		driver_config->g_no_cpus = 0;
	}

	driver_config->vpath_per_dev = max_config_vpath;

	driver_config->total_dev_cnt++;
	if (++driver_config->config_dev_cnt > max_config_dev) {
		ret = 0;
		goto _exit0;
	}

	device_config = kzalloc(sizeof(struct vxge_hw_device_config),
		GFP_KERNEL);
	if (!device_config) {
		ret = -ENOMEM;
		vxge_debug_init(VXGE_ERR,
			"device_config : malloc failed %s %d",
			__FILE__, __LINE__);
		goto _exit0;
	}

	memset(&ll_config, 0, sizeof(struct vxge_config));
	ll_config.tx_steering_type = TX_MULTIQ_STEERING;
	ll_config.intr_type = MSI_X;
	ll_config.napi_weight = NEW_NAPI_WEIGHT;
	ll_config.rth_steering = RTH_STEERING;

	/* get the default configuration parameters */
	vxge_hw_device_config_default_get(device_config);

	/* initialize configuration parameters */
	vxge_device_config_init(device_config, &ll_config.intr_type);

	ret = pci_enable_device(pdev);
	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s : cannot enable PCI device", __func__);
		goto _exit0;
	}

	if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s : using 64bit DMA", __func__);

		high_dma = 1;

		if (pci_set_consistent_dma_mask(pdev,
						0xffffffffffffffffULL)) {
			vxge_debug_init(VXGE_ERR,
				"%s : unable to obtain 64bit DMA for "
				"consistent allocations", __func__);
			ret = -ENOMEM;
			goto _exit1;
		}
	} else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s : using 32bit DMA", __func__);
	} else {
		ret = -ENOMEM;
		goto _exit1;
	}

	if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
		vxge_debug_init(VXGE_ERR,
			"%s : request regions failed", __func__);
		ret = -ENODEV;
		goto _exit1;
	}

	pci_set_master(pdev);

	attr.bar0 = pci_ioremap_bar(pdev, 0);
	if (!attr.bar0) {
		vxge_debug_init(VXGE_ERR,
			"%s : cannot remap io memory bar0", __func__);
		ret = -ENODEV;
		goto _exit2;
	}
	vxge_debug_ll_config(VXGE_TRACE,
		"pci ioremap bar0: %p:0x%llx",
		attr.bar0,
		(unsigned long long)pci_resource_start(pdev, 0));

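	/*
	 * BAR0 is mapped; all further device access goes through this
	 * window, starting with the hardware info read below, which
	 * validates the firmware and reports the available vpaths.
	 */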
	status = vxge_hw_device_hw_info_get(attr.bar0,
			&ll_config.device_hw_info);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: Reading of hardware info failed. "
			"Please try upgrading the firmware.", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	if (ll_config.device_hw_info.fw_version.major !=
		VXGE_DRIVER_FW_VERSION_MAJOR) {
		vxge_debug_init(VXGE_ERR,
			"%s: Incorrect firmware version. "
			"Please upgrade the firmware to version 1.x.x",
			VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	vpath_mask = ll_config.device_hw_info.vpath_mask;
	if (vpath_mask == 0) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: No vpaths available in device", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	vxge_debug_ll_config(VXGE_TRACE,
		"%s:%d Vpath mask = %llx", __func__, __LINE__,
		(unsigned long long)vpath_mask);

	/* Check how many vpaths are available */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		max_vpath_supported++;
	}

	/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
	if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
		ll_config.device_hw_info.function_mode) &&
		(max_config_dev > 1) && (pdev->is_physfn)) {
		ret = pci_enable_sriov(pdev, max_config_dev - 1);
		if (ret)
			vxge_debug_ll_config(VXGE_ERR,
				"Failed to enable SRIOV: %d\n", ret);
	}

	/*
	 * Configure vpaths and get driver configured number of vpaths
	 * which is less than or equal to the maximum vpaths per function.
	 */
	no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config);
	if (!no_of_vpath) {
		vxge_debug_ll_config(VXGE_ERR,
			"%s: No more vpaths to configure", VXGE_DRIVER_NAME);
		ret = 0;
		goto _exit3;
	}

	/* Setting driver callbacks */
	attr.uld_callbacks.link_up = vxge_callback_link_up;
	attr.uld_callbacks.link_down = vxge_callback_link_down;
	attr.uld_callbacks.crit_err = vxge_callback_crit_err;

	status = vxge_hw_device_initialize(&hldev, &attr, device_config);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"Failed to initialize device (%d)", status);
		ret = -EINVAL;
		goto _exit3;
	}

	/* If FCS stripping is not disabled in the MAC, fail the driver load */
	if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: FCS stripping is not disabled in MAC"
			" failing driver load", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit4;
	}

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);

	/* set private device info */
	pci_set_drvdata(pdev, hldev);

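	/*
	 * Receive Traffic Hashing (RTH): only TCP over IPv4 flows are
	 * hashed (with the Jenkins algorithm) and spread across vpaths;
	 * hashing for every other frame type is left disabled below.
	 */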
	ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
	ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
	ll_config.addr_learn_en = addr_learn_en;
	ll_config.rth_algorithm = RTH_ALG_JENKINS;
	ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
	ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config.rth_bkt_sz = RTH_BUCKET_SIZE;
	ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
	ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;

	if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
		&vdev)) {
		ret = -EINVAL;
		goto _exit4;
	}

	vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	/* set private HW device info */
	hldev->ndev = vdev->ndev;
	vdev->mtu = VXGE_HW_DEFAULT_MTU;
	vdev->bar0 = attr.bar0;
	vdev->max_vpath_supported = max_vpath_supported;
	vdev->no_of_vpath = no_of_vpath;

	/* Virtual Path count */
	for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		if (j >= vdev->no_of_vpath)
			break;

		vdev->vpaths[j].is_configured = 1;
		vdev->vpaths[j].device_id = i;
		vdev->vpaths[j].fifo.driver_id = j;
		vdev->vpaths[j].ring.driver_id = j;
		vdev->vpaths[j].vdev = vdev;
		vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
		memcpy((u8 *)vdev->vpaths[j].macaddr,
				(u8 *)ll_config.device_hw_info.mac_addrs[i],
				ETH_ALEN);

		/* Initialize the mac address list header */
		INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);

		vdev->vpaths[j].mac_addr_cnt = 0;
		vdev->vpaths[j].mcast_addr_cnt = 0;
		j++;
	}
	vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
	vdev->max_config_port = max_config_port;

	vdev->vlan_tag_strip = vlan_tag_strip;

	/* map the hashing selector table to the configured vpaths */
	for (i = 0; i < vdev->no_of_vpath; i++)
		vdev->vpath_selector[i] = vpath_selector[i];

	macaddr = (u8 *)vdev->vpaths[0].macaddr;

	ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
	ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
	ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';

	vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
		vdev->ndev->name, ll_config.device_hw_info.serial_number);

	vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
		vdev->ndev->name, ll_config.device_hw_info.part_number);

	vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
		vdev->ndev->name, ll_config.device_hw_info.product_desc);

	vxge_debug_init(VXGE_TRACE,
		"%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
		vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
		macaddr[3], macaddr[4], macaddr[5]);

	vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
		vdev->ndev->name, vxge_hw_device_link_width_get(hldev));

	vxge_debug_init(VXGE_TRACE,
		"%s: Firmware version : %s Date : %s", vdev->ndev->name,
		ll_config.device_hw_info.fw_version.version,
		ll_config.device_hw_info.fw_date.date);

	if (new_device) {
		switch (ll_config.device_hw_info.function_mode) {
		case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
			vxge_debug_init(VXGE_TRACE,
				"%s: Single Function Mode Enabled",
				vdev->ndev->name);
			break;
		case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
			vxge_debug_init(VXGE_TRACE,
				"%s: Multi Function Mode Enabled",
				vdev->ndev->name);
			break;
		case VXGE_HW_FUNCTION_MODE_SRIOV:
			vxge_debug_init(VXGE_TRACE,
				"%s: Single Root IOV Mode Enabled",
				vdev->ndev->name);
			break;
		case VXGE_HW_FUNCTION_MODE_MRIOV:
			vxge_debug_init(VXGE_TRACE,
				"%s: Multi Root IOV Mode Enabled",
				vdev->ndev->name);
			break;
		}
	}

	vxge_print_parm(vdev, vpath_mask);

	/* Store the fw version for ethtool option */
	strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
	memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
	memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);

	/* Copy the station mac address to the list */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
		if (!entry) {
			vxge_debug_init(VXGE_ERR,
				"%s: mac_addr_list : memory allocation failed",
				vdev->ndev->name);
			ret = -ENOMEM;
			goto _exit5;
		}
		macaddr = (u8 *)&entry->macaddr;
		memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
		list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
		vdev->vpaths[i].mac_addr_cnt = 1;
	}

	kfree(device_config);

	/*
	 * INTA is shared in multi-function mode. This is unlike the INTA
	 * implementation in MR mode, where each VH has its own INTA message.
	 * - INTA is masked (disabled) as long as at least one function sets
	 * its TITAN_MASK_ALL_INT.ALARM bit.
	 * - INTA is unmasked (enabled) when all enabled functions have cleared
	 * their own TITAN_MASK_ALL_INT.ALARM bit.
	 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
	 * Though this driver leaves the top level interrupts unmasked while
	 * leaving the required module interrupt bits masked on exit, there
	 * could be a rogue driver around that does not follow this procedure
	 * resulting in a failure to generate interrupts. The following code is
	 * present to prevent such a failure.
	 */

	if (ll_config.device_hw_info.function_mode ==
		VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
		if (vdev->config.intr_type == INTA)
			vxge_hw_device_unmask_all(hldev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	return 0;

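	/*
	 * Error unwind: each label below releases what was acquired
	 * after the previous one, so a failing step above jumps to the
	 * label that tears down exactly the state set up so far.
	 */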
_exit5:
	for (i = 0; i < vdev->no_of_vpath; i++)
		vxge_free_mac_add_list(&vdev->vpaths[i]);

	vxge_device_unregister(hldev);
_exit4:
	pci_disable_sriov(pdev);
	vxge_hw_device_terminate(hldev);
_exit3:
	iounmap(attr.bar0);
_exit2:
	pci_release_regions(pdev);
_exit1:
	pci_disable_device(pdev);
_exit0:
	kfree(device_config);
	driver_config->config_dev_cnt--;
	pci_set_drvdata(pdev, NULL);
	return ret;
}

/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device.
 */
static void __devexit
vxge_remove(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev = NULL;
	struct net_device *dev;
	int i = 0;
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	u32 level_trace;
#endif

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);

	if (hldev == NULL)
		return;
	dev = hldev->ndev;
	vdev = netdev_priv(dev);

#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	level_trace = vdev->level_trace;
#endif
	vxge_debug_entryexit(level_trace,
		"%s:%d", __func__, __LINE__);

	vxge_debug_init(level_trace,
		"%s : removing PCI device...", __func__);
	vxge_device_unregister(hldev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_free_mac_add_list(&vdev->vpaths[i]);
		vdev->vpaths[i].mcast_addr_cnt = 0;
		vdev->vpaths[i].mac_addr_cnt = 0;
	}

	kfree(vdev->vpaths);

	iounmap(vdev->bar0);

	pci_disable_sriov(pdev);

	/* we are safe to free it now */
	free_netdev(dev);

	vxge_debug_init(level_trace,
		"%s:%d Device unregistered", __func__, __LINE__);

	vxge_hw_device_terminate(hldev);

	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	vxge_debug_entryexit(level_trace,
		"%s:%d Exiting...", __func__, __LINE__);
}

static struct pci_error_handlers vxge_err_handler = {
	.error_detected = vxge_io_error_detected,
	.slot_reset = vxge_io_slot_reset,
	.resume = vxge_io_resume,
};

static struct pci_driver vxge_driver = {
	.name = VXGE_DRIVER_NAME,
	.id_table = vxge_id_table,
	.probe = vxge_probe,
	.remove = __devexit_p(vxge_remove),
#ifdef CONFIG_PM
	.suspend = vxge_pm_suspend,
	.resume = vxge_pm_resume,
#endif
	.err_handler = &vxge_err_handler,
};

static int __init
vxge_starter(void)
{
	int ret = 0;
	char version[32];
	snprintf(version, 32, "%s", DRV_VERSION);

	printk(KERN_CRIT "%s: Copyright(c) 2002-2009 Neterion Inc\n",
		VXGE_DRIVER_NAME);
	printk(KERN_CRIT "%s: Driver version: %s\n",
		VXGE_DRIVER_NAME, version);

	verify_bandwidth();

	driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
	if (!driver_config)
		return -ENOMEM;

	ret = pci_register_driver(&vxge_driver);

	if (driver_config->config_dev_cnt &&
	   (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
		vxge_debug_init(VXGE_ERR,
			"%s: Configured %d of %d devices",
			VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
			driver_config->total_dev_cnt);

	if (ret)
		kfree(driver_config);

	return ret;
}

static void __exit
vxge_closer(void)
{
	pci_unregister_driver(&vxge_driver);
	kfree(driver_config);
}
module_init(vxge_starter);
module_exit(vxge_closer);