Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
[PATCH] myri10ge - Fix spurious invocations of the watchdog reset handler
[PATCH] myri10ge - Write the firmware in 256-bytes chunks
[PATCH] Stop calling phy_stop_interrupts() twice
[PATCH] s2io driver bug fixes #2
[PATCH] s2io driver bug fixes #1
[PATCH] zd1211rw: Packet filter fix for managed (STA) mode
[PATCH] zd1211rw: Fixed endianness issue with length info tag detection
[PATCH] zd1211rw: Remove bogus assert
[PATCH] zd1211rw: Fix software encryption/decryption
[PATCH] zd1211rw: Pass more management frame types up to host
[PATCH] zd1211rw: Fixes radiotap header

+215 -250
+11 -13
drivers/net/myri10ge/myri10ge.c
··· 177 struct work_struct watchdog_work; 178 struct timer_list watchdog_timer; 179 int watchdog_tx_done; 180 int watchdog_resets; 181 int tx_linearized; 182 int pause; ··· 449 struct mcp_gen_header *hdr; 450 size_t hdr_offset; 451 int status; 452 453 if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { 454 dev_err(dev, "Unable to load %s firmware image via hotplug\n", ··· 481 goto abort_with_fw; 482 483 crc = crc32(~0, fw->data, fw->size); 484 - if (mgp->tx.boundary == 2048) { 485 - /* Avoid PCI burst on chipset with unaligned completions. */ 486 - int i; 487 - __iomem u32 *ptr = (__iomem u32 *) (mgp->sram + 488 - MYRI10GE_FW_OFFSET); 489 - for (i = 0; i < fw->size / 4; i++) { 490 - __raw_writel(((u32 *) fw->data)[i], ptr + i); 491 - wmb(); 492 - } 493 - } else { 494 - myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data, 495 - fw->size); 496 } 497 /* corruption checking is good for parity recovery and buggy chipset */ 498 memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); ··· 2543 2544 mgp = (struct myri10ge_priv *)arg; 2545 if (mgp->tx.req != mgp->tx.done && 2546 - mgp->tx.done == mgp->watchdog_tx_done) 2547 /* nic seems like it might be stuck.. */ 2548 schedule_work(&mgp->watchdog_work); 2549 else ··· 2553 jiffies + myri10ge_watchdog_timeout * HZ); 2554 2555 mgp->watchdog_tx_done = mgp->tx.done; 2556 } 2557 2558 static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
··· 177 struct work_struct watchdog_work; 178 struct timer_list watchdog_timer; 179 int watchdog_tx_done; 180 + int watchdog_tx_req; 181 int watchdog_resets; 182 int tx_linearized; 183 int pause; ··· 448 struct mcp_gen_header *hdr; 449 size_t hdr_offset; 450 int status; 451 + unsigned i; 452 453 if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { 454 dev_err(dev, "Unable to load %s firmware image via hotplug\n", ··· 479 goto abort_with_fw; 480 481 crc = crc32(~0, fw->data, fw->size); 482 + for (i = 0; i < fw->size; i += 256) { 483 + myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i, 484 + fw->data + i, 485 + min(256U, (unsigned)(fw->size - i))); 486 + mb(); 487 + readb(mgp->sram); 488 } 489 /* corruption checking is good for parity recovery and buggy chipset */ 490 memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); ··· 2547 2548 mgp = (struct myri10ge_priv *)arg; 2549 if (mgp->tx.req != mgp->tx.done && 2550 + mgp->tx.done == mgp->watchdog_tx_done && 2551 + mgp->watchdog_tx_req != mgp->watchdog_tx_done) 2552 /* nic seems like it might be stuck.. */ 2553 schedule_work(&mgp->watchdog_work); 2554 else ··· 2556 jiffies + myri10ge_watchdog_timeout * HZ); 2557 2558 mgp->watchdog_tx_done = mgp->tx.done; 2559 + mgp->watchdog_tx_req = mgp->tx.req; 2560 } 2561 2562 static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+2 -6
drivers/net/phy/phy.c
··· 419 420 /* phy_stop_machine 421 * 422 - * description: Stops the state machine timer, sets the state to 423 - * UP (unless it wasn't up yet), and then frees the interrupt, 424 - * if it is in use. This function must be called BEFORE 425 * phy_detach. 426 */ 427 void phy_stop_machine(struct phy_device *phydev) ··· 431 if (phydev->state > PHY_UP) 432 phydev->state = PHY_UP; 433 spin_unlock(&phydev->lock); 434 - 435 - if (phydev->irq != PHY_POLL) 436 - phy_stop_interrupts(phydev); 437 438 phydev->adjust_state = NULL; 439 }
··· 419 420 /* phy_stop_machine 421 * 422 + * description: Stops the state machine timer, sets the state to UP 423 + * (unless it wasn't up yet). This function must be called BEFORE 424 * phy_detach. 425 */ 426 void phy_stop_machine(struct phy_device *phydev) ··· 432 if (phydev->state > PHY_UP) 433 phydev->state = PHY_UP; 434 spin_unlock(&phydev->lock); 435 436 phydev->adjust_state = NULL; 437 }
+173 -213
drivers/net/s2io.c
··· 76 #include "s2io.h" 77 #include "s2io-regs.h" 78 79 - #define DRV_VERSION "2.0.14.2" 80 81 /* S2io Driver name & version. */ 82 static char s2io_driver_name[] = "Neterion"; ··· 370 END_SIGN 371 }; 372 373 /* Module Loadable parameters. */ 374 - static unsigned int tx_fifo_num = 1; 375 static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 376 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 377 - static unsigned int rx_ring_num = 1; 378 static unsigned int rx_ring_sz[MAX_RX_RINGS] = 379 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; 380 static unsigned int rts_frm_len[MAX_RX_RINGS] = 381 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 382 - static unsigned int rx_ring_mode = 1; 383 - static unsigned int use_continuous_tx_intrs = 1; 384 - static unsigned int rmac_pause_time = 0x100; 385 - static unsigned int mc_pause_threshold_q0q3 = 187; 386 - static unsigned int mc_pause_threshold_q4q7 = 187; 387 - static unsigned int shared_splits; 388 - static unsigned int tmac_util_period = 5; 389 - static unsigned int rmac_util_period = 5; 390 - static unsigned int bimodal = 0; 391 - static unsigned int l3l4hdr_size = 128; 392 - #ifndef CONFIG_S2IO_NAPI 393 - static unsigned int indicate_max_pkts; 394 - #endif 395 - /* Frequency of Rx desc syncs expressed as power of 2 */ 396 - static unsigned int rxsync_frequency = 3; 397 - /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ 398 - static unsigned int intr_type = 0; 399 - /* Large receive offload feature */ 400 - static unsigned int lro = 0; 401 - /* Max pkts to be aggregated by LRO at one time. If not specified, 402 - * aggregation happens until we hit max IP pkt size(64K) 403 - */ 404 - static unsigned int lro_max_pkts = 0xFFFF; 405 406 /* 407 * S2IO device table. 
··· 476 size += config->tx_cfg[i].fifo_len; 477 } 478 if (size > MAX_AVAILABLE_TXDS) { 479 - DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ", 480 - __FUNCTION__); 481 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); 482 - return FAILURE; 483 } 484 485 lst_size = (sizeof(TxD_t) * config->max_txds); ··· 558 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); 559 if (!nic->ufo_in_band_v) 560 return -ENOMEM; 561 562 /* Allocation and initialization of RXDs in Rings */ 563 size = 0; ··· 1225 break; 1226 } 1227 1228 - /* Enable Tx FIFO partition 0. */ 1229 val64 = readq(&bar0->tx_fifo_partition_0); 1230 val64 |= (TX_FIFO_PARTITION_EN); 1231 writeq(val64, &bar0->tx_fifo_partition_0); ··· 1662 writeq(temp64, &bar0->general_int_mask); 1663 /* 1664 * If Hercules adapter enable GPIO otherwise 1665 - * disabled all PCIX, Flash, MDIO, IIC and GPIO 1666 * interrupts for now. 1667 * TODO 1668 */ ··· 2131 frag->size, PCI_DMA_TODEVICE); 2132 } 2133 } 2134 - txdlp->Host_Control = 0; 2135 return(skb); 2136 } 2137 ··· 2383 skb->data = (void *) (unsigned long)tmp; 2384 skb->tail = (void *) (unsigned long)tmp; 2385 2386 - ((RxD3_t*)rxdp)->Buffer0_ptr = 2387 - pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2388 PCI_DMA_FROMDEVICE); 2389 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2390 if (nic->rxd_mode == RXD_MODE_3B) { 2391 /* Two buffer mode */ ··· 2403 (nic->pdev, skb->data, dev->mtu + 4, 2404 PCI_DMA_FROMDEVICE); 2405 2406 - /* Buffer-1 will be dummy buffer not used */ 2407 - ((RxD3_t*)rxdp)->Buffer1_ptr = 2408 - pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, 2409 - PCI_DMA_FROMDEVICE); 2410 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2411 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2412 (dev->mtu + 4); ··· 2634 } 2635 #endif 2636 2637 /** 2638 - * s2io_netpoll - Rx interrupt service handler for netpoll support 2639 * @dev : pointer to the device structure. 
2640 * Description: 2641 - * Polling 'interrupt' - used by things like netconsole to send skbs 2642 - * without having to re-enable interrupts. It's not called while 2643 - * the interrupt routine is executing. 2644 */ 2645 - 2646 - #ifdef CONFIG_NET_POLL_CONTROLLER 2647 static void s2io_netpoll(struct net_device *dev) 2648 { 2649 nic_t *nic = dev->priv; 2650 mac_info_t *mac_control; 2651 struct config_param *config; 2652 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2653 - u64 val64; 2654 int i; 2655 2656 disable_irq(dev->irq); ··· 2659 mac_control = &nic->mac_control; 2660 config = &nic->config; 2661 2662 - val64 = readq(&bar0->rx_traffic_int); 2663 writeq(val64, &bar0->rx_traffic_int); 2664 2665 for (i = 0; i < config->rx_ring_num; i++) 2666 rx_intr_handler(&mac_control->rings[i]); 2667 ··· 2736 /* If your are next to put index then it's FIFO full condition */ 2737 if ((get_block == put_block) && 2738 (get_info.offset + 1) == put_info.offset) { 2739 - DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name); 2740 break; 2741 } 2742 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); ··· 2756 HEADER_SNAP_SIZE, 2757 PCI_DMA_FROMDEVICE); 2758 } else if (nic->rxd_mode == RXD_MODE_3B) { 2759 - pci_unmap_single(nic->pdev, (dma_addr_t) 2760 ((RxD3_t*)rxdp)->Buffer0_ptr, 2761 BUF0_LEN, PCI_DMA_FROMDEVICE); 2762 - pci_unmap_single(nic->pdev, (dma_addr_t) 2763 - ((RxD3_t*)rxdp)->Buffer1_ptr, 2764 - BUF1_LEN, PCI_DMA_FROMDEVICE); 2765 pci_unmap_single(nic->pdev, (dma_addr_t) 2766 ((RxD3_t*)rxdp)->Buffer2_ptr, 2767 dev->mtu + 4, 2768 PCI_DMA_FROMDEVICE); 2769 } else { 2770 - pci_unmap_single(nic->pdev, (dma_addr_t) 2771 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2772 PCI_DMA_FROMDEVICE); 2773 pci_unmap_single(nic->pdev, (dma_addr_t) ··· 3352 3353 /* Clear certain PCI/PCI-X fields after reset */ 3354 if (sp->device_type == XFRAME_II_DEVICE) { 3355 - /* Clear parity err detect bit */ 3356 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); 3357 3358 /* Clearing PCIX Ecc status 
register */ ··· 3553 u64 val64; 3554 int i; 3555 3556 - for (i=0; i< nic->avail_msix_vectors; i++) { 3557 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3558 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3559 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); ··· 3572 int i; 3573 3574 /* Store and display */ 3575 - for (i=0; i< nic->avail_msix_vectors; i++) { 3576 val64 = (BIT(15) | vBIT(i, 26, 6)); 3577 writeq(val64, &bar0->xmsi_access); 3578 if (wait_for_msix_trans(nic, i)) { ··· 3833 TxD_t *txdp; 3834 TxFIFO_element_t __iomem *tx_fifo; 3835 unsigned long flags; 3836 - #ifdef NETIF_F_TSO 3837 - int mss; 3838 - #endif 3839 u16 vlan_tag = 0; 3840 int vlan_priority = 0; 3841 mac_info_t *mac_control; 3842 struct config_param *config; 3843 3844 mac_control = &sp->mac_control; 3845 config = &sp->config; ··· 3885 return 0; 3886 } 3887 3888 - txdp->Control_1 = 0; 3889 - txdp->Control_2 = 0; 3890 #ifdef NETIF_F_TSO 3891 - mss = skb_shinfo(skb)->gso_size; 3892 - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 3893 txdp->Control_1 |= TXD_TCP_LSO_EN; 3894 - txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); 3895 } 3896 #endif 3897 if (skb->ip_summed == CHECKSUM_HW) { ··· 3907 } 3908 3909 frg_len = skb->len - skb->data_len; 3910 - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) { 3911 int ufo_size; 3912 3913 - ufo_size = skb_shinfo(skb)->gso_size; 3914 ufo_size &= ~7; 3915 txdp->Control_1 |= TXD_UFO_EN; 3916 txdp->Control_1 |= TXD_UFO_MSS(ufo_size); ··· 3927 sp->ufo_in_band_v, 3928 sizeof(u64), PCI_DMA_TODEVICE); 3929 txdp++; 3930 - txdp->Control_1 = 0; 3931 - txdp->Control_2 = 0; 3932 } 3933 3934 txdp->Buffer_Pointer = pci_map_single 3935 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 3936 txdp->Host_Control = (unsigned long) skb; 3937 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); 3938 - 3939 - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3940 txdp->Control_1 |= TXD_UFO_EN; 3941 3942 frg_cnt = skb_shinfo(skb)->nr_frags; ··· 3948 (sp->pdev, frag->page, 
frag->page_offset, 3949 frag->size, PCI_DMA_TODEVICE); 3950 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); 3951 - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3952 txdp->Control_1 |= TXD_UFO_EN; 3953 } 3954 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3955 3956 - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3957 frg_cnt++; /* as Txd0 was used for inband header */ 3958 3959 tx_fifo = mac_control->tx_FIFO_start[queue]; ··· 3962 3963 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | 3964 TX_FIFO_LAST_LIST); 3965 3966 - #ifdef NETIF_F_TSO 3967 - if (mss) 3968 - val64 |= TX_FIFO_SPECIAL_FUNC; 3969 - #endif 3970 - if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3971 - val64 |= TX_FIFO_SPECIAL_FUNC; 3972 writeq(val64, &tx_fifo->List_Control); 3973 3974 mmiowb(); ··· 3998 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 3999 } 4000 4001 static irqreturn_t 4002 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) 4003 { 4004 struct net_device *dev = (struct net_device *) dev_id; 4005 nic_t *sp = dev->priv; 4006 int i; 4007 - int ret; 4008 mac_info_t *mac_control; 4009 struct config_param *config; 4010 ··· 4054 * reallocate the buffers from the interrupt handler itself, 4055 * else schedule a tasklet to reallocate the buffers. 
4056 */ 4057 - for (i = 0; i < config->rx_ring_num; i++) { 4058 - if (!sp->lro) { 4059 - int rxb_size = atomic_read(&sp->rx_bufs_left[i]); 4060 - int level = rx_buffer_level(sp, rxb_size, i); 4061 - 4062 - if ((level == PANIC) && (!TASKLET_IN_USE)) { 4063 - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", 4064 - dev->name); 4065 - DBG_PRINT(INTR_DBG, "PANIC levels\n"); 4066 - if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { 4067 - DBG_PRINT(ERR_DBG, "%s:Out of memory", 4068 - dev->name); 4069 - DBG_PRINT(ERR_DBG, " in ISR!!\n"); 4070 - clear_bit(0, (&sp->tasklet_status)); 4071 - atomic_dec(&sp->isr_cnt); 4072 - return IRQ_HANDLED; 4073 - } 4074 - clear_bit(0, (&sp->tasklet_status)); 4075 - } else if (level == LOW) { 4076 - tasklet_schedule(&sp->task); 4077 - } 4078 - } 4079 - else if (fill_rx_buffers(sp, i) == -ENOMEM) { 4080 - DBG_PRINT(ERR_DBG, "%s:Out of memory", 4081 - dev->name); 4082 - DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); 4083 - break; 4084 - } 4085 - } 4086 4087 atomic_dec(&sp->isr_cnt); 4088 return IRQ_HANDLED; ··· 4066 { 4067 ring_info_t *ring = (ring_info_t *)dev_id; 4068 nic_t *sp = ring->nic; 4069 - struct net_device *dev = (struct net_device *) dev_id; 4070 - int rxb_size, level, rng_n; 4071 4072 atomic_inc(&sp->isr_cnt); 4073 rx_intr_handler(ring); 4074 - 4075 - rng_n = ring->ring_no; 4076 - if (!sp->lro) { 4077 - rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); 4078 - level = rx_buffer_level(sp, rxb_size, rng_n); 4079 - 4080 - if ((level == PANIC) && (!TASKLET_IN_USE)) { 4081 - int ret; 4082 - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); 4083 - DBG_PRINT(INTR_DBG, "PANIC levels\n"); 4084 - if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { 4085 - DBG_PRINT(ERR_DBG, "Out of memory in %s", 4086 - __FUNCTION__); 4087 - clear_bit(0, (&sp->tasklet_status)); 4088 - return IRQ_HANDLED; 4089 - } 4090 - clear_bit(0, (&sp->tasklet_status)); 4091 - } else if (level == LOW) { 4092 - tasklet_schedule(&sp->task); 4093 - } 4094 - } 4095 - else if 
(fill_rx_buffers(sp, rng_n) == -ENOMEM) { 4096 - DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name); 4097 - DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); 4098 - } 4099 4100 atomic_dec(&sp->isr_cnt); 4101 - 4102 return IRQ_HANDLED; 4103 } 4104 ··· 4237 * else schedule a tasklet to reallocate the buffers. 4238 */ 4239 #ifndef CONFIG_S2IO_NAPI 4240 - for (i = 0; i < config->rx_ring_num; i++) { 4241 - if (!sp->lro) { 4242 - int ret; 4243 - int rxb_size = atomic_read(&sp->rx_bufs_left[i]); 4244 - int level = rx_buffer_level(sp, rxb_size, i); 4245 - 4246 - if ((level == PANIC) && (!TASKLET_IN_USE)) { 4247 - DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", 4248 - dev->name); 4249 - DBG_PRINT(INTR_DBG, "PANIC levels\n"); 4250 - if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { 4251 - DBG_PRINT(ERR_DBG, "%s:Out of memory", 4252 - dev->name); 4253 - DBG_PRINT(ERR_DBG, " in ISR!!\n"); 4254 - clear_bit(0, (&sp->tasklet_status)); 4255 - atomic_dec(&sp->isr_cnt); 4256 - writeq(org_mask, &bar0->general_int_mask); 4257 - return IRQ_HANDLED; 4258 - } 4259 - clear_bit(0, (&sp->tasklet_status)); 4260 - } else if (level == LOW) { 4261 - tasklet_schedule(&sp->task); 4262 - } 4263 - } 4264 - else if (fill_rx_buffers(sp, i) == -ENOMEM) { 4265 - DBG_PRINT(ERR_DBG, "%s:Out of memory", 4266 - dev->name); 4267 - DBG_PRINT(ERR_DBG, " in Rx intr!!\n"); 4268 - break; 4269 - } 4270 - } 4271 #endif 4272 writeq(org_mask, &bar0->general_int_mask); 4273 atomic_dec(&sp->isr_cnt); ··· 4268 if (cnt == 5) 4269 break; /* Updt failed */ 4270 } while(1); 4271 } 4272 } 4273 ··· 4904 } 4905 static void s2io_vpd_read(nic_t *nic) 4906 { 4907 - u8 vpd_data[256],data; 4908 int i=0, cnt, fail = 0; 4909 int vpd_addr = 0x80; 4910 ··· 4917 strcpy(nic->product_name, "Xframe I 10GbE network adapter"); 4918 vpd_addr = 0x50; 4919 } 4920 4921 for (i = 0; i < 256; i +=4 ) { 4922 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); ··· 4944 memset(nic->product_name, 0, vpd_data[1]); 4945 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); 
4946 } 4947 } 4948 4949 /** ··· 5263 else 5264 *data = 0; 5265 5266 - return 0; 5267 } 5268 5269 /** ··· 5721 return 0; 5722 } 5723 5724 5725 static struct ethtool_ops netdev_ethtool_ops = { 5726 .get_settings = s2io_ethtool_gset, ··· 5754 .get_sg = ethtool_op_get_sg, 5755 .set_sg = ethtool_op_set_sg, 5756 #ifdef NETIF_F_TSO 5757 - .get_tso = ethtool_op_get_tso, 5758 - .set_tso = ethtool_op_set_tso, 5759 #endif 5760 .get_ufo = ethtool_op_get_ufo, 5761 .set_ufo = ethtool_op_set_ufo, ··· 6318 s2io_set_multicast(dev); 6319 6320 if (sp->lro) { 6321 - /* Initialize max aggregatable pkts based on MTU */ 6322 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; 6323 /* Check if we can use(if specified) user provided value */ 6324 if (lro_max_pkts < sp->lro_max_aggr_per_sess) ··· 6419 * @cksum : FCS checksum of the frame. 6420 * @ring_no : the ring from which this RxD was extracted. 6421 * Description: 6422 - * This function is called by the Tx interrupt serivce routine to perform 6423 * some OS related operations on the SKB before passing it to the upper 6424 * layers. 
It mainly checks if the checksum is OK, if so adds it to the 6425 * SKBs cksum variable, increments the Rx packet count and passes the SKB ··· 6679 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 6680 } 6681 6682 - MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); 6683 - MODULE_LICENSE("GPL"); 6684 - MODULE_VERSION(DRV_VERSION); 6685 - 6686 - module_param(tx_fifo_num, int, 0); 6687 - module_param(rx_ring_num, int, 0); 6688 - module_param(rx_ring_mode, int, 0); 6689 - module_param_array(tx_fifo_len, uint, NULL, 0); 6690 - module_param_array(rx_ring_sz, uint, NULL, 0); 6691 - module_param_array(rts_frm_len, uint, NULL, 0); 6692 - module_param(use_continuous_tx_intrs, int, 1); 6693 - module_param(rmac_pause_time, int, 0); 6694 - module_param(mc_pause_threshold_q0q3, int, 0); 6695 - module_param(mc_pause_threshold_q4q7, int, 0); 6696 - module_param(shared_splits, int, 0); 6697 - module_param(tmac_util_period, int, 0); 6698 - module_param(rmac_util_period, int, 0); 6699 - module_param(bimodal, bool, 0); 6700 - module_param(l3l4hdr_size, int , 0); 6701 - #ifndef CONFIG_S2IO_NAPI 6702 - module_param(indicate_max_pkts, int, 0); 6703 - #endif 6704 - module_param(rxsync_frequency, int, 0); 6705 - module_param(intr_type, int, 0); 6706 - module_param(lro, int, 0); 6707 - module_param(lro_max_pkts, int, 0); 6708 - 6709 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) 6710 { 6711 if ( tx_fifo_num > 8) { ··· 6786 } 6787 if (dev_intr_type != MSI_X) { 6788 if (pci_request_regions(pdev, s2io_driver_name)) { 6789 - DBG_PRINT(ERR_DBG, "Request Regions failed\n"), 6790 - pci_disable_device(pdev); 6791 return -ENODEV; 6792 } 6793 } ··· 6911 /* initialize the shared memory used by the NIC and the host */ 6912 if (init_shared_mem(sp)) { 6913 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 6914 - __FUNCTION__); 6915 ret = -ENOMEM; 6916 goto mem_alloc_failed; 6917 } ··· 7048 dev->addr_len = ETH_ALEN; 7049 memcpy(dev->dev_addr, 
sp->def_mac_addr, ETH_ALEN); 7050 7051 /* 7052 * Initialize the tasklet status and link state flags 7053 * and the card state parameter ··· 7088 goto register_failed; 7089 } 7090 s2io_vpd_read(sp); 7091 - DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name); 7092 - DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n", 7093 - get_xena_rev_id(sp->pdev), 7094 - s2io_driver_version); 7095 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); 7096 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " 7097 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, 7098 sp->def_mac_addr[0].mac_addr[0], ··· 7393 if (ip->ihl != 5) /* IP has options */ 7394 return -1; 7395 7396 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || 7397 - !tcp->ack) { 7398 /* 7399 * Currently recognize only the ack control word and 7400 * any other control field being set would result in ··· 7553 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, 7554 u32 tcp_len) 7555 { 7556 - struct sk_buff *tmp, *first = lro->parent; 7557 7558 first->len += tcp_len; 7559 first->data_len = lro->frags_len; 7560 skb_pull(skb, (skb->len - tcp_len)); 7561 - if ((tmp = skb_shinfo(first)->frag_list)) { 7562 - while (tmp->next) 7563 - tmp = tmp->next; 7564 - tmp->next = skb; 7565 - } 7566 else 7567 skb_shinfo(first)->frag_list = skb; 7568 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; 7569 return; 7570 }
··· 76 #include "s2io.h" 77 #include "s2io-regs.h" 78 79 + #define DRV_VERSION "2.0.15.2" 80 81 /* S2io Driver name & version. */ 82 static char s2io_driver_name[] = "Neterion"; ··· 370 END_SIGN 371 }; 372 373 + MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); 374 + MODULE_LICENSE("GPL"); 375 + MODULE_VERSION(DRV_VERSION); 376 + 377 + 378 /* Module Loadable parameters. */ 379 + S2IO_PARM_INT(tx_fifo_num, 1); 380 + S2IO_PARM_INT(rx_ring_num, 1); 381 + 382 + 383 + S2IO_PARM_INT(rx_ring_mode, 1); 384 + S2IO_PARM_INT(use_continuous_tx_intrs, 1); 385 + S2IO_PARM_INT(rmac_pause_time, 0x100); 386 + S2IO_PARM_INT(mc_pause_threshold_q0q3, 187); 387 + S2IO_PARM_INT(mc_pause_threshold_q4q7, 187); 388 + S2IO_PARM_INT(shared_splits, 0); 389 + S2IO_PARM_INT(tmac_util_period, 5); 390 + S2IO_PARM_INT(rmac_util_period, 5); 391 + S2IO_PARM_INT(bimodal, 0); 392 + S2IO_PARM_INT(l3l4hdr_size, 128); 393 + /* Frequency of Rx desc syncs expressed as power of 2 */ 394 + S2IO_PARM_INT(rxsync_frequency, 3); 395 + /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ 396 + S2IO_PARM_INT(intr_type, 0); 397 + /* Large receive offload feature */ 398 + S2IO_PARM_INT(lro, 0); 399 + /* Max pkts to be aggregated by LRO at one time. If not specified, 400 + * aggregation happens until we hit max IP pkt size(64K) 401 + */ 402 + S2IO_PARM_INT(lro_max_pkts, 0xFFFF); 403 + #ifndef CONFIG_S2IO_NAPI 404 + S2IO_PARM_INT(indicate_max_pkts, 0); 405 + #endif 406 + 407 static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 408 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 409 static unsigned int rx_ring_sz[MAX_RX_RINGS] = 410 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; 411 static unsigned int rts_frm_len[MAX_RX_RINGS] = 412 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 413 + 414 + module_param_array(tx_fifo_len, uint, NULL, 0); 415 + module_param_array(rx_ring_sz, uint, NULL, 0); 416 + module_param_array(rts_frm_len, uint, NULL, 0); 417 418 /* 419 * S2IO device table. 
··· 464 size += config->tx_cfg[i].fifo_len; 465 } 466 if (size > MAX_AVAILABLE_TXDS) { 467 + DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, "); 468 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); 469 + return -EINVAL; 470 } 471 472 lst_size = (sizeof(TxD_t) * config->max_txds); ··· 547 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); 548 if (!nic->ufo_in_band_v) 549 return -ENOMEM; 550 + memset(nic->ufo_in_band_v, 0, size); 551 552 /* Allocation and initialization of RXDs in Rings */ 553 size = 0; ··· 1213 break; 1214 } 1215 1216 + /* Enable all configured Tx FIFO partitions */ 1217 val64 = readq(&bar0->tx_fifo_partition_0); 1218 val64 |= (TX_FIFO_PARTITION_EN); 1219 writeq(val64, &bar0->tx_fifo_partition_0); ··· 1650 writeq(temp64, &bar0->general_int_mask); 1651 /* 1652 * If Hercules adapter enable GPIO otherwise 1653 + * disable all PCIX, Flash, MDIO, IIC and GPIO 1654 * interrupts for now. 1655 * TODO 1656 */ ··· 2119 frag->size, PCI_DMA_TODEVICE); 2120 } 2121 } 2122 + memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds)); 2123 return(skb); 2124 } 2125 ··· 2371 skb->data = (void *) (unsigned long)tmp; 2372 skb->tail = (void *) (unsigned long)tmp; 2373 2374 + if (!(((RxD3_t*)rxdp)->Buffer0_ptr)) 2375 + ((RxD3_t*)rxdp)->Buffer0_ptr = 2376 + pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2377 PCI_DMA_FROMDEVICE); 2378 + else 2379 + pci_dma_sync_single_for_device(nic->pdev, 2380 + (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, 2381 + BUF0_LEN, PCI_DMA_FROMDEVICE); 2382 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2383 if (nic->rxd_mode == RXD_MODE_3B) { 2384 /* Two buffer mode */ ··· 2386 (nic->pdev, skb->data, dev->mtu + 4, 2387 PCI_DMA_FROMDEVICE); 2388 2389 + /* Buffer-1 will be dummy buffer. 
Not used */ 2390 + if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) { 2391 + ((RxD3_t*)rxdp)->Buffer1_ptr = 2392 + pci_map_single(nic->pdev, 2393 + ba->ba_1, BUF1_LEN, 2394 + PCI_DMA_FROMDEVICE); 2395 + } 2396 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2397 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2398 (dev->mtu + 4); ··· 2614 } 2615 #endif 2616 2617 + #ifdef CONFIG_NET_POLL_CONTROLLER 2618 /** 2619 + * s2io_netpoll - netpoll event handler entry point 2620 * @dev : pointer to the device structure. 2621 * Description: 2622 + * This function will be called by upper layer to check for events on the 2623 + * interface in situations where interrupts are disabled. It is used for 2624 + * specific in-kernel networking tasks, such as remote consoles and kernel 2625 + * debugging over the network (example netdump in RedHat). 2626 */ 2627 static void s2io_netpoll(struct net_device *dev) 2628 { 2629 nic_t *nic = dev->priv; 2630 mac_info_t *mac_control; 2631 struct config_param *config; 2632 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2633 + u64 val64 = 0xFFFFFFFFFFFFFFFFULL; 2634 int i; 2635 2636 disable_irq(dev->irq); ··· 2639 mac_control = &nic->mac_control; 2640 config = &nic->config; 2641 2642 writeq(val64, &bar0->rx_traffic_int); 2643 + writeq(val64, &bar0->tx_traffic_int); 2644 2645 + /* we need to free up the transmitted skbufs or else netpoll will 2646 + * run out of skbs and will fail and eventually netpoll application such 2647 + * as netdump will fail. 
2648 + */ 2649 + for (i = 0; i < config->tx_fifo_num; i++) 2650 + tx_intr_handler(&mac_control->fifos[i]); 2651 + 2652 + /* check for received packet and indicate up to network */ 2653 for (i = 0; i < config->rx_ring_num; i++) 2654 rx_intr_handler(&mac_control->rings[i]); 2655 ··· 2708 /* If your are next to put index then it's FIFO full condition */ 2709 if ((get_block == put_block) && 2710 (get_info.offset + 1) == put_info.offset) { 2711 + DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); 2712 break; 2713 } 2714 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); ··· 2728 HEADER_SNAP_SIZE, 2729 PCI_DMA_FROMDEVICE); 2730 } else if (nic->rxd_mode == RXD_MODE_3B) { 2731 + pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2732 ((RxD3_t*)rxdp)->Buffer0_ptr, 2733 BUF0_LEN, PCI_DMA_FROMDEVICE); 2734 pci_unmap_single(nic->pdev, (dma_addr_t) 2735 ((RxD3_t*)rxdp)->Buffer2_ptr, 2736 dev->mtu + 4, 2737 PCI_DMA_FROMDEVICE); 2738 } else { 2739 + pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2740 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2741 PCI_DMA_FROMDEVICE); 2742 pci_unmap_single(nic->pdev, (dma_addr_t) ··· 3327 3328 /* Clear certain PCI/PCI-X fields after reset */ 3329 if (sp->device_type == XFRAME_II_DEVICE) { 3330 + /* Clear "detected parity error" bit */ 3331 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); 3332 3333 /* Clearing PCIX Ecc status register */ ··· 3528 u64 val64; 3529 int i; 3530 3531 + for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3532 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3533 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3534 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); ··· 3547 int i; 3548 3549 /* Store and display */ 3550 + for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3551 val64 = (BIT(15) | vBIT(i, 26, 6)); 3552 writeq(val64, &bar0->xmsi_access); 3553 if (wait_for_msix_trans(nic, i)) { ··· 3808 TxD_t *txdp; 3809 TxFIFO_element_t __iomem *tx_fifo; 3810 unsigned long flags; 3811 u16 vlan_tag = 0; 3812 int vlan_priority = 
0; 3813 mac_info_t *mac_control; 3814 struct config_param *config; 3815 + int offload_type; 3816 3817 mac_control = &sp->mac_control; 3818 config = &sp->config; ··· 3862 return 0; 3863 } 3864 3865 + offload_type = s2io_offload_type(skb); 3866 #ifdef NETIF_F_TSO 3867 + if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 3868 txdp->Control_1 |= TXD_TCP_LSO_EN; 3869 + txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); 3870 } 3871 #endif 3872 if (skb->ip_summed == CHECKSUM_HW) { ··· 3886 } 3887 3888 frg_len = skb->len - skb->data_len; 3889 + if (offload_type == SKB_GSO_UDP) { 3890 int ufo_size; 3891 3892 + ufo_size = s2io_udp_mss(skb); 3893 ufo_size &= ~7; 3894 txdp->Control_1 |= TXD_UFO_EN; 3895 txdp->Control_1 |= TXD_UFO_MSS(ufo_size); ··· 3906 sp->ufo_in_band_v, 3907 sizeof(u64), PCI_DMA_TODEVICE); 3908 txdp++; 3909 } 3910 3911 txdp->Buffer_Pointer = pci_map_single 3912 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 3913 txdp->Host_Control = (unsigned long) skb; 3914 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); 3915 + if (offload_type == SKB_GSO_UDP) 3916 txdp->Control_1 |= TXD_UFO_EN; 3917 3918 frg_cnt = skb_shinfo(skb)->nr_frags; ··· 3930 (sp->pdev, frag->page, frag->page_offset, 3931 frag->size, PCI_DMA_TODEVICE); 3932 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); 3933 + if (offload_type == SKB_GSO_UDP) 3934 txdp->Control_1 |= TXD_UFO_EN; 3935 } 3936 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3937 3938 + if (offload_type == SKB_GSO_UDP) 3939 frg_cnt++; /* as Txd0 was used for inband header */ 3940 3941 tx_fifo = mac_control->tx_FIFO_start[queue]; ··· 3944 3945 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | 3946 TX_FIFO_LAST_LIST); 3947 + if (offload_type) 3948 + val64 |= TX_FIFO_SPECIAL_FUNC; 3949 3950 writeq(val64, &tx_fifo->List_Control); 3951 3952 mmiowb(); ··· 3984 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 3985 } 3986 3987 + static int s2io_chk_rx_buffers(nic_t *sp, int rng_n) 3988 + { 3989 + int rxb_size, level; 3990 + 3991 + if 
(!sp->lro) { 3992 + rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); 3993 + level = rx_buffer_level(sp, rxb_size, rng_n); 3994 + 3995 + if ((level == PANIC) && (!TASKLET_IN_USE)) { 3996 + int ret; 3997 + DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); 3998 + DBG_PRINT(INTR_DBG, "PANIC levels\n"); 3999 + if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { 4000 + DBG_PRINT(ERR_DBG, "Out of memory in %s", 4001 + __FUNCTION__); 4002 + clear_bit(0, (&sp->tasklet_status)); 4003 + return -1; 4004 + } 4005 + clear_bit(0, (&sp->tasklet_status)); 4006 + } else if (level == LOW) 4007 + tasklet_schedule(&sp->task); 4008 + 4009 + } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { 4010 + DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name); 4011 + DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); 4012 + } 4013 + return 0; 4014 + } 4015 + 4016 static irqreturn_t 4017 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) 4018 { 4019 struct net_device *dev = (struct net_device *) dev_id; 4020 nic_t *sp = dev->priv; 4021 int i; 4022 mac_info_t *mac_control; 4023 struct config_param *config; 4024 ··· 4012 * reallocate the buffers from the interrupt handler itself, 4013 * else schedule a tasklet to reallocate the buffers. 4014 */ 4015 + for (i = 0; i < config->rx_ring_num; i++) 4016 + s2io_chk_rx_buffers(sp, i); 4017 4018 atomic_dec(&sp->isr_cnt); 4019 return IRQ_HANDLED; ··· 4051 { 4052 ring_info_t *ring = (ring_info_t *)dev_id; 4053 nic_t *sp = ring->nic; 4054 4055 atomic_inc(&sp->isr_cnt); 4056 + 4057 rx_intr_handler(ring); 4058 + s2io_chk_rx_buffers(sp, ring->ring_no); 4059 4060 atomic_dec(&sp->isr_cnt); 4061 return IRQ_HANDLED; 4062 } 4063 ··· 4248 * else schedule a tasklet to reallocate the buffers. 
4249 */ 4250 #ifndef CONFIG_S2IO_NAPI 4251 + for (i = 0; i < config->rx_ring_num; i++) 4252 + s2io_chk_rx_buffers(sp, i); 4253 #endif 4254 writeq(org_mask, &bar0->general_int_mask); 4255 atomic_dec(&sp->isr_cnt); ··· 4308 if (cnt == 5) 4309 break; /* Updt failed */ 4310 } while(1); 4311 + } else { 4312 + memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t)); 4313 } 4314 } 4315 ··· 4942 } 4943 static void s2io_vpd_read(nic_t *nic) 4944 { 4945 + u8 *vpd_data; 4946 + u8 data; 4947 int i=0, cnt, fail = 0; 4948 int vpd_addr = 0x80; 4949 ··· 4954 strcpy(nic->product_name, "Xframe I 10GbE network adapter"); 4955 vpd_addr = 0x50; 4956 } 4957 + 4958 + vpd_data = kmalloc(256, GFP_KERNEL); 4959 + if (!vpd_data) 4960 + return; 4961 4962 for (i = 0; i < 256; i +=4 ) { 4963 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); ··· 4977 memset(nic->product_name, 0, vpd_data[1]); 4978 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); 4979 } 4980 + kfree(vpd_data); 4981 } 4982 4983 /** ··· 5295 else 5296 *data = 0; 5297 5298 + return *data; 5299 } 5300 5301 /** ··· 5753 return 0; 5754 } 5755 5756 + static u32 s2io_ethtool_op_get_tso(struct net_device *dev) 5757 + { 5758 + return (dev->features & NETIF_F_TSO) != 0; 5759 + } 5760 + static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data) 5761 + { 5762 + if (data) 5763 + dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); 5764 + else 5765 + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 5766 + 5767 + return 0; 5768 + } 5769 5770 static struct ethtool_ops netdev_ethtool_ops = { 5771 .get_settings = s2io_ethtool_gset, ··· 5773 .get_sg = ethtool_op_get_sg, 5774 .set_sg = ethtool_op_set_sg, 5775 #ifdef NETIF_F_TSO 5776 + .get_tso = s2io_ethtool_op_get_tso, 5777 + .set_tso = s2io_ethtool_op_set_tso, 5778 #endif 5779 .get_ufo = ethtool_op_get_ufo, 5780 .set_ufo = ethtool_op_set_ufo, ··· 6337 s2io_set_multicast(dev); 6338 6339 if (sp->lro) { 6340 + /* Initialize max aggregatable pkts per session based on MTU */ 6341 
sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; 6342 /* Check if we can use(if specified) user provided value */ 6343 if (lro_max_pkts < sp->lro_max_aggr_per_sess) ··· 6438 * @cksum : FCS checksum of the frame. 6439 * @ring_no : the ring from which this RxD was extracted. 6440 * Description: 6441 + * This function is called by the Rx interrupt serivce routine to perform 6442 * some OS related operations on the SKB before passing it to the upper 6443 * layers. It mainly checks if the checksum is OK, if so adds it to the 6444 * SKBs cksum variable, increments the Rx packet count and passes the SKB ··· 6698 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 6699 } 6700 6701 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) 6702 { 6703 if ( tx_fifo_num > 8) { ··· 6832 } 6833 if (dev_intr_type != MSI_X) { 6834 if (pci_request_regions(pdev, s2io_driver_name)) { 6835 + DBG_PRINT(ERR_DBG, "Request Regions failed\n"); 6836 + pci_disable_device(pdev); 6837 return -ENODEV; 6838 } 6839 } ··· 6957 /* initialize the shared memory used by the NIC and the host */ 6958 if (init_shared_mem(sp)) { 6959 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 6960 + dev->name); 6961 ret = -ENOMEM; 6962 goto mem_alloc_failed; 6963 } ··· 7094 dev->addr_len = ETH_ALEN; 7095 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); 7096 7097 + /* reset Nic and bring it to known state */ 7098 + s2io_reset(sp); 7099 + 7100 /* 7101 * Initialize the tasklet status and link state flags 7102 * and the card state parameter ··· 7131 goto register_failed; 7132 } 7133 s2io_vpd_read(sp); 7134 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); 7135 + DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name, 7136 + sp->product_name, get_xena_rev_id(sp->pdev)); 7137 + DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, 7138 + s2io_driver_version); 7139 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " 7140 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, 7141 
sp->def_mac_addr[0].mac_addr[0], ··· 7436 if (ip->ihl != 5) /* IP has options */ 7437 return -1; 7438 7439 + /* If we see CE codepoint in IP header, packet is not mergeable */ 7440 + if (INET_ECN_is_ce(ipv4_get_dsfield(ip))) 7441 + return -1; 7442 + 7443 + /* If we see ECE or CWR flags in TCP header, packet is not mergeable */ 7444 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || 7445 + tcp->ece || tcp->cwr || !tcp->ack) { 7446 /* 7447 * Currently recognize only the ack control word and 7448 * any other control field being set would result in ··· 7591 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, 7592 u32 tcp_len) 7593 { 7594 + struct sk_buff *first = lro->parent; 7595 7596 first->len += tcp_len; 7597 first->data_len = lro->frags_len; 7598 skb_pull(skb, (skb->len - tcp_len)); 7599 + if (skb_shinfo(first)->frag_list) 7600 + lro->last_frag->next = skb; 7601 else 7602 skb_shinfo(first)->frag_list = skb; 7603 + lro->last_frag = skb; 7604 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; 7605 return; 7606 }
+10
drivers/net/s2io.h
··· 719 /* Data structure to represent a LRO session */ 720 typedef struct lro { 721 struct sk_buff *parent; 722 u8 *l2h; 723 struct iphdr *iph; 724 struct tcphdr *tcph; ··· 1012 static void queue_rx_frame(struct sk_buff *skb); 1013 static void update_L3L4_header(nic_t *sp, lro_t *lro); 1014 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); 1015 #endif /* _S2IO_H */
··· 719 /* Data structure to represent a LRO session */ 720 typedef struct lro { 721 struct sk_buff *parent; 722 + struct sk_buff *last_frag; 723 u8 *l2h; 724 struct iphdr *iph; 725 struct tcphdr *tcph; ··· 1011 static void queue_rx_frame(struct sk_buff *skb); 1012 static void update_L3L4_header(nic_t *sp, lro_t *lro); 1013 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); 1014 + 1015 + #define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size 1016 + #define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size 1017 + #define s2io_offload_type(skb) skb_shinfo(skb)->gso_type 1018 + 1019 + #define S2IO_PARM_INT(X, def_val) \ 1020 + static unsigned int X = def_val;\ 1021 + module_param(X , uint, 0); 1022 + 1023 #endif /* _S2IO_H */
+2 -2
drivers/net/wireless/zd1211rw/zd_chip.c
··· 797 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, 798 { CR_ZD1211_RETRY_MAX, 0x2 }, 799 { CR_SNIFFER_ON, 0 }, 800 - { CR_RX_FILTER, AP_RX_FILTER }, 801 { CR_GROUP_HASH_P1, 0x00 }, 802 { CR_GROUP_HASH_P2, 0x80000000 }, 803 { CR_REG1, 0xa4 }, ··· 844 { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, 845 { CR_ZD1211B_TXOP, 0x01800824 }, 846 { CR_SNIFFER_ON, 0 }, 847 - { CR_RX_FILTER, AP_RX_FILTER }, 848 { CR_GROUP_HASH_P1, 0x00 }, 849 { CR_GROUP_HASH_P2, 0x80000000 }, 850 { CR_REG1, 0xa4 },
··· 797 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, 798 { CR_ZD1211_RETRY_MAX, 0x2 }, 799 { CR_SNIFFER_ON, 0 }, 800 + { CR_RX_FILTER, STA_RX_FILTER }, 801 { CR_GROUP_HASH_P1, 0x00 }, 802 { CR_GROUP_HASH_P2, 0x80000000 }, 803 { CR_REG1, 0xa4 }, ··· 844 { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, 845 { CR_ZD1211B_TXOP, 0x01800824 }, 846 { CR_SNIFFER_ON, 0 }, 847 + { CR_RX_FILTER, STA_RX_FILTER }, 848 { CR_GROUP_HASH_P1, 0x00 }, 849 { CR_GROUP_HASH_P2, 0x80000000 }, 850 { CR_REG1, 0xa4 },
+6 -4
drivers/net/wireless/zd1211rw/zd_chip.h
··· 461 462 #define CR_RX_FILTER CTL_REG(0x068c) 463 #define RX_FILTER_ASSOC_RESPONSE 0x0002 464 #define RX_FILTER_PROBE_RESPONSE 0x0020 465 #define RX_FILTER_BEACON 0x0100 466 #define RX_FILTER_AUTH 0x0800 467 - /* Sniff modus sets filter to 0xfffff */ 468 469 #define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) 470 #define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) ··· 550 #define CR_ZD1211B_AIFS_CTL2 CTL_REG(0x0b14) 551 #define CR_ZD1211B_TXOP CTL_REG(0x0b20) 552 #define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) 553 - 554 - #define AP_RX_FILTER 0x0400feff 555 - #define STA_RX_FILTER 0x0000ffff 556 557 #define CWIN_SIZE 0x007f043f 558
··· 461 462 #define CR_RX_FILTER CTL_REG(0x068c) 463 #define RX_FILTER_ASSOC_RESPONSE 0x0002 464 + #define RX_FILTER_REASSOC_RESPONSE 0x0008 465 #define RX_FILTER_PROBE_RESPONSE 0x0020 466 #define RX_FILTER_BEACON 0x0100 467 + #define RX_FILTER_DISASSOC 0x0400 468 #define RX_FILTER_AUTH 0x0800 469 + #define AP_RX_FILTER 0x0400feff 470 + #define STA_RX_FILTER 0x0000ffff 471 + 472 + /* Monitor mode sets filter to 0xfffff */ 473 474 #define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) 475 #define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) ··· 545 #define CR_ZD1211B_AIFS_CTL2 CTL_REG(0x0b14) 546 #define CR_ZD1211B_TXOP CTL_REG(0x0b20) 547 #define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) 548 549 #define CWIN_SIZE 0x007f043f 550
+8 -8
drivers/net/wireless/zd1211rw/zd_mac.c
··· 108 if (r) 109 goto disable_int; 110 111 - r = zd_set_encryption_type(chip, NO_WEP); 112 if (r) 113 goto disable_int; 114 ··· 138 { 139 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); 140 struct zd_ioreq32 ioreqs[3] = { 141 - { CR_RX_FILTER, RX_FILTER_BEACON|RX_FILTER_PROBE_RESPONSE| 142 - RX_FILTER_AUTH|RX_FILTER_ASSOC_RESPONSE }, 143 { CR_SNIFFER_ON, 0U }, 144 - { CR_ENCRYPTION_TYPE, NO_WEP }, 145 }; 146 147 if (ieee->iw_mode == IW_MODE_MONITOR) { ··· 713 struct zd_rt_hdr { 714 struct ieee80211_radiotap_header rt_hdr; 715 u8 rt_flags; 716 u16 rt_channel; 717 u16 rt_chbitmask; 718 - u16 rt_rate; 719 - }; 720 721 static void fill_rt_header(void *buffer, struct zd_mac *mac, 722 const struct ieee80211_rx_stats *stats, ··· 735 if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256)) 736 hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP; 737 738 /* FIXME: 802.11a */ 739 hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz( 740 _zd_chip_get_channel(&mac->chip))); 741 hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ | 742 ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) == 743 ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK)); 744 - 745 - hdr->rt_rate = stats->rate / 5; 746 } 747 748 /* Returns 1 if the data packet is for us and 0 otherwise. */
··· 108 if (r) 109 goto disable_int; 110 111 + /* We must inform the device that we are doing encryption/decryption in 112 + * software at the moment. */ 113 + r = zd_set_encryption_type(chip, ENC_SNIFFER); 114 if (r) 115 goto disable_int; 116 ··· 136 { 137 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); 138 struct zd_ioreq32 ioreqs[3] = { 139 + { CR_RX_FILTER, STA_RX_FILTER }, 140 { CR_SNIFFER_ON, 0U }, 141 }; 142 143 if (ieee->iw_mode == IW_MODE_MONITOR) { ··· 713 struct zd_rt_hdr { 714 struct ieee80211_radiotap_header rt_hdr; 715 u8 rt_flags; 716 + u8 rt_rate; 717 u16 rt_channel; 718 u16 rt_chbitmask; 719 + } __attribute__((packed)); 720 721 static void fill_rt_header(void *buffer, struct zd_mac *mac, 722 const struct ieee80211_rx_stats *stats, ··· 735 if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256)) 736 hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP; 737 738 + hdr->rt_rate = stats->rate / 5; 739 + 740 /* FIXME: 802.11a */ 741 hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz( 742 _zd_chip_get_channel(&mac->chip))); 743 hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ | 744 ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) == 745 ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK)); 746 } 747 748 /* Returns 1 if the data packet is for us and 0 otherwise. */
+3 -4
drivers/net/wireless/zd1211rw/zd_usb.c
··· 323 { 324 struct zd_usb_interrupt *intr = &usb->intr; 325 326 - ZD_ASSERT(in_interrupt()); 327 spin_lock(&intr->lock); 328 intr->read_regs_enabled = 0; 329 spin_unlock(&intr->lock); ··· 544 * be padded. Unaligned access might also happen if the length_info 545 * structure is not present. 546 */ 547 - if (get_unaligned(&length_info->tag) == RX_LENGTH_INFO_TAG) { 548 unsigned int l, k, n; 549 for (i = 0, l = 0;; i++) { 550 - k = le16_to_cpu(get_unaligned( 551 - &length_info->length[i])); 552 n = l+k; 553 if (n > length) 554 return;
··· 323 { 324 struct zd_usb_interrupt *intr = &usb->intr; 325 326 spin_lock(&intr->lock); 327 intr->read_regs_enabled = 0; 328 spin_unlock(&intr->lock); ··· 545 * be padded. Unaligned access might also happen if the length_info 546 * structure is not present. 547 */ 548 + if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG)) 549 + { 550 unsigned int l, k, n; 551 for (i = 0, l = 0;; i++) { 552 + k = le16_to_cpu(get_unaligned(&length_info->length[i])); 553 n = l+k; 554 if (n > length) 555 return;