Merge branch 'upstream-fixes' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

 drivers/net/e1000/e1000.h      |   3 ---
 drivers/net/e1000/e1000_main.c | 117 +++++++++++++++------------------
 2 files changed, 45 insertions(+), 75 deletions(-)

diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -225,9 +225,6 @@
         struct e1000_ps_page *ps_page;
         struct e1000_ps_page_dma *ps_page_dma;
 
-        struct sk_buff *rx_skb_top;
-        struct sk_buff *rx_skb_prev;
-
         /* cpu for rx queue */
         int cpu;
 
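The two fields removed here held the per-ring state for the multi-descriptor receive path that the e1000_main.c changes below delete: rx_skb_top pointed at the head skb of a packet still being assembled, rx_skb_prev at the last buffer chained onto it. A minimal sketch of what the now-removed path did with them, reconstructed from the deleted e1000_clean_rx_irq() lines; illustration only, not part of the applyable patch:

        /* sketch: non-EOP descriptor, so chain this buffer onto the
         * packet being assembled in rx_skb_top */
        if (!rx_ring->rx_skb_top) {
                rx_ring->rx_skb_top = skb;      /* first buffer is the head */
                rx_ring->rx_skb_top->len = length;
        } else if (!skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
                /* second buffer starts the head's frag_list */
                skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
                rx_ring->rx_skb_top->data_len += length;
        } else {
                /* later buffers append after the previous fragment */
                rx_ring->rx_skb_prev->next = skb;
                skb->prev = rx_ring->rx_skb_prev;
                rx_ring->rx_skb_top->data_len += length;
        }
        rx_ring->rx_skb_prev = skb;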
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -103,7 +103,7 @@
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
+#define DRV_VERSION "6.3.9-k4"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
@@ -1635,8 +1635,6 @@
 
         rxdr->next_to_clean = 0;
         rxdr->next_to_use = 0;
-        rxdr->rx_skb_top = NULL;
-        rxdr->rx_skb_prev = NULL;
 
         return 0;
 }
@@ -1711,8 +1713,23 @@
                 rctl |= adapter->rx_buffer_len << 0x11;
         } else {
                 rctl &= ~E1000_RCTL_SZ_4096;
-                rctl &= ~E1000_RCTL_BSEX;
-                rctl |= E1000_RCTL_SZ_2048;
+                rctl |= E1000_RCTL_BSEX;
+                switch (adapter->rx_buffer_len) {
+                case E1000_RXBUFFER_2048:
+                default:
+                        rctl |= E1000_RCTL_SZ_2048;
+                        rctl &= ~E1000_RCTL_BSEX;
+                        break;
+                case E1000_RXBUFFER_4096:
+                        rctl |= E1000_RCTL_SZ_4096;
+                        break;
+                case E1000_RXBUFFER_8192:
+                        rctl |= E1000_RCTL_SZ_8192;
+                        break;
+                case E1000_RXBUFFER_16384:
+                        rctl |= E1000_RCTL_SZ_16384;
+                        break;
+                }
         }
 
 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
@@ -2119,16 +2106,6 @@
                         ps_page->ps_page[j] = NULL;
                 }
         }
-
-        /* there also may be some cached data in our adapter */
-        if (rx_ring->rx_skb_top) {
-                dev_kfree_skb(rx_ring->rx_skb_top);
-
-                /* rx_skb_prev will be wiped out by rx_skb_top */
-                rx_ring->rx_skb_top = NULL;
-                rx_ring->rx_skb_prev = NULL;
-        }
-
 
         size = sizeof(struct e1000_buffer) * rx_ring->count;
         memset(rx_ring->buffer_info, 0, size);
@@ -3109,24 +3106,27 @@
                 break;
         }
 
-        /* since the driver code now supports splitting a packet across
-         * multiple descriptors, most of the fifo related limitations on
-         * jumbo frame traffic have gone away.
-         * simply use 2k descriptors for everything.
-         *
-         * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
-         * means we reserve 2 more, this pushes us to allocate from the next
-         * larger slab size
-         * i.e. RXBUFFER_2048 --> size-4096 slab */
 
-        /* recent hardware supports 1KB granularity */
         if (adapter->hw.mac_type > e1000_82547_rev_2) {
-                adapter->rx_buffer_len =
-                    ((max_frame < E1000_RXBUFFER_2048) ?
-                     max_frame : E1000_RXBUFFER_2048);
+                adapter->rx_buffer_len = max_frame;
                 E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
-        } else
-                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+        } else {
+                if(unlikely((adapter->hw.mac_type < e1000_82543) &&
+                   (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
+                        DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
+                                            "on 82542\n");
+                        return -EINVAL;
+                } else {
+                        if(max_frame <= E1000_RXBUFFER_2048)
+                                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+                        else if(max_frame <= E1000_RXBUFFER_4096)
+                                adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+                        else if(max_frame <= E1000_RXBUFFER_8192)
+                                adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+                        else if(max_frame <= E1000_RXBUFFER_16384)
+                                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+                }
+        }
 
         netdev->mtu = new_mtu;
 
@@ -3626,7 +3620,7 @@
         uint8_t last_byte;
         unsigned int i;
         int cleaned_count = 0;
-        boolean_t cleaned = FALSE, multi_descriptor = FALSE;
+        boolean_t cleaned = FALSE;
 
         i = rx_ring->next_to_clean;
         rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3658,43 +3652,12 @@
 
                 length = le16_to_cpu(rx_desc->length);
 
-                skb_put(skb, length);
-
-                if (!(status & E1000_RXD_STAT_EOP)) {
-                        if (!rx_ring->rx_skb_top) {
-                                rx_ring->rx_skb_top = skb;
-                                rx_ring->rx_skb_top->len = length;
-                                rx_ring->rx_skb_prev = skb;
-                        } else {
-                                if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
-                                        rx_ring->rx_skb_prev->next = skb;
-                                        skb->prev = rx_ring->rx_skb_prev;
-                                } else {
-                                        skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
-                                }
-                                rx_ring->rx_skb_prev = skb;
-                                rx_ring->rx_skb_top->data_len += length;
-                        }
+                if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+                        /* All receives must fit into a single buffer */
+                        E1000_DBG("%s: Receive packet consumed multiple"
+                                  " buffers\n", netdev->name);
+                        dev_kfree_skb_irq(skb);
                         goto next_desc;
-                } else {
-                        if (rx_ring->rx_skb_top) {
-                                if (skb_shinfo(rx_ring->rx_skb_top)
-                                                        ->frag_list) {
-                                        rx_ring->rx_skb_prev->next = skb;
-                                        skb->prev = rx_ring->rx_skb_prev;
-                                } else
-                                        skb_shinfo(rx_ring->rx_skb_top)
-                                                        ->frag_list = skb;
-
-                                rx_ring->rx_skb_top->data_len += length;
-                                rx_ring->rx_skb_top->len +=
-                                        rx_ring->rx_skb_top->data_len;
-
-                                skb = rx_ring->rx_skb_top;
-                                multi_descriptor = TRUE;
-                                rx_ring->rx_skb_top = NULL;
-                                rx_ring->rx_skb_prev = NULL;
-                        }
                 }
 
                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
@@ -3687,10 +3712,7 @@
                  * performance for small packets with large amounts
                  * of reassembly being done in the stack */
 #define E1000_CB_LENGTH 256
-                if ((length < E1000_CB_LENGTH) &&
-                    !rx_ring->rx_skb_top &&
-                    /* or maybe (status & E1000_RXD_STAT_EOP) && */
-                    !multi_descriptor) {
+                if (length < E1000_CB_LENGTH) {
                         struct sk_buff *new_skb =
                                 dev_alloc_skb(length + NET_IP_ALIGN);
                         if (new_skb) {
@@ -3701,7 +3729,8 @@
                                 skb = new_skb;
                                 skb_put(skb, length);
                         }
-                }
+                } else
+                        skb_put(skb, length);
 
                 /* end copybreak code */
 
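The two sizing hunks above work as a pair: e1000_change_mtu() now rounds the receive buffer up to the smallest supported size that holds max_frame, and e1000_setup_rctl() programs the matching RCTL size bits, setting E1000_RCTL_BSEX for the extended 4096/8192/16384 encodings and clearing it for the plain 2048 one.

The copybreak path that survives the change copies frames shorter than E1000_CB_LENGTH into a freshly allocated, right-sized skb so the full-size DMA buffer can go straight back onto the ring. A condensed sketch of the idea, assuming the surrounding e1000_clean_rx_irq() context (skb, length, buffer_info) and eliding the NET_IP_ALIGN copy-offset and DMA sync details:

        #define E1000_CB_LENGTH 256     /* copybreak threshold from this patch */

        if (length < E1000_CB_LENGTH) {
                /* small frame: copy it out and recycle the large buffer */
                struct sk_buff *new_skb =
                        dev_alloc_skb(length + NET_IP_ALIGN);
                if (new_skb) {
                        skb_reserve(new_skb, NET_IP_ALIGN);  /* align the IP header */
                        memcpy(new_skb->data, skb->data, length);
                        buffer_info->skb = skb;  /* leave the original on the ring */
                        skb = new_skb;
                }
                /* on allocation failure, fall through and keep the original skb */
        }
        skb_put(skb, length);   /* set data length on whichever skb we kept */

The cost is a memcpy of at most 256 bytes; the benefit is that the 2 KB-or-larger mapped receive buffer is reused immediately, which is where the patch comment expects a win for small packets with heavy reassembly in the stack.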