qla3xxx: bugfix: Fixed jumbo frame handling for 3032 chip.

The scatter/gather lists were not being built correctly. When
large frames spanned several buffers, the chip would panic.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>

authored by Ron Mercer and committed by Jeff Garzik 3e71f6dd 97916330

+64 -36
+64 -35
drivers/net/qla3xxx.c
··· 2122 2123 if (ip) { 2124 if (ip->protocol == IPPROTO_TCP) { 2125 - mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC; 2126 mac_iocb_ptr->ip_hdr_off = offset; 2127 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2128 } else if (ip->protocol == IPPROTO_UDP) { 2129 - mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC; 2130 mac_iocb_ptr->ip_hdr_off = offset; 2131 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2132 } ··· 2136 } 2137 2138 /* 2139 - * The difference between 3022 and 3032 sends: 2140 - * 3022 only supports a simple single segment transmission. 2141 - * 3032 supports checksumming and scatter/gather lists (fragments). 2142 - * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2143 - * in the IOCB plus a chain of outbound address lists (OAL) that 2144 - * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) 2145 - * will used to point to an OAL when more ALP entries are required. 2146 - * The IOCB is always the top of the chain followed by one or more 2147 - * OALs (when necessary). 
2148 */ 2149 - static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) 2150 { 2151 - struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2152 - struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2153 - struct ql_tx_buf_cb *tx_cb; 2154 - u32 tot_len = skb->len; 2155 struct oal *oal; 2156 struct oal_entry *oal_entry; 2157 - int len; 2158 - struct ob_mac_iocb_req *mac_iocb_ptr; 2159 u64 map; 2160 int seg_cnt, seg = 0; 2161 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2162 2163 - if (unlikely(atomic_read(&qdev->tx_count) < 2)) { 2164 - if (!netif_queue_stopped(ndev)) 2165 - netif_stop_queue(ndev); 2166 - return NETDEV_TX_BUSY; 2167 - } 2168 - tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; 2169 seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags)); 2170 if(seg_cnt == -1) { 2171 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); 2172 - return NETDEV_TX_OK; 2173 - 2174 } 2175 - mac_iocb_ptr = tx_cb->queue_entry; 2176 - mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2177 - mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2178 - mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2179 - mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2180 - tx_cb->skb = skb; 2181 - if (skb->ip_summed == CHECKSUM_PARTIAL) 2182 - ql_hw_csum_setup(skb, mac_iocb_ptr); 2183 - len = skb_headlen(skb); 2184 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2185 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2186 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); ··· 2215 oal_entry->len = 2216 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY); 2217 } 2218 wmb(); 2219 qdev->req_producer_index++; 2220 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
··· 2122 2123 if (ip) { 2124 if (ip->protocol == IPPROTO_TCP) { 2125 + mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 2126 + OB_3032MAC_IOCB_REQ_IC; 2127 mac_iocb_ptr->ip_hdr_off = offset; 2128 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2129 } else if (ip->protocol == IPPROTO_UDP) { 2130 + mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 2131 + OB_3032MAC_IOCB_REQ_IC; 2132 mac_iocb_ptr->ip_hdr_off = offset; 2133 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2134 } ··· 2134 } 2135 2136 /* 2137 + * Map the buffers for this transmit. This will return 2138 + * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2139 */ 2140 + static int ql_send_map(struct ql3_adapter *qdev, 2141 + struct ob_mac_iocb_req *mac_iocb_ptr, 2142 + struct ql_tx_buf_cb *tx_cb, 2143 + struct sk_buff *skb) 2144 { 2145 struct oal *oal; 2146 struct oal_entry *oal_entry; 2147 + int len = skb_headlen(skb); 2148 u64 map; 2149 int seg_cnt, seg = 0; 2150 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2151 2152 seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags)); 2153 if(seg_cnt == -1) { 2154 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); 2155 + return NETDEV_TX_BUSY; 2156 } 2157 + /* 2158 + * Map the skb buffer first. 2159 + */ 2160 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2161 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2162 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); ··· 2235 oal_entry->len = 2236 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY); 2237 } 2238 + return NETDEV_TX_OK; 2239 + } 2240 + 2241 + /* 2242 + * The difference between 3022 and 3032 sends: 2243 + * 3022 only supports a simple single segment transmission. 2244 + * 3032 supports checksumming and scatter/gather lists (fragments). 2245 + * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2246 + * in the IOCB plus a chain of outbound address lists (OAL) that 2247 + * each contain 5 ALPs. 
The last ALP of the IOCB (3rd) or OAL (5th) 2248 + * will used to point to an OAL when more ALP entries are required. 2249 + * The IOCB is always the top of the chain followed by one or more 2250 + * OALs (when necessary). 2251 + */ 2252 + static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) 2253 + { 2254 + struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2255 + struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2256 + struct ql_tx_buf_cb *tx_cb; 2257 + u32 tot_len = skb->len; 2258 + struct ob_mac_iocb_req *mac_iocb_ptr; 2259 + 2260 + if (unlikely(atomic_read(&qdev->tx_count) < 2)) { 2261 + if (!netif_queue_stopped(ndev)) 2262 + netif_stop_queue(ndev); 2263 + return NETDEV_TX_BUSY; 2264 + } 2265 + 2266 + tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; 2267 + if((tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags))) == -1) { 2268 + printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); 2269 + return NETDEV_TX_OK; 2270 + } 2271 + 2272 + mac_iocb_ptr = tx_cb->queue_entry; 2273 + mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2274 + mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; 2275 + mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2276 + mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2277 + mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2278 + tx_cb->skb = skb; 2279 + if (skb->ip_summed == CHECKSUM_PARTIAL) 2280 + ql_hw_csum_setup(skb, mac_iocb_ptr); 2281 + 2282 + if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) { 2283 + printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__); 2284 + return NETDEV_TX_BUSY; 2285 + } 2286 + 2287 wmb(); 2288 qdev->req_producer_index++; 2289 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
-1
drivers/net/qla3xxx.h
··· 1094 u32 len; 1095 #define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ 1096 #define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */ 1097 - u32 reserved; 1098 }; 1099 1100 struct oal {
··· 1094 u32 len; 1095 #define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ 1096 #define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */ 1097 }; 1098 1099 struct oal {