Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at 17431928194b36a0f88082df875e2e036da7fddf (1171 lines, 32 kB)
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)

/* We need to be able to nest calls to netif_tx_stop_queue(), partly
 * because of the 2 hardware queues associated with each core queue,
 * but also so that we can inhibit TX for reasons other than a full
 * hardware queue. */
void efx_stop_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	if (!channel->tx_queue)
		return;

	spin_lock_bh(&channel->tx_stop_lock);
	EFX_TRACE(efx, "stop TX queue\n");

	atomic_inc(&channel->tx_stop_count);
	netif_tx_stop_queue(
		netdev_get_tx_queue(
			efx->net_dev,
			channel->tx_queue->queue / EFX_TXQ_TYPES));

	spin_unlock_bh(&channel->tx_stop_lock);
}

/* Decrement core TX queue stop count and wake it if the count is 0 */
void efx_wake_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	if (!channel->tx_queue)
		return;

	local_bh_disable();
	if (atomic_dec_and_lock(&channel->tx_stop_count,
				&channel->tx_stop_lock)) {
		EFX_TRACE(efx, "waking TX queue\n");
		netif_tx_wake_queue(
			netdev_get_tx_queue(
				efx->net_dev,
				channel->tx_queue->queue / EFX_TXQ_TYPES));
		spin_unlock(&channel->tx_stop_lock);
	}
	local_bh_enable();
}

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
			  "complete\n", tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = EFX_TXQ_MASK - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					*(volatile unsigned *)
					&tx_queue->read_count;
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = EFX_TXQ_MASK - 1 - fill_level;
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}

			insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
		   "fragments for DMA\n", tx_queue->queue, skb->len,
		   skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;

	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & EFX_TXQ_MASK;
	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
				"completion id %x\n", tx_queue->queue,
				read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		tx_queue += EFX_TXQ_TYPE_OFFLOAD;

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			netif_tx_lock(efx->net_dev);
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(tx_queue->channel);
			}
			netif_tx_unlock(efx->net_dev);
		}
	}
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int txq_size;
	int i, rc;

	EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);

	/* Allocate software ring */
	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= EFX_TXQ_MASK; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->channel);
	}
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET 0
#else
#define TSOH_OFFSET NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE 128

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list.  Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{

	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
			" headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = EFX_TXQ_MASK - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				*(volatile unsigned *)&tx_queue->read_count;
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = EFX_TXQ_MASK - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}

		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >
				    EFX_TXQ_MASK);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   EFX_TXQ_MASK];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}


/*
 * Free up all TSO datastructures associated with tx_queue.  This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= EFX_TXQ_MASK; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}