Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.38-rc1 (1126 lines, 32 kB)
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header. Use TSOH_DATA()
 * to find the packet header data. Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length. TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1. However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
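	/* Editorial note: (~dma_addr & 0xfff) + 1 evaluates to the number
	 * of bytes from dma_addr up to the next 4K boundary (a full 4K
	 * when dma_addr is already aligned).
	 */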
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue. The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA. Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked. Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
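				/* Editorial note: ACCESS_ONCE() forces
				 * read_count to be re-read from memory
				 * here rather than reusing a value the
				 * compiler may have cached.
				 */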
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				netif_tx_start_queue(tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission. We use one channel per CPU
 * (sharing when we have more CPUs than channels). On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
				    skb->ip_summed == CHECKSUM_PARTIAL ?
				    EFX_TXQ_TYPE_OFFLOAD : 0);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue. This barrier
	 * separates the update of read_count from the test of the
	 * queue state. */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
			netif_tx_wake_queue(tx_queue->core_txq);
		}
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why? Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list. Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation. It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true. Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{

	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue. Return 0 on success or 1 if
 * @tx_queue full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked. Update
			 * the xmit path's copy of read_count.
			 */
			netif_tx_stop_queue(tx_queue->core_txq);
			/* This memory barrier protects the change of
			 * queue state from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			netif_tx_start_queue(tx_queue->core_txq);
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary. It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
				      frag->page_offset, frag->size,
				      PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet. Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet. Return 0 on
 * success, or -1 if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued. In all cases @skb is consumed. Return
 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc)) {
			rc2 = NETDEV_TX_BUSY;
			goto unwind;
		}

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}


/*
 * Free up all TSO datastructures associated with tx_queue. This
 * routine should be called only once the tx_queue is both empty and
 * will no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= tx_queue->ptr_mask; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}