Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at commit 17431928194b36a0f88082df875e2e036da7fddf (3363 lines, 95 kB)
/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
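
/*
 * A worked example of the sizing above (illustrative only; the concrete
 * numbers just plug PAGE_SIZE into the macros):
 *
 *      4 KB pages:  FL0 maps order-0 pages and carves each into two 2 KB
 *                   chunks; FL1 maps order-1 (8 KB) allocations used as a
 *                   single 8 KB chunk.
 *      64 KB pages: FL0 still hands out 2 KB chunks (32 per page); FL1
 *                   switches to order-0 pages cut into 16 KB chunks.
 *
 * In both cases FLx_PG_ALLOC_SIZE is the unit mapped for DMA and
 * FLx_PG_CHUNK_SIZE is the slice given to an individual Rx descriptor.
 */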

#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
        TXQ_RUNNING = 1 << 0,           /* fetch engine is running */
        TXQ_LAST_PKT_DB = 1 << 1,       /* last packet rang the doorbell */
};

struct tx_desc {
        __be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
        __be32 addr_lo;
        __be32 len_gen;
        __be32 gen2;
        __be32 addr_hi;
};

struct tx_sw_desc {             /* SW state per Tx descriptor */
        struct sk_buff *skb;
        u8 eop;                 /* set if last descriptor for packet */
        u8 addr_idx;            /* buffer index of first SGL entry in descriptor */
        u8 fragidx;             /* first page fragment associated with descriptor */
        s8 sflit;               /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {             /* SW state per Rx descriptor */
        union {
                struct sk_buff *skb;
                struct fl_pg_chunk pg_chunk;
        };
        DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {               /* response queue descriptor */
        struct rss_header rss_hdr;
        __be32 flags;
        __be32 len_cq;
        u8 imm_data[47];
        u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
        struct pci_dev *pdev;
        dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 *      desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
        0,
#if SGE_NUM_GENBITS == 1
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
        4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
        4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
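
/*
 * The table above is a memoized form of the formula quoted in its comment.
 * A minimal equivalent sketch (hypothetical helper shown for illustration,
 * not used by the driver):
 *
 *      static unsigned int calc_flits_to_desc(unsigned int flits)
 *      {
 *              return flits <= 2 ? 1 : 1 + (flits - 2) / (WR_FLITS - 1);
 *      }
 *
 * Assuming WR_FLITS == 15, as the SGE_NUM_GENBITS == 2 entries imply, this
 * yields 1 descriptor for up to 15 flits, 2 for 16-29, 3 for 30-43 and
 * 4 for 44-57, matching the table.
 */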

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
        return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
        return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
        return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 * refill_rspq - replenish an SGE response queue
 * @adapter: the adapter
 * @q: the response queue to replenish
 * @credits: how many new responses to make available
 *
 * Replenishes a response queue by making the supplied number of responses
 * available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
                               const struct sge_rspq *q, unsigned int credits)
{
        rmb();
        t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
                     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 * Returns true if the platform needs sk_buff unmapping.  The compiler
 * optimizes away unnecessary code if this returns true.
 */
static inline int need_skb_unmap(void)
{
        /*
         * This structure is used to tell if the platform needs buffer
         * unmapping by checking if DEFINE_DMA_UNMAP_ADDR defines anything.
         */
        struct dummy {
                DEFINE_DMA_UNMAP_ADDR(addr);
        };

        return sizeof(struct dummy) != 0;
}

/**
 * unmap_skb - unmap a packet main body and its page fragments
 * @skb: the packet
 * @q: the Tx queue containing Tx descriptors for the packet
 * @cidx: index of Tx descriptor
 * @pdev: the PCI device
 *
 * Unmap the main body of an sk_buff and its page fragments, if any.
 * Because of the fairly complicated structure of our SGLs and the desire
 * to conserve space for metadata, the information necessary to unmap an
 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 * descriptors (the physical addresses of the various data buffers), and
 * the SW descriptor state (assorted indices).  The send functions
 * initialize the indices for the first packet descriptor so we can unmap
 * the buffers held in the first Tx descriptor here, and we have enough
 * information at this point to set the state for the next Tx descriptor.
 *
 * Note that it is possible to clean up the first descriptor of a packet
 * before the send routines have written the next descriptors, but this
 * race does not cause any problem.  We just end up writing the unmapping
 * info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
                             unsigned int cidx, struct pci_dev *pdev)
{
        const struct sg_ent *sgp;
        struct tx_sw_desc *d = &q->sdesc[cidx];
        int nfrags, frag_idx, curflit, j = d->addr_idx;

        sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
        frag_idx = d->fragidx;

        if (frag_idx == 0 && skb_headlen(skb)) {
                pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                j = 1;
        }

        curflit = d->sflit + 1 + j;
        nfrags = skb_shinfo(skb)->nr_frags;

        while (frag_idx < nfrags && curflit < WR_FLITS) {
                pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
                               skb_shinfo(skb)->frags[frag_idx].size,
                               PCI_DMA_TODEVICE);
                j ^= 1;
                if (j == 0) {
                        sgp++;
                        curflit++;
                }
                curflit++;
                frag_idx++;
        }

        if (frag_idx < nfrags) {        /* SGL continues into next Tx descriptor */
                d = cidx + 1 == q->size ? q->sdesc : d + 1;
                d->fragidx = frag_idx;
                d->addr_idx = j;
                d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
        }
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @adapter: the adapter
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
                         unsigned int n)
{
        struct tx_sw_desc *d;
        struct pci_dev *pdev = adapter->pdev;
        unsigned int cidx = q->cidx;

        const int need_unmap = need_skb_unmap() &&
                               q->cntxt_id >= FW_TUNNEL_SGEEC_START;

        d = &q->sdesc[cidx];
        while (n--) {
                if (d->skb) {   /* an SGL is present */
                        if (need_unmap)
                                unmap_skb(d->skb, q, cidx, pdev);
                        if (d->eop)
                                kfree_skb(d->skb);
                }
                ++d;
                if (++cidx == q->size) {
                        cidx = 0;
                        d = q->sdesc;
                }
        }
        q->cidx = cidx;
}

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @adapter: the adapter
 * @q: the Tx queue to reclaim completed descriptors from
 * @chunk: maximum number of descriptors to reclaim
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  Called with the Tx
 * queue's lock held.
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
                                                struct sge_txq *q,
                                                unsigned int chunk)
{
        unsigned int reclaim = q->processed - q->cleaned;

        reclaim = min(chunk, reclaim);
        if (reclaim) {
                free_tx_desc(adapter, q, reclaim);
                q->cleaned += reclaim;
                q->in_use -= reclaim;
        }
        return q->processed - q->cleaned;
}

/**
 * should_restart_tx - are there enough resources to restart a Tx queue?
 * @q: the Tx queue
 *
 * Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
        unsigned int r = q->processed - q->cleaned;

        return q->in_use - r < (q->size >> 1);
}

static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
                          struct rx_sw_desc *d)
{
        if (q->use_pages && d->pg_chunk.page) {
                (*d->pg_chunk.p_cnt)--;
                if (!*d->pg_chunk.p_cnt)
                        pci_unmap_page(pdev,
                                       d->pg_chunk.mapping,
                                       q->alloc_size, PCI_DMA_FROMDEVICE);

                put_page(d->pg_chunk.page);
                d->pg_chunk.page = NULL;
        } else {
                pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
                                 q->buf_size, PCI_DMA_FROMDEVICE);
                kfree_skb(d->skb);
                d->skb = NULL;
        }
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @pdev: the PCI device associated with the adapter
 * @q: the SGE free list to clean up
 *
 * Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 * this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
        unsigned int cidx = q->cidx;

        while (q->credits--) {
                struct rx_sw_desc *d = &q->sdesc[cidx];

                clear_rx_desc(pdev, q, d);
                if (++cidx == q->size)
                        cidx = 0;
        }

        if (q->pg_chunk.page) {
                __free_pages(q->pg_chunk.page, q->order);
                q->pg_chunk.page = NULL;
        }
}

/**
 * add_one_rx_buf - add a packet buffer to a free-buffer list
 * @va: buffer start VA
 * @len: the buffer length
 * @d: the HW Rx descriptor to write
 * @sd: the SW Rx descriptor to write
 * @gen: the generation bit value
 * @pdev: the PCI device associated with the adapter
 *
 * Add a buffer of the given length to the supplied HW and SW Rx
 * descriptors.
411 */ 412static inline int add_one_rx_buf(void *va, unsigned int len, 413 struct rx_desc *d, struct rx_sw_desc *sd, 414 unsigned int gen, struct pci_dev *pdev) 415{ 416 dma_addr_t mapping; 417 418 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); 419 if (unlikely(pci_dma_mapping_error(pdev, mapping))) 420 return -ENOMEM; 421 422 dma_unmap_addr_set(sd, dma_addr, mapping); 423 424 d->addr_lo = cpu_to_be32(mapping); 425 d->addr_hi = cpu_to_be32((u64) mapping >> 32); 426 wmb(); 427 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); 428 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); 429 return 0; 430} 431 432static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d, 433 unsigned int gen) 434{ 435 d->addr_lo = cpu_to_be32(mapping); 436 d->addr_hi = cpu_to_be32((u64) mapping >> 32); 437 wmb(); 438 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); 439 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); 440 return 0; 441} 442 443static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, 444 struct rx_sw_desc *sd, gfp_t gfp, 445 unsigned int order) 446{ 447 if (!q->pg_chunk.page) { 448 dma_addr_t mapping; 449 450 q->pg_chunk.page = alloc_pages(gfp, order); 451 if (unlikely(!q->pg_chunk.page)) 452 return -ENOMEM; 453 q->pg_chunk.va = page_address(q->pg_chunk.page); 454 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - 455 SGE_PG_RSVD; 456 q->pg_chunk.offset = 0; 457 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 458 0, q->alloc_size, PCI_DMA_FROMDEVICE); 459 q->pg_chunk.mapping = mapping; 460 } 461 sd->pg_chunk = q->pg_chunk; 462 463 prefetch(sd->pg_chunk.p_cnt); 464 465 q->pg_chunk.offset += q->buf_size; 466 if (q->pg_chunk.offset == (PAGE_SIZE << order)) 467 q->pg_chunk.page = NULL; 468 else { 469 q->pg_chunk.va += q->buf_size; 470 get_page(q->pg_chunk.page); 471 } 472 473 if (sd->pg_chunk.offset == 0) 474 *sd->pg_chunk.p_cnt = 1; 475 else 476 *sd->pg_chunk.p_cnt += 1; 477 478 return 0; 479} 480 481static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) 482{ 483 if (q->pend_cred >= q->credits / 4) { 484 q->pend_cred = 0; 485 wmb(); 486 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); 487 } 488} 489 490/** 491 * refill_fl - refill an SGE free-buffer list 492 * @adapter: the adapter 493 * @q: the free-list to refill 494 * @n: the number of new buffers to allocate 495 * @gfp: the gfp flags for allocating new buffers 496 * 497 * (Re)populate an SGE free-buffer list with up to @n new packet buffers, 498 * allocated with the supplied gfp flags. The caller must assure that 499 * @n does not exceed the queue's capacity. 
500 */ 501static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) 502{ 503 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 504 struct rx_desc *d = &q->desc[q->pidx]; 505 unsigned int count = 0; 506 507 while (n--) { 508 dma_addr_t mapping; 509 int err; 510 511 if (q->use_pages) { 512 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, 513 q->order))) { 514nomem: q->alloc_failed++; 515 break; 516 } 517 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; 518 dma_unmap_addr_set(sd, dma_addr, mapping); 519 520 add_one_rx_chunk(mapping, d, q->gen); 521 pci_dma_sync_single_for_device(adap->pdev, mapping, 522 q->buf_size - SGE_PG_RSVD, 523 PCI_DMA_FROMDEVICE); 524 } else { 525 void *buf_start; 526 527 struct sk_buff *skb = alloc_skb(q->buf_size, gfp); 528 if (!skb) 529 goto nomem; 530 531 sd->skb = skb; 532 buf_start = skb->data; 533 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, 534 q->gen, adap->pdev); 535 if (unlikely(err)) { 536 clear_rx_desc(adap->pdev, q, sd); 537 break; 538 } 539 } 540 541 d++; 542 sd++; 543 if (++q->pidx == q->size) { 544 q->pidx = 0; 545 q->gen ^= 1; 546 sd = q->sdesc; 547 d = q->desc; 548 } 549 count++; 550 } 551 552 q->credits += count; 553 q->pend_cred += count; 554 ring_fl_db(adap, q); 555 556 return count; 557} 558 559static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) 560{ 561 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits), 562 GFP_ATOMIC | __GFP_COMP); 563} 564 565/** 566 * recycle_rx_buf - recycle a receive buffer 567 * @adapter: the adapter 568 * @q: the SGE free list 569 * @idx: index of buffer to recycle 570 * 571 * Recycles the specified buffer on the given free list by adding it at 572 * the next available slot on the list. 573 */ 574static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, 575 unsigned int idx) 576{ 577 struct rx_desc *from = &q->desc[idx]; 578 struct rx_desc *to = &q->desc[q->pidx]; 579 580 q->sdesc[q->pidx] = q->sdesc[idx]; 581 to->addr_lo = from->addr_lo; /* already big endian */ 582 to->addr_hi = from->addr_hi; /* likewise */ 583 wmb(); 584 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); 585 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); 586 587 if (++q->pidx == q->size) { 588 q->pidx = 0; 589 q->gen ^= 1; 590 } 591 592 q->credits++; 593 q->pend_cred++; 594 ring_fl_db(adap, q); 595} 596 597/** 598 * alloc_ring - allocate resources for an SGE descriptor ring 599 * @pdev: the PCI device 600 * @nelem: the number of descriptors 601 * @elem_size: the size of each descriptor 602 * @sw_size: the size of the SW state associated with each ring element 603 * @phys: the physical address of the allocated ring 604 * @metadata: address of the array holding the SW state for the ring 605 * 606 * Allocates resources for an SGE descriptor ring, such as Tx queues, 607 * free buffer lists, or response queues. Each SGE ring requires 608 * space for its HW descriptors plus, optionally, space for the SW state 609 * associated with each HW entry (the metadata). The function returns 610 * three values: the virtual address for the HW ring (the return value 611 * of the function), the physical address of the HW ring, and the address 612 * of the SW ring. 
613 */ 614static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, 615 size_t sw_size, dma_addr_t * phys, void *metadata) 616{ 617 size_t len = nelem * elem_size; 618 void *s = NULL; 619 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); 620 621 if (!p) 622 return NULL; 623 if (sw_size && metadata) { 624 s = kcalloc(nelem, sw_size, GFP_KERNEL); 625 626 if (!s) { 627 dma_free_coherent(&pdev->dev, len, p, *phys); 628 return NULL; 629 } 630 *(void **)metadata = s; 631 } 632 memset(p, 0, len); 633 return p; 634} 635 636/** 637 * t3_reset_qset - reset a sge qset 638 * @q: the queue set 639 * 640 * Reset the qset structure. 641 * the NAPI structure is preserved in the event of 642 * the qset's reincarnation, for example during EEH recovery. 643 */ 644static void t3_reset_qset(struct sge_qset *q) 645{ 646 if (q->adap && 647 !(q->adap->flags & NAPI_INIT)) { 648 memset(q, 0, sizeof(*q)); 649 return; 650 } 651 652 q->adap = NULL; 653 memset(&q->rspq, 0, sizeof(q->rspq)); 654 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); 655 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); 656 q->txq_stopped = 0; 657 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ 658 q->rx_reclaim_timer.function = NULL; 659 q->nomem = 0; 660 napi_free_frags(&q->napi); 661} 662 663 664/** 665 * free_qset - free the resources of an SGE queue set 666 * @adapter: the adapter owning the queue set 667 * @q: the queue set 668 * 669 * Release the HW and SW resources associated with an SGE queue set, such 670 * as HW contexts, packet buffers, and descriptor rings. Traffic to the 671 * queue set must be quiesced prior to calling this. 672 */ 673static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) 674{ 675 int i; 676 struct pci_dev *pdev = adapter->pdev; 677 678 for (i = 0; i < SGE_RXQ_PER_SET; ++i) 679 if (q->fl[i].desc) { 680 spin_lock_irq(&adapter->sge.reg_lock); 681 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); 682 spin_unlock_irq(&adapter->sge.reg_lock); 683 free_rx_bufs(pdev, &q->fl[i]); 684 kfree(q->fl[i].sdesc); 685 dma_free_coherent(&pdev->dev, 686 q->fl[i].size * 687 sizeof(struct rx_desc), q->fl[i].desc, 688 q->fl[i].phys_addr); 689 } 690 691 for (i = 0; i < SGE_TXQ_PER_SET; ++i) 692 if (q->txq[i].desc) { 693 spin_lock_irq(&adapter->sge.reg_lock); 694 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); 695 spin_unlock_irq(&adapter->sge.reg_lock); 696 if (q->txq[i].sdesc) { 697 free_tx_desc(adapter, &q->txq[i], 698 q->txq[i].in_use); 699 kfree(q->txq[i].sdesc); 700 } 701 dma_free_coherent(&pdev->dev, 702 q->txq[i].size * 703 sizeof(struct tx_desc), 704 q->txq[i].desc, q->txq[i].phys_addr); 705 __skb_queue_purge(&q->txq[i].sendq); 706 } 707 708 if (q->rspq.desc) { 709 spin_lock_irq(&adapter->sge.reg_lock); 710 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); 711 spin_unlock_irq(&adapter->sge.reg_lock); 712 dma_free_coherent(&pdev->dev, 713 q->rspq.size * sizeof(struct rsp_desc), 714 q->rspq.desc, q->rspq.phys_addr); 715 } 716 717 t3_reset_qset(q); 718} 719 720/** 721 * init_qset_cntxt - initialize an SGE queue set context info 722 * @qs: the queue set 723 * @id: the queue set id 724 * 725 * Initializes the TIDs and context ids for the queues of a queue set. 
726 */ 727static void init_qset_cntxt(struct sge_qset *qs, unsigned int id) 728{ 729 qs->rspq.cntxt_id = id; 730 qs->fl[0].cntxt_id = 2 * id; 731 qs->fl[1].cntxt_id = 2 * id + 1; 732 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; 733 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; 734 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; 735 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; 736 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; 737} 738 739/** 740 * sgl_len - calculates the size of an SGL of the given capacity 741 * @n: the number of SGL entries 742 * 743 * Calculates the number of flits needed for a scatter/gather list that 744 * can hold the given number of entries. 745 */ 746static inline unsigned int sgl_len(unsigned int n) 747{ 748 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */ 749 return (3 * n) / 2 + (n & 1); 750} 751 752/** 753 * flits_to_desc - returns the num of Tx descriptors for the given flits 754 * @n: the number of flits 755 * 756 * Calculates the number of Tx descriptors needed for the supplied number 757 * of flits. 758 */ 759static inline unsigned int flits_to_desc(unsigned int n) 760{ 761 BUG_ON(n >= ARRAY_SIZE(flit_desc_map)); 762 return flit_desc_map[n]; 763} 764 765/** 766 * get_packet - return the next ingress packet buffer from a free list 767 * @adap: the adapter that received the packet 768 * @fl: the SGE free list holding the packet 769 * @len: the packet length including any SGE padding 770 * @drop_thres: # of remaining buffers before we start dropping packets 771 * 772 * Get the next packet from a free list and complete setup of the 773 * sk_buff. If the packet is small we make a copy and recycle the 774 * original buffer, otherwise we use the original buffer itself. If a 775 * positive drop threshold is supplied packets are dropped and their 776 * buffers recycled if (a) the number of remaining buffers is under the 777 * threshold and the packet is too big to copy, or (b) the packet should 778 * be copied but there is no memory for the copy. 
779 */ 780static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, 781 unsigned int len, unsigned int drop_thres) 782{ 783 struct sk_buff *skb = NULL; 784 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 785 786 prefetch(sd->skb->data); 787 fl->credits--; 788 789 if (len <= SGE_RX_COPY_THRES) { 790 skb = alloc_skb(len, GFP_ATOMIC); 791 if (likely(skb != NULL)) { 792 __skb_put(skb, len); 793 pci_dma_sync_single_for_cpu(adap->pdev, 794 dma_unmap_addr(sd, dma_addr), len, 795 PCI_DMA_FROMDEVICE); 796 memcpy(skb->data, sd->skb->data, len); 797 pci_dma_sync_single_for_device(adap->pdev, 798 dma_unmap_addr(sd, dma_addr), len, 799 PCI_DMA_FROMDEVICE); 800 } else if (!drop_thres) 801 goto use_orig_buf; 802recycle: 803 recycle_rx_buf(adap, fl, fl->cidx); 804 return skb; 805 } 806 807 if (unlikely(fl->credits < drop_thres) && 808 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), 809 GFP_ATOMIC | __GFP_COMP) == 0) 810 goto recycle; 811 812use_orig_buf: 813 pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), 814 fl->buf_size, PCI_DMA_FROMDEVICE); 815 skb = sd->skb; 816 skb_put(skb, len); 817 __refill_fl(adap, fl); 818 return skb; 819} 820 821/** 822 * get_packet_pg - return the next ingress packet buffer from a free list 823 * @adap: the adapter that received the packet 824 * @fl: the SGE free list holding the packet 825 * @len: the packet length including any SGE padding 826 * @drop_thres: # of remaining buffers before we start dropping packets 827 * 828 * Get the next packet from a free list populated with page chunks. 829 * If the packet is small we make a copy and recycle the original buffer, 830 * otherwise we attach the original buffer as a page fragment to a fresh 831 * sk_buff. If a positive drop threshold is supplied packets are dropped 832 * and their buffers recycled if (a) the number of remaining buffers is 833 * under the threshold and the packet is too big to copy, or (b) there's 834 * no system memory. 835 * 836 * Note: this function is similar to @get_packet but deals with Rx buffers 837 * that are page chunks rather than sk_buffs. 
838 */ 839static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, 840 struct sge_rspq *q, unsigned int len, 841 unsigned int drop_thres) 842{ 843 struct sk_buff *newskb, *skb; 844 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 845 846 dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr); 847 848 newskb = skb = q->pg_skb; 849 if (!skb && (len <= SGE_RX_COPY_THRES)) { 850 newskb = alloc_skb(len, GFP_ATOMIC); 851 if (likely(newskb != NULL)) { 852 __skb_put(newskb, len); 853 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, 854 PCI_DMA_FROMDEVICE); 855 memcpy(newskb->data, sd->pg_chunk.va, len); 856 pci_dma_sync_single_for_device(adap->pdev, dma_addr, 857 len, 858 PCI_DMA_FROMDEVICE); 859 } else if (!drop_thres) 860 return NULL; 861recycle: 862 fl->credits--; 863 recycle_rx_buf(adap, fl, fl->cidx); 864 q->rx_recycle_buf++; 865 return newskb; 866 } 867 868 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) 869 goto recycle; 870 871 prefetch(sd->pg_chunk.p_cnt); 872 873 if (!skb) 874 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); 875 876 if (unlikely(!newskb)) { 877 if (!drop_thres) 878 return NULL; 879 goto recycle; 880 } 881 882 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, 883 PCI_DMA_FROMDEVICE); 884 (*sd->pg_chunk.p_cnt)--; 885 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) 886 pci_unmap_page(adap->pdev, 887 sd->pg_chunk.mapping, 888 fl->alloc_size, 889 PCI_DMA_FROMDEVICE); 890 if (!skb) { 891 __skb_put(newskb, SGE_RX_PULL_LEN); 892 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); 893 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page, 894 sd->pg_chunk.offset + SGE_RX_PULL_LEN, 895 len - SGE_RX_PULL_LEN); 896 newskb->len = len; 897 newskb->data_len = len - SGE_RX_PULL_LEN; 898 newskb->truesize += newskb->data_len; 899 } else { 900 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags, 901 sd->pg_chunk.page, 902 sd->pg_chunk.offset, len); 903 newskb->len += len; 904 newskb->data_len += len; 905 newskb->truesize += len; 906 } 907 908 fl->credits--; 909 /* 910 * We do not refill FLs here, we let the caller do it to overlap a 911 * prefetch. 912 */ 913 return newskb; 914} 915 916/** 917 * get_imm_packet - return the next ingress packet buffer from a response 918 * @resp: the response descriptor containing the packet data 919 * 920 * Return a packet containing the immediate data of the given response. 921 */ 922static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp) 923{ 924 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC); 925 926 if (skb) { 927 __skb_put(skb, IMMED_PKT_SIZE); 928 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); 929 } 930 return skb; 931} 932 933/** 934 * calc_tx_descs - calculate the number of Tx descriptors for a packet 935 * @skb: the packet 936 * 937 * Returns the number of Tx descriptors needed for the given Ethernet 938 * packet. Ethernet packets require addition of WR and CPL headers. 
939 */ 940static inline unsigned int calc_tx_descs(const struct sk_buff *skb) 941{ 942 unsigned int flits; 943 944 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt)) 945 return 1; 946 947 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2; 948 if (skb_shinfo(skb)->gso_size) 949 flits++; 950 return flits_to_desc(flits); 951} 952 953/** 954 * make_sgl - populate a scatter/gather list for a packet 955 * @skb: the packet 956 * @sgp: the SGL to populate 957 * @start: start address of skb main body data to include in the SGL 958 * @len: length of skb main body data to include in the SGL 959 * @pdev: the PCI device 960 * 961 * Generates a scatter/gather list for the buffers that make up a packet 962 * and returns the SGL size in 8-byte words. The caller must size the SGL 963 * appropriately. 964 */ 965static inline unsigned int make_sgl(const struct sk_buff *skb, 966 struct sg_ent *sgp, unsigned char *start, 967 unsigned int len, struct pci_dev *pdev) 968{ 969 dma_addr_t mapping; 970 unsigned int i, j = 0, nfrags; 971 972 if (len) { 973 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); 974 sgp->len[0] = cpu_to_be32(len); 975 sgp->addr[0] = cpu_to_be64(mapping); 976 j = 1; 977 } 978 979 nfrags = skb_shinfo(skb)->nr_frags; 980 for (i = 0; i < nfrags; i++) { 981 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 982 983 mapping = pci_map_page(pdev, frag->page, frag->page_offset, 984 frag->size, PCI_DMA_TODEVICE); 985 sgp->len[j] = cpu_to_be32(frag->size); 986 sgp->addr[j] = cpu_to_be64(mapping); 987 j ^= 1; 988 if (j == 0) 989 ++sgp; 990 } 991 if (j) 992 sgp->len[j] = 0; 993 return ((nfrags + (len != 0)) * 3) / 2 + j; 994} 995 996/** 997 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell 998 * @adap: the adapter 999 * @q: the Tx queue 1000 * 1001 * Ring the doorbel if a Tx queue is asleep. There is a natural race, 1002 * where the HW is going to sleep just after we checked, however, 1003 * then the interrupt handler will detect the outstanding TX packet 1004 * and ring the doorbell for us. 1005 * 1006 * When GTS is disabled we unconditionally ring the doorbell. 1007 */ 1008static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) 1009{ 1010#if USE_GTS 1011 clear_bit(TXQ_LAST_PKT_DB, &q->flags); 1012 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { 1013 set_bit(TXQ_LAST_PKT_DB, &q->flags); 1014 t3_write_reg(adap, A_SG_KDOORBELL, 1015 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1016 } 1017#else 1018 wmb(); /* write descriptors before telling HW */ 1019 t3_write_reg(adap, A_SG_KDOORBELL, 1020 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1021#endif 1022} 1023 1024static inline void wr_gen2(struct tx_desc *d, unsigned int gen) 1025{ 1026#if SGE_NUM_GENBITS == 2 1027 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen); 1028#endif 1029} 1030 1031/** 1032 * write_wr_hdr_sgl - write a WR header and, optionally, SGL 1033 * @ndesc: number of Tx descriptors spanned by the SGL 1034 * @skb: the packet corresponding to the WR 1035 * @d: first Tx descriptor to be written 1036 * @pidx: index of above descriptors 1037 * @q: the SGE Tx queue 1038 * @sgl: the SGL 1039 * @flits: number of flits to the start of the SGL in the first descriptor 1040 * @sgl_flits: the SGL size in flits 1041 * @gen: the Tx descriptor generation 1042 * @wr_hi: top 32 bits of WR header based on WR type (big endian) 1043 * @wr_lo: low 32 bits of WR header based on WR type (big endian) 1044 * 1045 * Write a work request header and an associated SGL. 
If the SGL is 1046 * small enough to fit into one Tx descriptor it has already been written 1047 * and we just need to write the WR header. Otherwise we distribute the 1048 * SGL across the number of descriptors it spans. 1049 */ 1050static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb, 1051 struct tx_desc *d, unsigned int pidx, 1052 const struct sge_txq *q, 1053 const struct sg_ent *sgl, 1054 unsigned int flits, unsigned int sgl_flits, 1055 unsigned int gen, __be32 wr_hi, 1056 __be32 wr_lo) 1057{ 1058 struct work_request_hdr *wrp = (struct work_request_hdr *)d; 1059 struct tx_sw_desc *sd = &q->sdesc[pidx]; 1060 1061 sd->skb = skb; 1062 if (need_skb_unmap()) { 1063 sd->fragidx = 0; 1064 sd->addr_idx = 0; 1065 sd->sflit = flits; 1066 } 1067 1068 if (likely(ndesc == 1)) { 1069 sd->eop = 1; 1070 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | 1071 V_WR_SGLSFLT(flits)) | wr_hi; 1072 wmb(); 1073 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) | 1074 V_WR_GEN(gen)) | wr_lo; 1075 wr_gen2(d, gen); 1076 } else { 1077 unsigned int ogen = gen; 1078 const u64 *fp = (const u64 *)sgl; 1079 struct work_request_hdr *wp = wrp; 1080 1081 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | 1082 V_WR_SGLSFLT(flits)) | wr_hi; 1083 1084 while (sgl_flits) { 1085 unsigned int avail = WR_FLITS - flits; 1086 1087 if (avail > sgl_flits) 1088 avail = sgl_flits; 1089 memcpy(&d->flit[flits], fp, avail * sizeof(*fp)); 1090 sgl_flits -= avail; 1091 ndesc--; 1092 if (!sgl_flits) 1093 break; 1094 1095 fp += avail; 1096 d++; 1097 sd->eop = 0; 1098 sd++; 1099 if (++pidx == q->size) { 1100 pidx = 0; 1101 gen ^= 1; 1102 d = q->desc; 1103 sd = q->sdesc; 1104 } 1105 1106 sd->skb = skb; 1107 wrp = (struct work_request_hdr *)d; 1108 wrp->wr_hi = htonl(V_WR_DATATYPE(1) | 1109 V_WR_SGLSFLT(1)) | wr_hi; 1110 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS, 1111 sgl_flits + 1)) | 1112 V_WR_GEN(gen)) | wr_lo; 1113 wr_gen2(d, gen); 1114 flits = 1; 1115 } 1116 sd->eop = 1; 1117 wrp->wr_hi |= htonl(F_WR_EOP); 1118 wmb(); 1119 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo; 1120 wr_gen2((struct tx_desc *)wp, ogen); 1121 WARN_ON(ndesc != 0); 1122 } 1123} 1124 1125/** 1126 * write_tx_pkt_wr - write a TX_PKT work request 1127 * @adap: the adapter 1128 * @skb: the packet to send 1129 * @pi: the egress interface 1130 * @pidx: index of the first Tx descriptor to write 1131 * @gen: the generation value to use 1132 * @q: the Tx queue 1133 * @ndesc: number of descriptors the packet will occupy 1134 * @compl: the value of the COMPL bit to use 1135 * 1136 * Generate a TX_PKT work request to send the supplied packet. 
1137 */ 1138static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, 1139 const struct port_info *pi, 1140 unsigned int pidx, unsigned int gen, 1141 struct sge_txq *q, unsigned int ndesc, 1142 unsigned int compl) 1143{ 1144 unsigned int flits, sgl_flits, cntrl, tso_info; 1145 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; 1146 struct tx_desc *d = &q->desc[pidx]; 1147 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d; 1148 1149 cpl->len = htonl(skb->len); 1150 cntrl = V_TXPKT_INTF(pi->port_id); 1151 1152 if (vlan_tx_tag_present(skb) && pi->vlan_grp) 1153 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb)); 1154 1155 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); 1156 if (tso_info) { 1157 int eth_type; 1158 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl; 1159 1160 d->flit[2] = 0; 1161 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); 1162 hdr->cntrl = htonl(cntrl); 1163 eth_type = skb_network_offset(skb) == ETH_HLEN ? 1164 CPL_ETH_II : CPL_ETH_II_VLAN; 1165 tso_info |= V_LSO_ETH_TYPE(eth_type) | 1166 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | 1167 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff); 1168 hdr->lso_info = htonl(tso_info); 1169 flits = 3; 1170 } else { 1171 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); 1172 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */ 1173 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL); 1174 cpl->cntrl = htonl(cntrl); 1175 1176 if (skb->len <= WR_LEN - sizeof(*cpl)) { 1177 q->sdesc[pidx].skb = NULL; 1178 if (!skb->data_len) 1179 skb_copy_from_linear_data(skb, &d->flit[2], 1180 skb->len); 1181 else 1182 skb_copy_bits(skb, 0, &d->flit[2], skb->len); 1183 1184 flits = (skb->len + 7) / 8 + 2; 1185 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) | 1186 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) 1187 | F_WR_SOP | F_WR_EOP | compl); 1188 wmb(); 1189 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) | 1190 V_WR_TID(q->token)); 1191 wr_gen2(d, gen); 1192 kfree_skb(skb); 1193 return; 1194 } 1195 1196 flits = 2; 1197 } 1198 1199 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1200 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); 1201 1202 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, 1203 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), 1204 htonl(V_WR_TID(q->token))); 1205} 1206 1207static inline void t3_stop_tx_queue(struct netdev_queue *txq, 1208 struct sge_qset *qs, struct sge_txq *q) 1209{ 1210 netif_tx_stop_queue(txq); 1211 set_bit(TXQ_ETH, &qs->txq_stopped); 1212 q->stops++; 1213} 1214 1215/** 1216 * eth_xmit - add a packet to the Ethernet Tx queue 1217 * @skb: the packet 1218 * @dev: the egress net device 1219 * 1220 * Add a packet to an SGE Tx queue. Runs with softirqs disabled. 1221 */ 1222netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) 1223{ 1224 int qidx; 1225 unsigned int ndesc, pidx, credits, gen, compl; 1226 const struct port_info *pi = netdev_priv(dev); 1227 struct adapter *adap = pi->adapter; 1228 struct netdev_queue *txq; 1229 struct sge_qset *qs; 1230 struct sge_txq *q; 1231 1232 /* 1233 * The chip min packet length is 9 octets but play safe and reject 1234 * anything shorter than an Ethernet header. 
1235 */ 1236 if (unlikely(skb->len < ETH_HLEN)) { 1237 dev_kfree_skb(skb); 1238 return NETDEV_TX_OK; 1239 } 1240 1241 qidx = skb_get_queue_mapping(skb); 1242 qs = &pi->qs[qidx]; 1243 q = &qs->txq[TXQ_ETH]; 1244 txq = netdev_get_tx_queue(dev, qidx); 1245 1246 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); 1247 1248 credits = q->size - q->in_use; 1249 ndesc = calc_tx_descs(skb); 1250 1251 if (unlikely(credits < ndesc)) { 1252 t3_stop_tx_queue(txq, qs, q); 1253 dev_err(&adap->pdev->dev, 1254 "%s: Tx ring %u full while queue awake!\n", 1255 dev->name, q->cntxt_id & 7); 1256 return NETDEV_TX_BUSY; 1257 } 1258 1259 q->in_use += ndesc; 1260 if (unlikely(credits - ndesc < q->stop_thres)) { 1261 t3_stop_tx_queue(txq, qs, q); 1262 1263 if (should_restart_tx(q) && 1264 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { 1265 q->restarts++; 1266 netif_tx_start_queue(txq); 1267 } 1268 } 1269 1270 gen = q->gen; 1271 q->unacked += ndesc; 1272 compl = (q->unacked & 8) << (S_WR_COMPL - 3); 1273 q->unacked &= 7; 1274 pidx = q->pidx; 1275 q->pidx += ndesc; 1276 if (q->pidx >= q->size) { 1277 q->pidx -= q->size; 1278 q->gen ^= 1; 1279 } 1280 1281 /* update port statistics */ 1282 if (skb->ip_summed == CHECKSUM_COMPLETE) 1283 qs->port_stats[SGE_PSTAT_TX_CSUM]++; 1284 if (skb_shinfo(skb)->gso_size) 1285 qs->port_stats[SGE_PSTAT_TSO]++; 1286 if (vlan_tx_tag_present(skb) && pi->vlan_grp) 1287 qs->port_stats[SGE_PSTAT_VLANINS]++; 1288 1289 /* 1290 * We do not use Tx completion interrupts to free DMAd Tx packets. 1291 * This is good for performance but means that we rely on new Tx 1292 * packets arriving to run the destructors of completed packets, 1293 * which open up space in their sockets' send queues. Sometimes 1294 * we do not get such new packets causing Tx to stall. A single 1295 * UDP transmitter is a good example of this situation. We have 1296 * a clean up timer that periodically reclaims completed packets 1297 * but it doesn't run often enough (nor do we want it to) to prevent 1298 * lengthy stalls. A solution to this problem is to run the 1299 * destructor early, after the packet is queued but before it's DMAd. 1300 * A cons is that we lie to socket memory accounting, but the amount 1301 * of extra memory is reasonable (limited by the number of Tx 1302 * descriptors), the packets do actually get freed quickly by new 1303 * packets almost always, and for protocols like TCP that wait for 1304 * acks to really free up the data the extra memory is even less. 1305 * On the positive side we run the destructors on the sending CPU 1306 * rather than on a potentially different completing CPU, usually a 1307 * good thing. We also run them without holding our Tx queue lock, 1308 * unlike what reclaim_completed_tx() would otherwise do. 1309 * 1310 * Run the destructor before telling the DMA engine about the packet 1311 * to make sure it doesn't complete and get freed prematurely. 1312 */ 1313 if (likely(!skb_shared(skb))) 1314 skb_orphan(skb); 1315 1316 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); 1317 check_ring_tx_db(adap, q); 1318 return NETDEV_TX_OK; 1319} 1320 1321/** 1322 * write_imm - write a packet into a Tx descriptor as immediate data 1323 * @d: the Tx descriptor to write 1324 * @skb: the packet 1325 * @len: the length of packet data to write as immediate data 1326 * @gen: the generation bit value to write 1327 * 1328 * Writes a packet as immediate data into a Tx descriptor. The packet 1329 * contains a work request at its beginning. 
We must write the packet 1330 * carefully so the SGE doesn't read it accidentally before it's written 1331 * in its entirety. 1332 */ 1333static inline void write_imm(struct tx_desc *d, struct sk_buff *skb, 1334 unsigned int len, unsigned int gen) 1335{ 1336 struct work_request_hdr *from = (struct work_request_hdr *)skb->data; 1337 struct work_request_hdr *to = (struct work_request_hdr *)d; 1338 1339 if (likely(!skb->data_len)) 1340 memcpy(&to[1], &from[1], len - sizeof(*from)); 1341 else 1342 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); 1343 1344 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | 1345 V_WR_BCNTLFLT(len & 7)); 1346 wmb(); 1347 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) | 1348 V_WR_LEN((len + 7) / 8)); 1349 wr_gen2(d, gen); 1350 kfree_skb(skb); 1351} 1352 1353/** 1354 * check_desc_avail - check descriptor availability on a send queue 1355 * @adap: the adapter 1356 * @q: the send queue 1357 * @skb: the packet needing the descriptors 1358 * @ndesc: the number of Tx descriptors needed 1359 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL) 1360 * 1361 * Checks if the requested number of Tx descriptors is available on an 1362 * SGE send queue. If the queue is already suspended or not enough 1363 * descriptors are available the packet is queued for later transmission. 1364 * Must be called with the Tx queue locked. 1365 * 1366 * Returns 0 if enough descriptors are available, 1 if there aren't 1367 * enough descriptors and the packet has been queued, and 2 if the caller 1368 * needs to retry because there weren't enough descriptors at the 1369 * beginning of the call but some freed up in the mean time. 1370 */ 1371static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, 1372 struct sk_buff *skb, unsigned int ndesc, 1373 unsigned int qid) 1374{ 1375 if (unlikely(!skb_queue_empty(&q->sendq))) { 1376 addq_exit:__skb_queue_tail(&q->sendq, skb); 1377 return 1; 1378 } 1379 if (unlikely(q->size - q->in_use < ndesc)) { 1380 struct sge_qset *qs = txq_to_qset(q, qid); 1381 1382 set_bit(qid, &qs->txq_stopped); 1383 smp_mb__after_clear_bit(); 1384 1385 if (should_restart_tx(q) && 1386 test_and_clear_bit(qid, &qs->txq_stopped)) 1387 return 2; 1388 1389 q->stops++; 1390 goto addq_exit; 1391 } 1392 return 0; 1393} 1394 1395/** 1396 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs 1397 * @q: the SGE control Tx queue 1398 * 1399 * This is a variant of reclaim_completed_tx() that is used for Tx queues 1400 * that send only immediate data (presently just the control queues) and 1401 * thus do not have any sk_buffs to release. 1402 */ 1403static inline void reclaim_completed_tx_imm(struct sge_txq *q) 1404{ 1405 unsigned int reclaim = q->processed - q->cleaned; 1406 1407 q->in_use -= reclaim; 1408 q->cleaned += reclaim; 1409} 1410 1411static inline int immediate(const struct sk_buff *skb) 1412{ 1413 return skb->len <= WR_LEN; 1414} 1415 1416/** 1417 * ctrl_xmit - send a packet through an SGE control Tx queue 1418 * @adap: the adapter 1419 * @q: the control queue 1420 * @skb: the packet 1421 * 1422 * Send a packet through an SGE control Tx queue. Packets sent through 1423 * a control queue must fit entirely as immediate data in a single Tx 1424 * descriptor and have no page fragments. 
1425 */ 1426static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, 1427 struct sk_buff *skb) 1428{ 1429 int ret; 1430 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data; 1431 1432 if (unlikely(!immediate(skb))) { 1433 WARN_ON(1); 1434 dev_kfree_skb(skb); 1435 return NET_XMIT_SUCCESS; 1436 } 1437 1438 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP); 1439 wrp->wr_lo = htonl(V_WR_TID(q->token)); 1440 1441 spin_lock(&q->lock); 1442 again:reclaim_completed_tx_imm(q); 1443 1444 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); 1445 if (unlikely(ret)) { 1446 if (ret == 1) { 1447 spin_unlock(&q->lock); 1448 return NET_XMIT_CN; 1449 } 1450 goto again; 1451 } 1452 1453 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); 1454 1455 q->in_use++; 1456 if (++q->pidx >= q->size) { 1457 q->pidx = 0; 1458 q->gen ^= 1; 1459 } 1460 spin_unlock(&q->lock); 1461 wmb(); 1462 t3_write_reg(adap, A_SG_KDOORBELL, 1463 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1464 return NET_XMIT_SUCCESS; 1465} 1466 1467/** 1468 * restart_ctrlq - restart a suspended control queue 1469 * @qs: the queue set cotaining the control queue 1470 * 1471 * Resumes transmission on a suspended Tx control queue. 1472 */ 1473static void restart_ctrlq(unsigned long data) 1474{ 1475 struct sk_buff *skb; 1476 struct sge_qset *qs = (struct sge_qset *)data; 1477 struct sge_txq *q = &qs->txq[TXQ_CTRL]; 1478 1479 spin_lock(&q->lock); 1480 again:reclaim_completed_tx_imm(q); 1481 1482 while (q->in_use < q->size && 1483 (skb = __skb_dequeue(&q->sendq)) != NULL) { 1484 1485 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); 1486 1487 if (++q->pidx >= q->size) { 1488 q->pidx = 0; 1489 q->gen ^= 1; 1490 } 1491 q->in_use++; 1492 } 1493 1494 if (!skb_queue_empty(&q->sendq)) { 1495 set_bit(TXQ_CTRL, &qs->txq_stopped); 1496 smp_mb__after_clear_bit(); 1497 1498 if (should_restart_tx(q) && 1499 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) 1500 goto again; 1501 q->stops++; 1502 } 1503 1504 spin_unlock(&q->lock); 1505 wmb(); 1506 t3_write_reg(qs->adap, A_SG_KDOORBELL, 1507 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1508} 1509 1510/* 1511 * Send a management message through control queue 0 1512 */ 1513int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) 1514{ 1515 int ret; 1516 local_bh_disable(); 1517 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); 1518 local_bh_enable(); 1519 1520 return ret; 1521} 1522 1523/** 1524 * deferred_unmap_destructor - unmap a packet when it is freed 1525 * @skb: the packet 1526 * 1527 * This is the packet destructor used for Tx packets that need to remain 1528 * mapped until they are freed rather than until their Tx descriptors are 1529 * freed. 
1530 */ 1531static void deferred_unmap_destructor(struct sk_buff *skb) 1532{ 1533 int i; 1534 const dma_addr_t *p; 1535 const struct skb_shared_info *si; 1536 const struct deferred_unmap_info *dui; 1537 1538 dui = (struct deferred_unmap_info *)skb->head; 1539 p = dui->addr; 1540 1541 if (skb->tail - skb->transport_header) 1542 pci_unmap_single(dui->pdev, *p++, 1543 skb->tail - skb->transport_header, 1544 PCI_DMA_TODEVICE); 1545 1546 si = skb_shinfo(skb); 1547 for (i = 0; i < si->nr_frags; i++) 1548 pci_unmap_page(dui->pdev, *p++, si->frags[i].size, 1549 PCI_DMA_TODEVICE); 1550} 1551 1552static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, 1553 const struct sg_ent *sgl, int sgl_flits) 1554{ 1555 dma_addr_t *p; 1556 struct deferred_unmap_info *dui; 1557 1558 dui = (struct deferred_unmap_info *)skb->head; 1559 dui->pdev = pdev; 1560 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) { 1561 *p++ = be64_to_cpu(sgl->addr[0]); 1562 *p++ = be64_to_cpu(sgl->addr[1]); 1563 } 1564 if (sgl_flits) 1565 *p = be64_to_cpu(sgl->addr[0]); 1566} 1567 1568/** 1569 * write_ofld_wr - write an offload work request 1570 * @adap: the adapter 1571 * @skb: the packet to send 1572 * @q: the Tx queue 1573 * @pidx: index of the first Tx descriptor to write 1574 * @gen: the generation value to use 1575 * @ndesc: number of descriptors the packet will occupy 1576 * 1577 * Write an offload work request to send the supplied packet. The packet 1578 * data already carry the work request with most fields populated. 1579 */ 1580static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, 1581 struct sge_txq *q, unsigned int pidx, 1582 unsigned int gen, unsigned int ndesc) 1583{ 1584 unsigned int sgl_flits, flits; 1585 struct work_request_hdr *from; 1586 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; 1587 struct tx_desc *d = &q->desc[pidx]; 1588 1589 if (immediate(skb)) { 1590 q->sdesc[pidx].skb = NULL; 1591 write_imm(d, skb, skb->len, gen); 1592 return; 1593 } 1594 1595 /* Only TX_DATA builds SGLs */ 1596 1597 from = (struct work_request_hdr *)skb->data; 1598 memcpy(&d->flit[1], &from[1], 1599 skb_transport_offset(skb) - sizeof(*from)); 1600 1601 flits = skb_transport_offset(skb) / 8; 1602 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1603 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), 1604 skb->tail - skb->transport_header, 1605 adap->pdev); 1606 if (need_skb_unmap()) { 1607 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); 1608 skb->destructor = deferred_unmap_destructor; 1609 } 1610 1611 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, 1612 gen, from->wr_hi, from->wr_lo); 1613} 1614 1615/** 1616 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet 1617 * @skb: the packet 1618 * 1619 * Returns the number of Tx descriptors needed for the given offload 1620 * packet. These packets are already fully constructed. 
1621 */ 1622static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) 1623{ 1624 unsigned int flits, cnt; 1625 1626 if (skb->len <= WR_LEN) 1627 return 1; /* packet fits as immediate data */ 1628 1629 flits = skb_transport_offset(skb) / 8; /* headers */ 1630 cnt = skb_shinfo(skb)->nr_frags; 1631 if (skb->tail != skb->transport_header) 1632 cnt++; 1633 return flits_to_desc(flits + sgl_len(cnt)); 1634} 1635 1636/** 1637 * ofld_xmit - send a packet through an offload queue 1638 * @adap: the adapter 1639 * @q: the Tx offload queue 1640 * @skb: the packet 1641 * 1642 * Send an offload packet through an SGE offload queue. 1643 */ 1644static int ofld_xmit(struct adapter *adap, struct sge_txq *q, 1645 struct sk_buff *skb) 1646{ 1647 int ret; 1648 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen; 1649 1650 spin_lock(&q->lock); 1651again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); 1652 1653 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); 1654 if (unlikely(ret)) { 1655 if (ret == 1) { 1656 skb->priority = ndesc; /* save for restart */ 1657 spin_unlock(&q->lock); 1658 return NET_XMIT_CN; 1659 } 1660 goto again; 1661 } 1662 1663 gen = q->gen; 1664 q->in_use += ndesc; 1665 pidx = q->pidx; 1666 q->pidx += ndesc; 1667 if (q->pidx >= q->size) { 1668 q->pidx -= q->size; 1669 q->gen ^= 1; 1670 } 1671 spin_unlock(&q->lock); 1672 1673 write_ofld_wr(adap, skb, q, pidx, gen, ndesc); 1674 check_ring_tx_db(adap, q); 1675 return NET_XMIT_SUCCESS; 1676} 1677 1678/** 1679 * restart_offloadq - restart a suspended offload queue 1680 * @qs: the queue set cotaining the offload queue 1681 * 1682 * Resumes transmission on a suspended Tx offload queue. 1683 */ 1684static void restart_offloadq(unsigned long data) 1685{ 1686 struct sk_buff *skb; 1687 struct sge_qset *qs = (struct sge_qset *)data; 1688 struct sge_txq *q = &qs->txq[TXQ_OFLD]; 1689 const struct port_info *pi = netdev_priv(qs->netdev); 1690 struct adapter *adap = pi->adapter; 1691 1692 spin_lock(&q->lock); 1693again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); 1694 1695 while ((skb = skb_peek(&q->sendq)) != NULL) { 1696 unsigned int gen, pidx; 1697 unsigned int ndesc = skb->priority; 1698 1699 if (unlikely(q->size - q->in_use < ndesc)) { 1700 set_bit(TXQ_OFLD, &qs->txq_stopped); 1701 smp_mb__after_clear_bit(); 1702 1703 if (should_restart_tx(q) && 1704 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) 1705 goto again; 1706 q->stops++; 1707 break; 1708 } 1709 1710 gen = q->gen; 1711 q->in_use += ndesc; 1712 pidx = q->pidx; 1713 q->pidx += ndesc; 1714 if (q->pidx >= q->size) { 1715 q->pidx -= q->size; 1716 q->gen ^= 1; 1717 } 1718 __skb_unlink(skb, &q->sendq); 1719 spin_unlock(&q->lock); 1720 1721 write_ofld_wr(adap, skb, q, pidx, gen, ndesc); 1722 spin_lock(&q->lock); 1723 } 1724 spin_unlock(&q->lock); 1725 1726#if USE_GTS 1727 set_bit(TXQ_RUNNING, &q->flags); 1728 set_bit(TXQ_LAST_PKT_DB, &q->flags); 1729#endif 1730 wmb(); 1731 t3_write_reg(adap, A_SG_KDOORBELL, 1732 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); 1733} 1734 1735/** 1736 * queue_set - return the queue set a packet should use 1737 * @skb: the packet 1738 * 1739 * Maps a packet to the SGE queue set it should use. The desired queue 1740 * set is carried in bits 1-3 in the packet's priority. 
1741 */ 1742static inline int queue_set(const struct sk_buff *skb) 1743{ 1744 return skb->priority >> 1; 1745} 1746 1747/** 1748 * is_ctrl_pkt - return whether an offload packet is a control packet 1749 * @skb: the packet 1750 * 1751 * Determines whether an offload packet should use an OFLD or a CTRL 1752 * Tx queue. This is indicated by bit 0 in the packet's priority. 1753 */ 1754static inline int is_ctrl_pkt(const struct sk_buff *skb) 1755{ 1756 return skb->priority & 1; 1757} 1758 1759/** 1760 * t3_offload_tx - send an offload packet 1761 * @tdev: the offload device to send to 1762 * @skb: the packet 1763 * 1764 * Sends an offload packet. We use the packet priority to select the 1765 * appropriate Tx queue as follows: bit 0 indicates whether the packet 1766 * should be sent as regular or control, bits 1-3 select the queue set. 1767 */ 1768int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb) 1769{ 1770 struct adapter *adap = tdev2adap(tdev); 1771 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)]; 1772 1773 if (unlikely(is_ctrl_pkt(skb))) 1774 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb); 1775 1776 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb); 1777} 1778 1779/** 1780 * offload_enqueue - add an offload packet to an SGE offload receive queue 1781 * @q: the SGE response queue 1782 * @skb: the packet 1783 * 1784 * Add a new offload packet to an SGE response queue's offload packet 1785 * queue. If the packet is the first on the queue it schedules the RX 1786 * softirq to process the queue. 1787 */ 1788static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) 1789{ 1790 int was_empty = skb_queue_empty(&q->rx_queue); 1791 1792 __skb_queue_tail(&q->rx_queue, skb); 1793 1794 if (was_empty) { 1795 struct sge_qset *qs = rspq_to_qset(q); 1796 1797 napi_schedule(&qs->napi); 1798 } 1799} 1800 1801/** 1802 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts 1803 * @tdev: the offload device that will be receiving the packets 1804 * @q: the SGE response queue that assembled the bundle 1805 * @skbs: the partial bundle 1806 * @n: the number of packets in the bundle 1807 * 1808 * Delivers a (partial) bundle of Rx offload packets to an offload device. 1809 */ 1810static inline void deliver_partial_bundle(struct t3cdev *tdev, 1811 struct sge_rspq *q, 1812 struct sk_buff *skbs[], int n) 1813{ 1814 if (n) { 1815 q->offload_bundles++; 1816 tdev->recv(tdev, skbs, n); 1817 } 1818} 1819 1820/** 1821 * ofld_poll - NAPI handler for offload packets in interrupt mode 1822 * @dev: the network device doing the polling 1823 * @budget: polling budget 1824 * 1825 * The NAPI handler for offload packets when a response queue is serviced 1826 * by the hard interrupt handler, i.e., when it's operating in non-polling 1827 * mode. Creates small packet batches and sends them through the offload 1828 * receive handler. Batches need to be of modest size as we do prefetches 1829 * on the packets in each. 
 */
static int ofld_poll(struct napi_struct *napi, int budget)
{
        struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
        struct sge_rspq *q = &qs->rspq;
        struct adapter *adapter = qs->adap;
        int work_done = 0;

        while (work_done < budget) {
                struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
                struct sk_buff_head queue;
                int ngathered;

                spin_lock_irq(&q->lock);
                __skb_queue_head_init(&queue);
                skb_queue_splice_init(&q->rx_queue, &queue);
                if (skb_queue_empty(&queue)) {
                        napi_complete(napi);
                        spin_unlock_irq(&q->lock);
                        return work_done;
                }
                spin_unlock_irq(&q->lock);

                ngathered = 0;
                skb_queue_walk_safe(&queue, skb, tmp) {
                        if (work_done >= budget)
                                break;
                        work_done++;

                        __skb_unlink(skb, &queue);
                        prefetch(skb->data);
                        skbs[ngathered] = skb;
                        if (++ngathered == RX_BUNDLE_SIZE) {
                                q->offload_bundles++;
                                adapter->tdev.recv(&adapter->tdev, skbs,
                                                   ngathered);
                                ngathered = 0;
                        }
                }
                if (!skb_queue_empty(&queue)) {
                        /* splice remaining packets back onto Rx queue */
                        spin_lock_irq(&q->lock);
                        skb_queue_splice(&queue, &q->rx_queue);
                        spin_unlock_irq(&q->lock);
                }
                deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
        }

        return work_done;
}

/**
 * rx_offload - process a received offload packet
 * @tdev: the offload device receiving the packet
 * @rq: the response queue that received the packet
 * @skb: the packet
 * @rx_gather: a gather list of packets if we are building a bundle
 * @gather_idx: index of the next available slot in the bundle
 *
 * Process an ingress offload packet and add it to the offload ingress
 * queue. Returns the index of the next available slot in the bundle.
 */
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
                             struct sk_buff *skb, struct sk_buff *rx_gather[],
                             unsigned int gather_idx)
{
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);

        if (rq->polling) {
                rx_gather[gather_idx++] = skb;
                if (gather_idx == RX_BUNDLE_SIZE) {
                        tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
                        gather_idx = 0;
                        rq->offload_bundles++;
                }
        } else
                offload_enqueue(rq, skb);

        return gather_idx;
}

/**
 * restart_tx - check whether to restart suspended Tx queues
 * @qs: the queue set to resume
 *
 * Restarts suspended Tx queues of an SGE queue set if they have enough
 * free resources to resume operation.
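 *
 * Note the check-and-clear pattern used for each queue below: a queue is
 * resumed only if should_restart_tx() sees enough free descriptors and
 * test_and_clear_bit() wins the race for the stopped flag, so only one
 * context ends up rescheduling a given queue.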
 */
static void restart_tx(struct sge_qset *qs)
{
        if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
            should_restart_tx(&qs->txq[TXQ_ETH]) &&
            test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
                qs->txq[TXQ_ETH].restarts++;
                if (netif_running(qs->netdev))
                        netif_tx_wake_queue(qs->tx_q);
        }

        if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
            should_restart_tx(&qs->txq[TXQ_OFLD]) &&
            test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
                qs->txq[TXQ_OFLD].restarts++;
                tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
        }
        if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
            should_restart_tx(&qs->txq[TXQ_CTRL]) &&
            test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
                qs->txq[TXQ_CTRL].restarts++;
                tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
        }
}

/**
 * cxgb3_arp_process - process an ARP request probing a private IP address
 * @pi: the port whose private iSCSI IP address is being probed
 * @skb: the skbuff containing the ARP request
 *
 * Check if the ARP request is probing the private IP address
 * dedicated to iSCSI, generate an ARP reply if so.
 */
static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        unsigned char *sha;
        __be32 sip, tip;

        if (!dev)
                return;

        skb_reset_network_header(skb);
        arp = arp_hdr(skb);

        if (arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp + 1);
        sha = arp_ptr;
        arp_ptr += dev->addr_len;
        memcpy(&sip, arp_ptr, sizeof(sip));
        arp_ptr += sizeof(sip);
        arp_ptr += dev->addr_len;
        memcpy(&tip, arp_ptr, sizeof(tip));

        if (tip != pi->iscsi_ipv4addr)
                return;

        arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
                 pi->iscsic.mac_addr, sha);
}

static inline int is_arp(struct sk_buff *skb)
{
        return skb->protocol == htons(ETH_P_ARP);
}

static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
                                          struct sk_buff *skb)
{
        if (is_arp(skb)) {
                cxgb3_arp_process(pi, skb);
                return;
        }

        if (pi->iscsic.recv)
                pi->iscsic.recv(pi, skb);
}

/**
 * rx_eth - process an ingress ethernet packet
 * @adap: the adapter
 * @rq: the response queue that received the packet
 * @skb: the packet
 * @pad: amount of padding at the start of the buffer
 * @lro: set if the packet should be considered for GRO
 *
 * Process an ingress ethernet packet and deliver it to the stack.
 * The padding is 2 if the packet was delivered in an Rx buffer and 0
 * if it was immediate data in a response.
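 *
 * For example, for a buffer-delivered frame the CPL header sits at
 * skb->data + 2, so the skb_pull(skb, sizeof(*p) + pad) at the top of
 * the function strips both the 2-byte pad and the cpl_rx_pkt header
 * before the frame is handed to eth_type_trans().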
2013 */ 2014static void rx_eth(struct adapter *adap, struct sge_rspq *rq, 2015 struct sk_buff *skb, int pad, int lro) 2016{ 2017 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad); 2018 struct sge_qset *qs = rspq_to_qset(rq); 2019 struct port_info *pi; 2020 2021 skb_pull(skb, sizeof(*p) + pad); 2022 skb->protocol = eth_type_trans(skb, adap->port[p->iff]); 2023 pi = netdev_priv(skb->dev); 2024 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && 2025 p->csum == htons(0xffff) && !p->fragment) { 2026 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2027 skb->ip_summed = CHECKSUM_UNNECESSARY; 2028 } else 2029 skb->ip_summed = CHECKSUM_NONE; 2030 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2031 2032 if (unlikely(p->vlan_valid)) { 2033 struct vlan_group *grp = pi->vlan_grp; 2034 2035 qs->port_stats[SGE_PSTAT_VLANEX]++; 2036 if (likely(grp)) 2037 if (lro) 2038 vlan_gro_receive(&qs->napi, grp, 2039 ntohs(p->vlan), skb); 2040 else { 2041 if (unlikely(pi->iscsic.flags)) { 2042 unsigned short vtag = ntohs(p->vlan) & 2043 VLAN_VID_MASK; 2044 skb->dev = vlan_group_get_device(grp, 2045 vtag); 2046 cxgb3_process_iscsi_prov_pack(pi, skb); 2047 } 2048 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan), 2049 rq->polling); 2050 } 2051 else 2052 dev_kfree_skb_any(skb); 2053 } else if (rq->polling) { 2054 if (lro) 2055 napi_gro_receive(&qs->napi, skb); 2056 else { 2057 if (unlikely(pi->iscsic.flags)) 2058 cxgb3_process_iscsi_prov_pack(pi, skb); 2059 netif_receive_skb(skb); 2060 } 2061 } else 2062 netif_rx(skb); 2063} 2064 2065static inline int is_eth_tcp(u32 rss) 2066{ 2067 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE; 2068} 2069 2070/** 2071 * lro_add_page - add a page chunk to an LRO session 2072 * @adap: the adapter 2073 * @qs: the associated queue set 2074 * @fl: the free list containing the page chunk to add 2075 * @len: packet length 2076 * @complete: Indicates the last fragment of a frame 2077 * 2078 * Add a received packet contained in a page chunk to an existing LRO 2079 * session. 
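 *
 * Each call attaches one page-chunk fragment to the skb obtained from
 * napi_get_frags(); the accumulated frame is pushed up the stack (via
 * napi_gro_frags() or vlan_gro_frags()) only once a chunk arrives with
 * @complete set, i.e. the fragment that carried F_RSPD_EOP.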
2080 */ 2081static void lro_add_page(struct adapter *adap, struct sge_qset *qs, 2082 struct sge_fl *fl, int len, int complete) 2083{ 2084 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2085 struct port_info *pi = netdev_priv(qs->netdev); 2086 struct sk_buff *skb = NULL; 2087 struct cpl_rx_pkt *cpl; 2088 struct skb_frag_struct *rx_frag; 2089 int nr_frags; 2090 int offset = 0; 2091 2092 if (!qs->nomem) { 2093 skb = napi_get_frags(&qs->napi); 2094 qs->nomem = !skb; 2095 } 2096 2097 fl->credits--; 2098 2099 pci_dma_sync_single_for_cpu(adap->pdev, 2100 dma_unmap_addr(sd, dma_addr), 2101 fl->buf_size - SGE_PG_RSVD, 2102 PCI_DMA_FROMDEVICE); 2103 2104 (*sd->pg_chunk.p_cnt)--; 2105 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) 2106 pci_unmap_page(adap->pdev, 2107 sd->pg_chunk.mapping, 2108 fl->alloc_size, 2109 PCI_DMA_FROMDEVICE); 2110 2111 if (!skb) { 2112 put_page(sd->pg_chunk.page); 2113 if (complete) 2114 qs->nomem = 0; 2115 return; 2116 } 2117 2118 rx_frag = skb_shinfo(skb)->frags; 2119 nr_frags = skb_shinfo(skb)->nr_frags; 2120 2121 if (!nr_frags) { 2122 offset = 2 + sizeof(struct cpl_rx_pkt); 2123 cpl = qs->lro_va = sd->pg_chunk.va + 2; 2124 2125 if ((pi->rx_offload & T3_RX_CSUM) && 2126 cpl->csum_valid && cpl->csum == htons(0xffff)) { 2127 skb->ip_summed = CHECKSUM_UNNECESSARY; 2128 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2129 } else 2130 skb->ip_summed = CHECKSUM_NONE; 2131 } else 2132 cpl = qs->lro_va; 2133 2134 len -= offset; 2135 2136 rx_frag += nr_frags; 2137 rx_frag->page = sd->pg_chunk.page; 2138 rx_frag->page_offset = sd->pg_chunk.offset + offset; 2139 rx_frag->size = len; 2140 2141 skb->len += len; 2142 skb->data_len += len; 2143 skb->truesize += len; 2144 skb_shinfo(skb)->nr_frags++; 2145 2146 if (!complete) 2147 return; 2148 2149 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2150 2151 if (unlikely(cpl->vlan_valid)) { 2152 struct vlan_group *grp = pi->vlan_grp; 2153 2154 if (likely(grp != NULL)) { 2155 vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan)); 2156 return; 2157 } 2158 } 2159 napi_gro_frags(&qs->napi); 2160} 2161 2162/** 2163 * handle_rsp_cntrl_info - handles control information in a response 2164 * @qs: the queue set corresponding to the response 2165 * @flags: the response control flags 2166 * 2167 * Handles the control information of an SGE response, such as GTS 2168 * indications and completion credits for the queue set's Tx queues. 2169 * HW coalesces credits, we don't do any extra SW coalescing. 
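 *
 * For example, a response whose flags report three completed Ethernet
 * Tx descriptors has G_RSPD_TXQ0_CR(flags) == 3 and simply advances
 * qs->txq[TXQ_ETH].processed by 3; check_ring_db() below then compares
 * cleaned + in_use against processed to decide whether a doorbell is
 * still needed.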
2170 */ 2171static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags) 2172{ 2173 unsigned int credits; 2174 2175#if USE_GTS 2176 if (flags & F_RSPD_TXQ0_GTS) 2177 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); 2178#endif 2179 2180 credits = G_RSPD_TXQ0_CR(flags); 2181 if (credits) 2182 qs->txq[TXQ_ETH].processed += credits; 2183 2184 credits = G_RSPD_TXQ2_CR(flags); 2185 if (credits) 2186 qs->txq[TXQ_CTRL].processed += credits; 2187 2188# if USE_GTS 2189 if (flags & F_RSPD_TXQ1_GTS) 2190 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); 2191# endif 2192 credits = G_RSPD_TXQ1_CR(flags); 2193 if (credits) 2194 qs->txq[TXQ_OFLD].processed += credits; 2195} 2196 2197/** 2198 * check_ring_db - check if we need to ring any doorbells 2199 * @adapter: the adapter 2200 * @qs: the queue set whose Tx queues are to be examined 2201 * @sleeping: indicates which Tx queue sent GTS 2202 * 2203 * Checks if some of a queue set's Tx queues need to ring their doorbells 2204 * to resume transmission after idling while they still have unprocessed 2205 * descriptors. 2206 */ 2207static void check_ring_db(struct adapter *adap, struct sge_qset *qs, 2208 unsigned int sleeping) 2209{ 2210 if (sleeping & F_RSPD_TXQ0_GTS) { 2211 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 2212 2213 if (txq->cleaned + txq->in_use != txq->processed && 2214 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { 2215 set_bit(TXQ_RUNNING, &txq->flags); 2216 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | 2217 V_EGRCNTX(txq->cntxt_id)); 2218 } 2219 } 2220 2221 if (sleeping & F_RSPD_TXQ1_GTS) { 2222 struct sge_txq *txq = &qs->txq[TXQ_OFLD]; 2223 2224 if (txq->cleaned + txq->in_use != txq->processed && 2225 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { 2226 set_bit(TXQ_RUNNING, &txq->flags); 2227 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | 2228 V_EGRCNTX(txq->cntxt_id)); 2229 } 2230 } 2231} 2232 2233/** 2234 * is_new_response - check if a response is newly written 2235 * @r: the response descriptor 2236 * @q: the response queue 2237 * 2238 * Returns true if a response descriptor contains a yet unprocessed 2239 * response. 2240 */ 2241static inline int is_new_response(const struct rsp_desc *r, 2242 const struct sge_rspq *q) 2243{ 2244 return (r->intr_gen & F_RSPD_GEN2) == q->gen; 2245} 2246 2247static inline void clear_rspq_bufstate(struct sge_rspq * const q) 2248{ 2249 q->pg_skb = NULL; 2250 q->rx_recycle_buf = 0; 2251} 2252 2253#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) 2254#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ 2255 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ 2256 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \ 2257 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR)) 2258 2259/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */ 2260#define NOMEM_INTR_DELAY 2500 2261 2262/** 2263 * process_responses - process responses from an SGE response queue 2264 * @adap: the adapter 2265 * @qs: the queue set to which the response queue belongs 2266 * @budget: how many responses can be processed in this round 2267 * 2268 * Process responses from an SGE response queue up to the supplied budget. 2269 * Responses include received packets as well as credits and other events 2270 * for the queues that belong to the response queue's queue set. 2271 * A negative budget is effectively unlimited. 2272 * 2273 * Additionally choose the interrupt holdoff time for the next interrupt 2274 * on this queue. If the system is under memory shortage use a fairly 2275 * long delay to help recovery. 
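 *
 * Concretely, q->next_holdoff starts out as the configured holdoff timer
 * and is bumped to NOMEM_INTR_DELAY (2500 * 0.1us = 250us) when an skb
 * allocation fails; the interrupt/NAPI callers then program whichever
 * value was chosen into SG_GTS via V_NEWTIMER().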
2276 */ 2277static int process_responses(struct adapter *adap, struct sge_qset *qs, 2278 int budget) 2279{ 2280 struct sge_rspq *q = &qs->rspq; 2281 struct rsp_desc *r = &q->desc[q->cidx]; 2282 int budget_left = budget; 2283 unsigned int sleeping = 0; 2284 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE]; 2285 int ngathered = 0; 2286 2287 q->next_holdoff = q->holdoff_tmr; 2288 2289 while (likely(budget_left && is_new_response(r, q))) { 2290 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled; 2291 struct sk_buff *skb = NULL; 2292 u32 len, flags; 2293 __be32 rss_hi, rss_lo; 2294 2295 rmb(); 2296 eth = r->rss_hdr.opcode == CPL_RX_PKT; 2297 rss_hi = *(const __be32 *)r; 2298 rss_lo = r->rss_hdr.rss_hash_val; 2299 flags = ntohl(r->flags); 2300 2301 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) { 2302 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC); 2303 if (!skb) 2304 goto no_mem; 2305 2306 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE); 2307 skb->data[0] = CPL_ASYNC_NOTIF; 2308 rss_hi = htonl(CPL_ASYNC_NOTIF << 24); 2309 q->async_notif++; 2310 } else if (flags & F_RSPD_IMM_DATA_VALID) { 2311 skb = get_imm_packet(r); 2312 if (unlikely(!skb)) { 2313no_mem: 2314 q->next_holdoff = NOMEM_INTR_DELAY; 2315 q->nomem++; 2316 /* consume one credit since we tried */ 2317 budget_left--; 2318 break; 2319 } 2320 q->imm_data++; 2321 ethpad = 0; 2322 } else if ((len = ntohl(r->len_cq)) != 0) { 2323 struct sge_fl *fl; 2324 2325 lro &= eth && is_eth_tcp(rss_hi); 2326 2327 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; 2328 if (fl->use_pages) { 2329 void *addr = fl->sdesc[fl->cidx].pg_chunk.va; 2330 2331 prefetch(addr); 2332#if L1_CACHE_BYTES < 128 2333 prefetch(addr + L1_CACHE_BYTES); 2334#endif 2335 __refill_fl(adap, fl); 2336 if (lro > 0) { 2337 lro_add_page(adap, qs, fl, 2338 G_RSPD_LEN(len), 2339 flags & F_RSPD_EOP); 2340 goto next_fl; 2341 } 2342 2343 skb = get_packet_pg(adap, fl, q, 2344 G_RSPD_LEN(len), 2345 eth ? 2346 SGE_RX_DROP_THRES : 0); 2347 q->pg_skb = skb; 2348 } else 2349 skb = get_packet(adap, fl, G_RSPD_LEN(len), 2350 eth ? 
SGE_RX_DROP_THRES : 0); 2351 if (unlikely(!skb)) { 2352 if (!eth) 2353 goto no_mem; 2354 q->rx_drops++; 2355 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT)) 2356 __skb_pull(skb, 2); 2357next_fl: 2358 if (++fl->cidx == fl->size) 2359 fl->cidx = 0; 2360 } else 2361 q->pure_rsps++; 2362 2363 if (flags & RSPD_CTRL_MASK) { 2364 sleeping |= flags & RSPD_GTS_MASK; 2365 handle_rsp_cntrl_info(qs, flags); 2366 } 2367 2368 r++; 2369 if (unlikely(++q->cidx == q->size)) { 2370 q->cidx = 0; 2371 q->gen ^= 1; 2372 r = q->desc; 2373 } 2374 prefetch(r); 2375 2376 if (++q->credits >= (q->size / 4)) { 2377 refill_rspq(adap, q, q->credits); 2378 q->credits = 0; 2379 } 2380 2381 packet_complete = flags & 2382 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID | 2383 F_RSPD_ASYNC_NOTIF); 2384 2385 if (skb != NULL && packet_complete) { 2386 if (eth) 2387 rx_eth(adap, q, skb, ethpad, lro); 2388 else { 2389 q->offload_pkts++; 2390 /* Preserve the RSS info in csum & priority */ 2391 skb->csum = rss_hi; 2392 skb->priority = rss_lo; 2393 ngathered = rx_offload(&adap->tdev, q, skb, 2394 offload_skbs, 2395 ngathered); 2396 } 2397 2398 if (flags & F_RSPD_EOP) 2399 clear_rspq_bufstate(q); 2400 } 2401 --budget_left; 2402 } 2403 2404 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); 2405 2406 if (sleeping) 2407 check_ring_db(adap, qs, sleeping); 2408 2409 smp_mb(); /* commit Tx queue .processed updates */ 2410 if (unlikely(qs->txq_stopped != 0)) 2411 restart_tx(qs); 2412 2413 budget -= budget_left; 2414 return budget; 2415} 2416 2417static inline int is_pure_response(const struct rsp_desc *r) 2418{ 2419 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID); 2420 2421 return (n | r->len_cq) == 0; 2422} 2423 2424/** 2425 * napi_rx_handler - the NAPI handler for Rx processing 2426 * @napi: the napi instance 2427 * @budget: how many packets we can process in this round 2428 * 2429 * Handler for new data events when using NAPI. 2430 */ 2431static int napi_rx_handler(struct napi_struct *napi, int budget) 2432{ 2433 struct sge_qset *qs = container_of(napi, struct sge_qset, napi); 2434 struct adapter *adap = qs->adap; 2435 int work_done = process_responses(adap, qs, budget); 2436 2437 if (likely(work_done < budget)) { 2438 napi_complete(napi); 2439 2440 /* 2441 * Because we don't atomically flush the following 2442 * write it is possible that in very rare cases it can 2443 * reach the device in a way that races with a new 2444 * response being written plus an error interrupt 2445 * causing the NAPI interrupt handler below to return 2446 * unhandled status to the OS. To protect against 2447 * this would require flushing the write and doing 2448 * both the write and the flush with interrupts off. 2449 * Way too expensive and unjustifiable given the 2450 * rarity of the race. 2451 * 2452 * The race cannot happen at all with MSI-X. 2453 */ 2454 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | 2455 V_NEWTIMER(qs->rspq.next_holdoff) | 2456 V_NEWINDEX(qs->rspq.cidx)); 2457 } 2458 return work_done; 2459} 2460 2461/* 2462 * Returns true if the device is already scheduled for polling. 
2463 */ 2464static inline int napi_is_scheduled(struct napi_struct *napi) 2465{ 2466 return test_bit(NAPI_STATE_SCHED, &napi->state); 2467} 2468 2469/** 2470 * process_pure_responses - process pure responses from a response queue 2471 * @adap: the adapter 2472 * @qs: the queue set owning the response queue 2473 * @r: the first pure response to process 2474 * 2475 * A simpler version of process_responses() that handles only pure (i.e., 2476 * non data-carrying) responses. Such respones are too light-weight to 2477 * justify calling a softirq under NAPI, so we handle them specially in 2478 * the interrupt handler. The function is called with a pointer to a 2479 * response, which the caller must ensure is a valid pure response. 2480 * 2481 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise. 2482 */ 2483static int process_pure_responses(struct adapter *adap, struct sge_qset *qs, 2484 struct rsp_desc *r) 2485{ 2486 struct sge_rspq *q = &qs->rspq; 2487 unsigned int sleeping = 0; 2488 2489 do { 2490 u32 flags = ntohl(r->flags); 2491 2492 r++; 2493 if (unlikely(++q->cidx == q->size)) { 2494 q->cidx = 0; 2495 q->gen ^= 1; 2496 r = q->desc; 2497 } 2498 prefetch(r); 2499 2500 if (flags & RSPD_CTRL_MASK) { 2501 sleeping |= flags & RSPD_GTS_MASK; 2502 handle_rsp_cntrl_info(qs, flags); 2503 } 2504 2505 q->pure_rsps++; 2506 if (++q->credits >= (q->size / 4)) { 2507 refill_rspq(adap, q, q->credits); 2508 q->credits = 0; 2509 } 2510 if (!is_new_response(r, q)) 2511 break; 2512 rmb(); 2513 } while (is_pure_response(r)); 2514 2515 if (sleeping) 2516 check_ring_db(adap, qs, sleeping); 2517 2518 smp_mb(); /* commit Tx queue .processed updates */ 2519 if (unlikely(qs->txq_stopped != 0)) 2520 restart_tx(qs); 2521 2522 return is_new_response(r, q); 2523} 2524 2525/** 2526 * handle_responses - decide what to do with new responses in NAPI mode 2527 * @adap: the adapter 2528 * @q: the response queue 2529 * 2530 * This is used by the NAPI interrupt handlers to decide what to do with 2531 * new SGE responses. If there are no new responses it returns -1. If 2532 * there are new responses and they are pure (i.e., non-data carrying) 2533 * it handles them straight in hard interrupt context as they are very 2534 * cheap and don't deliver any packets. Finally, if there are any data 2535 * signaling responses it schedules the NAPI handler. Returns 1 if it 2536 * schedules NAPI, 0 if all new responses were pure. 2537 * 2538 * The caller must ascertain NAPI is not already running. 2539 */ 2540static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) 2541{ 2542 struct sge_qset *qs = rspq_to_qset(q); 2543 struct rsp_desc *r = &q->desc[q->cidx]; 2544 2545 if (!is_new_response(r, q)) 2546 return -1; 2547 rmb(); 2548 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { 2549 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | 2550 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); 2551 return 0; 2552 } 2553 napi_schedule(&qs->napi); 2554 return 1; 2555} 2556 2557/* 2558 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case 2559 * (i.e., response queue serviced in hard interrupt). 
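 *
 * With MSI-X each queue set owns its own vector, so this handler can run
 * process_responses() with an unlimited budget (-1) and write the GTS
 * update itself, without the queue-0 lock sharing required by the
 * MSI/INTx handlers further below.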
2560 */ 2561irqreturn_t t3_sge_intr_msix(int irq, void *cookie) 2562{ 2563 struct sge_qset *qs = cookie; 2564 struct adapter *adap = qs->adap; 2565 struct sge_rspq *q = &qs->rspq; 2566 2567 spin_lock(&q->lock); 2568 if (process_responses(adap, qs, -1) == 0) 2569 q->unhandled_irqs++; 2570 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | 2571 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); 2572 spin_unlock(&q->lock); 2573 return IRQ_HANDLED; 2574} 2575 2576/* 2577 * The MSI-X interrupt handler for an SGE response queue for the NAPI case 2578 * (i.e., response queue serviced by NAPI polling). 2579 */ 2580static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) 2581{ 2582 struct sge_qset *qs = cookie; 2583 struct sge_rspq *q = &qs->rspq; 2584 2585 spin_lock(&q->lock); 2586 2587 if (handle_responses(qs->adap, q) < 0) 2588 q->unhandled_irqs++; 2589 spin_unlock(&q->lock); 2590 return IRQ_HANDLED; 2591} 2592 2593/* 2594 * The non-NAPI MSI interrupt handler. This needs to handle data events from 2595 * SGE response queues as well as error and other async events as they all use 2596 * the same MSI vector. We use one SGE response queue per port in this mode 2597 * and protect all response queues with queue 0's lock. 2598 */ 2599static irqreturn_t t3_intr_msi(int irq, void *cookie) 2600{ 2601 int new_packets = 0; 2602 struct adapter *adap = cookie; 2603 struct sge_rspq *q = &adap->sge.qs[0].rspq; 2604 2605 spin_lock(&q->lock); 2606 2607 if (process_responses(adap, &adap->sge.qs[0], -1)) { 2608 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | 2609 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); 2610 new_packets = 1; 2611 } 2612 2613 if (adap->params.nports == 2 && 2614 process_responses(adap, &adap->sge.qs[1], -1)) { 2615 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; 2616 2617 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) | 2618 V_NEWTIMER(q1->next_holdoff) | 2619 V_NEWINDEX(q1->cidx)); 2620 new_packets = 1; 2621 } 2622 2623 if (!new_packets && t3_slow_intr_handler(adap) == 0) 2624 q->unhandled_irqs++; 2625 2626 spin_unlock(&q->lock); 2627 return IRQ_HANDLED; 2628} 2629 2630static int rspq_check_napi(struct sge_qset *qs) 2631{ 2632 struct sge_rspq *q = &qs->rspq; 2633 2634 if (!napi_is_scheduled(&qs->napi) && 2635 is_new_response(&q->desc[q->cidx], q)) { 2636 napi_schedule(&qs->napi); 2637 return 1; 2638 } 2639 return 0; 2640} 2641 2642/* 2643 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced 2644 * by NAPI polling). Handles data events from SGE response queues as well as 2645 * error and other async events as they all use the same MSI vector. We use 2646 * one SGE response queue per port in this mode and protect all response 2647 * queues with queue 0's lock. 2648 */ 2649static irqreturn_t t3_intr_msi_napi(int irq, void *cookie) 2650{ 2651 int new_packets; 2652 struct adapter *adap = cookie; 2653 struct sge_rspq *q = &adap->sge.qs[0].rspq; 2654 2655 spin_lock(&q->lock); 2656 2657 new_packets = rspq_check_napi(&adap->sge.qs[0]); 2658 if (adap->params.nports == 2) 2659 new_packets += rspq_check_napi(&adap->sge.qs[1]); 2660 if (!new_packets && t3_slow_intr_handler(adap) == 0) 2661 q->unhandled_irqs++; 2662 2663 spin_unlock(&q->lock); 2664 return IRQ_HANDLED; 2665} 2666 2667/* 2668 * A helper function that processes responses and issues GTS. 
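 * Used by the INTx handlers below so that each serviced response queue
 * immediately returns its new index and holdoff timer to the hardware.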
2669 */ 2670static inline int process_responses_gts(struct adapter *adap, 2671 struct sge_rspq *rq) 2672{ 2673 int work; 2674 2675 work = process_responses(adap, rspq_to_qset(rq), -1); 2676 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | 2677 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx)); 2678 return work; 2679} 2680 2681/* 2682 * The legacy INTx interrupt handler. This needs to handle data events from 2683 * SGE response queues as well as error and other async events as they all use 2684 * the same interrupt pin. We use one SGE response queue per port in this mode 2685 * and protect all response queues with queue 0's lock. 2686 */ 2687static irqreturn_t t3_intr(int irq, void *cookie) 2688{ 2689 int work_done, w0, w1; 2690 struct adapter *adap = cookie; 2691 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; 2692 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; 2693 2694 spin_lock(&q0->lock); 2695 2696 w0 = is_new_response(&q0->desc[q0->cidx], q0); 2697 w1 = adap->params.nports == 2 && 2698 is_new_response(&q1->desc[q1->cidx], q1); 2699 2700 if (likely(w0 | w1)) { 2701 t3_write_reg(adap, A_PL_CLI, 0); 2702 t3_read_reg(adap, A_PL_CLI); /* flush */ 2703 2704 if (likely(w0)) 2705 process_responses_gts(adap, q0); 2706 2707 if (w1) 2708 process_responses_gts(adap, q1); 2709 2710 work_done = w0 | w1; 2711 } else 2712 work_done = t3_slow_intr_handler(adap); 2713 2714 spin_unlock(&q0->lock); 2715 return IRQ_RETVAL(work_done != 0); 2716} 2717 2718/* 2719 * Interrupt handler for legacy INTx interrupts for T3B-based cards. 2720 * Handles data events from SGE response queues as well as error and other 2721 * async events as they all use the same interrupt pin. We use one SGE 2722 * response queue per port in this mode and protect all response queues with 2723 * queue 0's lock. 2724 */ 2725static irqreturn_t t3b_intr(int irq, void *cookie) 2726{ 2727 u32 map; 2728 struct adapter *adap = cookie; 2729 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; 2730 2731 t3_write_reg(adap, A_PL_CLI, 0); 2732 map = t3_read_reg(adap, A_SG_DATA_INTR); 2733 2734 if (unlikely(!map)) /* shared interrupt, most likely */ 2735 return IRQ_NONE; 2736 2737 spin_lock(&q0->lock); 2738 2739 if (unlikely(map & F_ERRINTR)) 2740 t3_slow_intr_handler(adap); 2741 2742 if (likely(map & 1)) 2743 process_responses_gts(adap, q0); 2744 2745 if (map & 2) 2746 process_responses_gts(adap, &adap->sge.qs[1].rspq); 2747 2748 spin_unlock(&q0->lock); 2749 return IRQ_HANDLED; 2750} 2751 2752/* 2753 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards. 2754 * Handles data events from SGE response queues as well as error and other 2755 * async events as they all use the same interrupt pin. We use one SGE 2756 * response queue per port in this mode and protect all response queues with 2757 * queue 0's lock. 
2758 */ 2759static irqreturn_t t3b_intr_napi(int irq, void *cookie) 2760{ 2761 u32 map; 2762 struct adapter *adap = cookie; 2763 struct sge_qset *qs0 = &adap->sge.qs[0]; 2764 struct sge_rspq *q0 = &qs0->rspq; 2765 2766 t3_write_reg(adap, A_PL_CLI, 0); 2767 map = t3_read_reg(adap, A_SG_DATA_INTR); 2768 2769 if (unlikely(!map)) /* shared interrupt, most likely */ 2770 return IRQ_NONE; 2771 2772 spin_lock(&q0->lock); 2773 2774 if (unlikely(map & F_ERRINTR)) 2775 t3_slow_intr_handler(adap); 2776 2777 if (likely(map & 1)) 2778 napi_schedule(&qs0->napi); 2779 2780 if (map & 2) 2781 napi_schedule(&adap->sge.qs[1].napi); 2782 2783 spin_unlock(&q0->lock); 2784 return IRQ_HANDLED; 2785} 2786 2787/** 2788 * t3_intr_handler - select the top-level interrupt handler 2789 * @adap: the adapter 2790 * @polling: whether using NAPI to service response queues 2791 * 2792 * Selects the top-level interrupt handler based on the type of interrupts 2793 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the 2794 * response queues. 2795 */ 2796irq_handler_t t3_intr_handler(struct adapter *adap, int polling) 2797{ 2798 if (adap->flags & USING_MSIX) 2799 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix; 2800 if (adap->flags & USING_MSI) 2801 return polling ? t3_intr_msi_napi : t3_intr_msi; 2802 if (adap->params.rev > 0) 2803 return polling ? t3b_intr_napi : t3b_intr; 2804 return t3_intr; 2805} 2806 2807#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ 2808 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ 2809 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ 2810 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ 2811 F_HIRCQPARITYERROR) 2812#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR) 2813#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \ 2814 F_RSPQDISABLED) 2815 2816/** 2817 * t3_sge_err_intr_handler - SGE async event interrupt handler 2818 * @adapter: the adapter 2819 * 2820 * Interrupt handler for SGE asynchronous (non-data) events. 2821 */ 2822void t3_sge_err_intr_handler(struct adapter *adapter) 2823{ 2824 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) & 2825 ~F_FLEMPTY; 2826 2827 if (status & SGE_PARERR) 2828 CH_ALERT(adapter, "SGE parity error (0x%x)\n", 2829 status & SGE_PARERR); 2830 if (status & SGE_FRAMINGERR) 2831 CH_ALERT(adapter, "SGE framing error (0x%x)\n", 2832 status & SGE_FRAMINGERR); 2833 2834 if (status & F_RSPQCREDITOVERFOW) 2835 CH_ALERT(adapter, "SGE response queue credit overflow\n"); 2836 2837 if (status & F_RSPQDISABLED) { 2838 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS); 2839 2840 CH_ALERT(adapter, 2841 "packet delivered to disabled response queue " 2842 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff); 2843 } 2844 2845 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) 2846 queue_work(cxgb3_wq, &adapter->db_drop_task); 2847 2848 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL)) 2849 queue_work(cxgb3_wq, &adapter->db_full_task); 2850 2851 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY)) 2852 queue_work(cxgb3_wq, &adapter->db_empty_task); 2853 2854 t3_write_reg(adapter, A_SG_INT_CAUSE, status); 2855 if (status & SGE_FATALERR) 2856 t3_fatal_err(adapter); 2857} 2858 2859/** 2860 * sge_timer_tx - perform periodic maintenance of an SGE qset 2861 * @data: the SGE queue set to maintain 2862 * 2863 * Runs periodically from a timer to perform maintenance of an SGE queue 2864 * set. 
It performs two tasks: 2865 * 2866 * Cleans up any completed Tx descriptors that may still be pending. 2867 * Normal descriptor cleanup happens when new packets are added to a Tx 2868 * queue so this timer is relatively infrequent and does any cleanup only 2869 * if the Tx queue has not seen any new packets in a while. We make a 2870 * best effort attempt to reclaim descriptors, in that we don't wait 2871 * around if we cannot get a queue's lock (which most likely is because 2872 * someone else is queueing new packets and so will also handle the clean 2873 * up). Since control queues use immediate data exclusively we don't 2874 * bother cleaning them up here. 2875 * 2876 */ 2877static void sge_timer_tx(unsigned long data) 2878{ 2879 struct sge_qset *qs = (struct sge_qset *)data; 2880 struct port_info *pi = netdev_priv(qs->netdev); 2881 struct adapter *adap = pi->adapter; 2882 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0}; 2883 unsigned long next_period; 2884 2885 if (__netif_tx_trylock(qs->tx_q)) { 2886 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], 2887 TX_RECLAIM_TIMER_CHUNK); 2888 __netif_tx_unlock(qs->tx_q); 2889 } 2890 2891 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) { 2892 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD], 2893 TX_RECLAIM_TIMER_CHUNK); 2894 spin_unlock(&qs->txq[TXQ_OFLD].lock); 2895 } 2896 2897 next_period = TX_RECLAIM_PERIOD >> 2898 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) / 2899 TX_RECLAIM_TIMER_CHUNK); 2900 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); 2901} 2902 2903/* 2904 * sge_timer_rx - perform periodic maintenance of an SGE qset 2905 * @data: the SGE queue set to maintain 2906 * 2907 * a) Replenishes Rx queues that have run out due to memory shortage. 2908 * Normally new Rx buffers are added when existing ones are consumed but 2909 * when out of memory a queue can become empty. We try to add only a few 2910 * buffers here, the queue will be replenished fully as these new buffers 2911 * are used up if memory shortage has subsided. 2912 * 2913 * b) Return coalesced response queue credits in case a response queue is 2914 * starved. 2915 * 2916 */ 2917static void sge_timer_rx(unsigned long data) 2918{ 2919 spinlock_t *lock; 2920 struct sge_qset *qs = (struct sge_qset *)data; 2921 struct port_info *pi = netdev_priv(qs->netdev); 2922 struct adapter *adap = pi->adapter; 2923 u32 status; 2924 2925 lock = adap->params.rev > 0 ? 2926 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock; 2927 2928 if (!spin_trylock_irq(lock)) 2929 goto out; 2930 2931 if (napi_is_scheduled(&qs->napi)) 2932 goto unlock; 2933 2934 if (adap->params.rev < 4) { 2935 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); 2936 2937 if (status & (1 << qs->rspq.cntxt_id)) { 2938 qs->rspq.starved++; 2939 if (qs->rspq.credits) { 2940 qs->rspq.credits--; 2941 refill_rspq(adap, &qs->rspq, 1); 2942 qs->rspq.restarted++; 2943 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS, 2944 1 << qs->rspq.cntxt_id); 2945 } 2946 } 2947 } 2948 2949 if (qs->fl[0].credits < qs->fl[0].size) 2950 __refill_fl(adap, &qs->fl[0]); 2951 if (qs->fl[1].credits < qs->fl[1].size) 2952 __refill_fl(adap, &qs->fl[1]); 2953 2954unlock: 2955 spin_unlock_irq(lock); 2956out: 2957 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); 2958} 2959 2960/** 2961 * t3_update_qset_coalesce - update coalescing settings for a queue set 2962 * @qs: the SGE queue set 2963 * @p: new queue set parameters 2964 * 2965 * Update the coalescing settings for an SGE queue set. Nothing is done 2966 * if the queue set is not initialized yet. 
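 *
 * The holdoff timer is kept in the same 0.1us units as NOMEM_INTR_DELAY
 * above, hence the multiplication by 10: e.g. coalesce_usecs = 5 yields
 * holdoff_tmr = 50, and 0 is rounded up to 1 because the hardware field
 * cannot be zero.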
2967 */ 2968void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) 2969{ 2970 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ 2971 qs->rspq.polling = p->polling; 2972 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll; 2973} 2974 2975/** 2976 * t3_sge_alloc_qset - initialize an SGE queue set 2977 * @adapter: the adapter 2978 * @id: the queue set id 2979 * @nports: how many Ethernet ports will be using this queue set 2980 * @irq_vec_idx: the IRQ vector index for response queue interrupts 2981 * @p: configuration parameters for this queue set 2982 * @ntxq: number of Tx queues for the queue set 2983 * @netdev: net device associated with this queue set 2984 * @netdevq: net device TX queue associated with this queue set 2985 * 2986 * Allocate resources and initialize an SGE queue set. A queue set 2987 * comprises a response queue, two Rx free-buffer queues, and up to 3 2988 * Tx queues. The Tx queues are assigned roles in the order Ethernet 2989 * queue, offload queue, and control queue. 2990 */ 2991int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, 2992 int irq_vec_idx, const struct qset_params *p, 2993 int ntxq, struct net_device *dev, 2994 struct netdev_queue *netdevq) 2995{ 2996 int i, avail, ret = -ENOMEM; 2997 struct sge_qset *q = &adapter->sge.qs[id]; 2998 2999 init_qset_cntxt(q, id); 3000 setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q); 3001 setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q); 3002 3003 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, 3004 sizeof(struct rx_desc), 3005 sizeof(struct rx_sw_desc), 3006 &q->fl[0].phys_addr, &q->fl[0].sdesc); 3007 if (!q->fl[0].desc) 3008 goto err; 3009 3010 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, 3011 sizeof(struct rx_desc), 3012 sizeof(struct rx_sw_desc), 3013 &q->fl[1].phys_addr, &q->fl[1].sdesc); 3014 if (!q->fl[1].desc) 3015 goto err; 3016 3017 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, 3018 sizeof(struct rsp_desc), 0, 3019 &q->rspq.phys_addr, NULL); 3020 if (!q->rspq.desc) 3021 goto err; 3022 3023 for (i = 0; i < ntxq; ++i) { 3024 /* 3025 * The control queue always uses immediate data so does not 3026 * need to keep track of any sk_buffs. 3027 */ 3028 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc); 3029 3030 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], 3031 sizeof(struct tx_desc), sz, 3032 &q->txq[i].phys_addr, 3033 &q->txq[i].sdesc); 3034 if (!q->txq[i].desc) 3035 goto err; 3036 3037 q->txq[i].gen = 1; 3038 q->txq[i].size = p->txq_size[i]; 3039 spin_lock_init(&q->txq[i].lock); 3040 skb_queue_head_init(&q->txq[i].sendq); 3041 } 3042 3043 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq, 3044 (unsigned long)q); 3045 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq, 3046 (unsigned long)q); 3047 3048 q->fl[0].gen = q->fl[1].gen = 1; 3049 q->fl[0].size = p->fl_size; 3050 q->fl[1].size = p->jumbo_size; 3051 3052 q->rspq.gen = 1; 3053 q->rspq.size = p->rspq_size; 3054 spin_lock_init(&q->rspq.lock); 3055 skb_queue_head_init(&q->rspq.rx_queue); 3056 3057 q->txq[TXQ_ETH].stop_thres = nports * 3058 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); 3059 3060#if FL0_PG_CHUNK_SIZE > 0 3061 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; 3062#else 3063 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); 3064#endif 3065#if FL1_PG_CHUNK_SIZE > 0 3066 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; 3067#else 3068 q->fl[1].buf_size = is_offload(adapter) ? 
3069 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 3070 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt); 3071#endif 3072 3073 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; 3074 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; 3075 q->fl[0].order = FL0_PG_ORDER; 3076 q->fl[1].order = FL1_PG_ORDER; 3077 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; 3078 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; 3079 3080 spin_lock_irq(&adapter->sge.reg_lock); 3081 3082 /* FL threshold comparison uses < */ 3083 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, 3084 q->rspq.phys_addr, q->rspq.size, 3085 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); 3086 if (ret) 3087 goto err_unlock; 3088 3089 for (i = 0; i < SGE_RXQ_PER_SET; ++i) { 3090 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, 3091 q->fl[i].phys_addr, q->fl[i].size, 3092 q->fl[i].buf_size - SGE_PG_RSVD, 3093 p->cong_thres, 1, 0); 3094 if (ret) 3095 goto err_unlock; 3096 } 3097 3098 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, 3099 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, 3100 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, 3101 1, 0); 3102 if (ret) 3103 goto err_unlock; 3104 3105 if (ntxq > 1) { 3106 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, 3107 USE_GTS, SGE_CNTXT_OFLD, id, 3108 q->txq[TXQ_OFLD].phys_addr, 3109 q->txq[TXQ_OFLD].size, 0, 1, 0); 3110 if (ret) 3111 goto err_unlock; 3112 } 3113 3114 if (ntxq > 2) { 3115 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, 3116 SGE_CNTXT_CTRL, id, 3117 q->txq[TXQ_CTRL].phys_addr, 3118 q->txq[TXQ_CTRL].size, 3119 q->txq[TXQ_CTRL].token, 1, 0); 3120 if (ret) 3121 goto err_unlock; 3122 } 3123 3124 spin_unlock_irq(&adapter->sge.reg_lock); 3125 3126 q->adap = adapter; 3127 q->netdev = dev; 3128 q->tx_q = netdevq; 3129 t3_update_qset_coalesce(q, p); 3130 3131 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, 3132 GFP_KERNEL | __GFP_COMP); 3133 if (!avail) { 3134 CH_ALERT(adapter, "free list queue 0 initialization failed\n"); 3135 goto err; 3136 } 3137 if (avail < q->fl[0].size) 3138 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n", 3139 avail); 3140 3141 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, 3142 GFP_KERNEL | __GFP_COMP); 3143 if (avail < q->fl[1].size) 3144 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n", 3145 avail); 3146 refill_rspq(adapter, &q->rspq, q->rspq.size - 1); 3147 3148 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | 3149 V_NEWTIMER(q->rspq.holdoff_tmr)); 3150 3151 return 0; 3152 3153err_unlock: 3154 spin_unlock_irq(&adapter->sge.reg_lock); 3155err: 3156 t3_free_qset(adapter, q); 3157 return ret; 3158} 3159 3160/** 3161 * t3_start_sge_timers - start SGE timer call backs 3162 * @adap: the adapter 3163 * 3164 * Starts each SGE queue set's timer call back 3165 */ 3166void t3_start_sge_timers(struct adapter *adap) 3167{ 3168 int i; 3169 3170 for (i = 0; i < SGE_QSETS; ++i) { 3171 struct sge_qset *q = &adap->sge.qs[i]; 3172 3173 if (q->tx_reclaim_timer.function) 3174 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 3175 3176 if (q->rx_reclaim_timer.function) 3177 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); 3178 } 3179} 3180 3181/** 3182 * t3_stop_sge_timers - stop SGE timer call backs 3183 * @adap: the adapter 3184 * 3185 * Stops each SGE queue set's timer call back 3186 */ 3187void t3_stop_sge_timers(struct adapter *adap) 3188{ 3189 int i; 3190 3191 for (i = 0; i < SGE_QSETS; ++i) { 3192 struct sge_qset *q = &adap->sge.qs[i]; 3193 3194 if 
(q->tx_reclaim_timer.function) 3195 del_timer_sync(&q->tx_reclaim_timer); 3196 if (q->rx_reclaim_timer.function) 3197 del_timer_sync(&q->rx_reclaim_timer); 3198 } 3199} 3200 3201/** 3202 * t3_free_sge_resources - free SGE resources 3203 * @adap: the adapter 3204 * 3205 * Frees resources used by the SGE queue sets. 3206 */ 3207void t3_free_sge_resources(struct adapter *adap) 3208{ 3209 int i; 3210 3211 for (i = 0; i < SGE_QSETS; ++i) 3212 t3_free_qset(adap, &adap->sge.qs[i]); 3213} 3214 3215/** 3216 * t3_sge_start - enable SGE 3217 * @adap: the adapter 3218 * 3219 * Enables the SGE for DMAs. This is the last step in starting packet 3220 * transfers. 3221 */ 3222void t3_sge_start(struct adapter *adap) 3223{ 3224 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE); 3225} 3226 3227/** 3228 * t3_sge_stop - disable SGE operation 3229 * @adap: the adapter 3230 * 3231 * Disables the DMA engine. This can be called in emeregencies (e.g., 3232 * from error interrupts) or from normal process context. In the latter 3233 * case it also disables any pending queue restart tasklets. Note that 3234 * if it is called in interrupt context it cannot disable the restart 3235 * tasklets as it cannot wait, however the tasklets will have no effect 3236 * since the doorbells are disabled and the driver will call this again 3237 * later from process context, at which time the tasklets will be stopped 3238 * if they are still running. 3239 */ 3240void t3_sge_stop(struct adapter *adap) 3241{ 3242 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0); 3243 if (!in_interrupt()) { 3244 int i; 3245 3246 for (i = 0; i < SGE_QSETS; ++i) { 3247 struct sge_qset *qs = &adap->sge.qs[i]; 3248 3249 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk); 3250 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk); 3251 } 3252 } 3253} 3254 3255/** 3256 * t3_sge_init - initialize SGE 3257 * @adap: the adapter 3258 * @p: the SGE parameters 3259 * 3260 * Performs SGE initialization needed every time after a chip reset. 3261 * We do not initialize any of the queue sets here, instead the driver 3262 * top-level must request those individually. We also do not enable DMA 3263 * here, that should be done after the queues have been set up. 3264 */ 3265void t3_sge_init(struct adapter *adap, struct sge_params *p) 3266{ 3267 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12); 3268 3269 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL | 3270 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN | 3271 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS | 3272 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING; 3273#if SGE_NUM_GENBITS == 1 3274 ctrl |= F_EGRGENCTRL; 3275#endif 3276 if (adap->params.rev > 0) { 3277 if (!(adap->flags & (USING_MSIX | USING_MSI))) 3278 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ; 3279 } 3280 t3_write_reg(adap, A_SG_CONTROL, ctrl); 3281 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | 3282 V_LORCQDRBTHRSH(512)); 3283 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); 3284 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | 3285 V_TIMEOUT(200 * core_ticks_per_usec(adap))); 3286 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 3287 adap->params.rev < T3_REV_C ? 
1000 : 500); 3288 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); 3289 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); 3290 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); 3291 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff)); 3292 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024); 3293} 3294 3295/** 3296 * t3_sge_prep - one-time SGE initialization 3297 * @adap: the associated adapter 3298 * @p: SGE parameters 3299 * 3300 * Performs one-time initialization of SGE SW state. Includes determining 3301 * defaults for the assorted SGE parameters, which admins can change until 3302 * they are used to initialize the SGE. 3303 */ 3304void t3_sge_prep(struct adapter *adap, struct sge_params *p) 3305{ 3306 int i; 3307 3308 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) - 3309 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 3310 3311 for (i = 0; i < SGE_QSETS; ++i) { 3312 struct qset_params *q = p->qset + i; 3313 3314 q->polling = adap->params.rev > 0; 3315 q->coalesce_usecs = 5; 3316 q->rspq_size = 1024; 3317 q->fl_size = 1024; 3318 q->jumbo_size = 512; 3319 q->txq_size[TXQ_ETH] = 1024; 3320 q->txq_size[TXQ_OFLD] = 1024; 3321 q->txq_size[TXQ_CTRL] = 256; 3322 q->cong_thres = 0; 3323 } 3324 3325 spin_lock_init(&adap->sge.reg_lock); 3326} 3327 3328/** 3329 * t3_get_desc - dump an SGE descriptor for debugging purposes 3330 * @qs: the queue set 3331 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx) 3332 * @idx: the descriptor index in the queue 3333 * @data: where to dump the descriptor contents 3334 * 3335 * Dumps the contents of a HW descriptor of an SGE queue. Returns the 3336 * size of the descriptor. 3337 */ 3338int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, 3339 unsigned char *data) 3340{ 3341 if (qnum >= 6) 3342 return -EINVAL; 3343 3344 if (qnum < 3) { 3345 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size) 3346 return -EINVAL; 3347 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc)); 3348 return sizeof(struct tx_desc); 3349 } 3350 3351 if (qnum == 3) { 3352 if (!qs->rspq.desc || idx >= qs->rspq.size) 3353 return -EINVAL; 3354 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc)); 3355 return sizeof(struct rsp_desc); 3356 } 3357 3358 qnum -= 4; 3359 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size) 3360 return -EINVAL; 3361 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc)); 3362 return sizeof(struct rx_desc); 3363}
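
/*
 * Illustrative sketch (not part of the driver): how a debugging helper
 * might use t3_get_desc() above to hex-dump every descriptor of one of a
 * queue set's Tx rings (qnum 0..2).  The helper name and the use of
 * print_hex_dump() are assumptions made for the example only.
 */
#if 0
static void example_dump_txq(const struct sge_qset *qs, unsigned int qnum)
{
        unsigned char buf[sizeof(struct tx_desc)];  /* big enough for a Tx descriptor */
        unsigned int idx;
        int len;

        for (idx = 0; ; idx++) {
                len = t3_get_desc(qs, qnum, idx, buf);
                if (len < 0)            /* -EINVAL: past the end of the ring */
                        break;
                print_hex_dump(KERN_DEBUG, "txd: ", DUMP_PREFIX_OFFSET,
                               16, 1, buf, len, false);
        }
}
#endif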