/*****************************************************************************
 *
 * File: sge.c
 * $Revision: 1.26 $
 * $Date: 2005/06/21 18:29:48 $
 * Description:
 *  DMA engine.
 *  part of the Chelsio 10Gb Ethernet Driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * http://www.chelsio.com
 *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Maintainers: maintainers@chelsio.com
 *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>
 *          Tina Yang               <tainay@chelsio.com>
 *          Felix Marti             <felix@chelsio.com>
 *          Scott Bardone           <sbardone@chelsio.com>
 *          Kurt Ottaway            <kottaway@chelsio.com>
 *          Frank DiMambro          <frank@chelsio.com>
 *
 * History:
 *
 ****************************************************************************/

#include "common.h"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/slab.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

/* This belongs in if_ether.h */
#define ETH_P_CPL5	0xf

#define SGE_CMDQ_N		2
#define SGE_FREELQ_N		2
#define SGE_CMDQ0_E_N		1024
#define SGE_CMDQ1_E_N		128
#define SGE_FREEL_SIZE		4096
#define SGE_JUMBO_FREEL_SIZE	512
#define SGE_FREEL_REFILL_THRESH	16
#define SGE_RESPQ_E_N		1024
#define SGE_INTRTIMER_NRES	1000
#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_TX_DESC_MAX_PLEN	16384

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer. This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

#define M_CMD_LEN	0x7fffffff
#define V_CMD_LEN(v)	(v)
#define G_CMD_LEN(v)	((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)	((v) << 31)
#define V_CMD_GEN2(v)	(v)
#define F_CMD_DATAVALID	(1 << 1)
#define F_CMD_SOP	(1 << 2)
#define V_CMD_EOP(v)	((v) << 3)

/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 flags;
	u32 addr_hi;
};

struct freelQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 gen2;
	u32 addr_hi;
};

struct respQ_e {
	u32 Qsleeping		: 4;
	u32 Cmdq1CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq0DmaComplete	: 5;
	u32 FreelistQid		: 2;
	u32 CreditValid		: 1;
	u32 DataValid		: 1;
	u32 Offload		: 1;
	u32 Eop			: 1;
	u32 Sop			: 1;
	u32 GenerationBit	: 1;
	u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 flags;
};

struct freelQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 gen2;
};

struct respQ_e {
	u32 BufferLength;
	u32 GenerationBit	: 1;
	u32 Sop			: 1;
	u32 Eop			: 1;
	u32 Offload		: 1;
	u32 DataValid		: 1;
	u32 CreditValid		: 1;
	u32 FreelistQid		: 2;
	u32 Cmdq0DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq1CreditReturn	: 5;
	u32 Qsleeping		: 4;
};
#endif

/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

/*
 * SW command, freelist and response rings
 */
struct cmdQ {
	unsigned long	status;		/* HW DMA fetch status */
	unsigned int	in_use;		/* # of in-use command descriptors */
	unsigned int	size;		/* # of descriptors */
	unsigned int	processed;	/* total # of descs HW has processed */
	unsigned int	cleaned;	/* total # of descs SW has reclaimed */
	unsigned int	stop_thres;	/* SW TX queue suspend threshold */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u8		genbit;		/* current generation (=valid) bit */
	u8		sop;		/* is next entry start of packet? */
	struct cmdQ_e *entries;		/* HW command descriptor Q */
	struct cmdQ_ce *centries;	/* SW command context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW command descriptor Q */
	spinlock_t	lock;		/* Lock to protect cmdQ enqueuing */
};

struct freelQ {
	unsigned int	credits;	/* # of available RX buffers */
	unsigned int	size;		/* free list capacity */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u16		rx_buffer_size;	/* Buffer size on this free list */
	u16		dma_offset;	/* DMA offset to align IP headers */
	u16		recycleq_idx;	/* skb recycle q to use */
	u8		genbit;		/* current generation (=valid) bit */
	struct freelQ_e	*entries;	/* HW freelist descriptor Q */
	struct freelQ_ce *centries;	/* SW freelist context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW freelist descriptor Q */
};

struct respQ {
	unsigned int	credits;	/* credits to be returned to SGE */
	unsigned int	size;		/* # of response Q descriptors */
	u16		cidx;		/* consumer index (SW) */
	u8		genbit;		/* current generation(=valid) bit */
	struct respQ_e	*entries;	/* HW response descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,		/* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2	/* last packet rung the doorbell */
};

/* T204 TX SW scheduler */

/* Per T204 TX port */
struct sched_port {
	unsigned int	avail;		/* available bits - quota */
	unsigned int	drain_bits_per_1024ns; /* drain rate */
	unsigned int	speed;		/* drain rate, mbps */
	unsigned int	mtu;		/* mtu size */
	struct sk_buff_head skbq;	/* pending skbs */
};

/* Per T204 device */
struct sched {
	ktime_t		last_updated;	/* last time quotas were computed */
	unsigned int	max_avail;	/* max bits to be sent to any port */
	unsigned int	port;		/* port index (round robin ports) */
	unsigned int	num;		/* num skbs in per port queues */
	struct sched_port p[MAX_NPORTS];
	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
};
static void restart_sched(unsigned long);


/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on a MP system
 * the application is migrated to another CPU. In that scenario, we try to
 * separate the RX(in irq context) and TX state in order to decrease memory
 * contention.
 */
struct sge {
	struct adapter *adapter;	/* adapter backpointer */
	struct net_device *netdev;	/* netdevice backpointer */
	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */
	struct respQ	respQ;		/* response Q */
	unsigned long	stopped_tx_queues; /* bitmap of suspended Tx queues */
	unsigned int	rx_pkt_pad;	/* RX padding for L2 packets */
	unsigned int	jumbo_fl;	/* jumbo freelist Q index */
	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
	unsigned int	fixed_intrtimer;/* non-adaptive interrupt timer */
	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
	struct timer_list espibug_timer;
	unsigned long	espibug_timeout;
	struct sk_buff	*espibug_skb[MAX_NPORTS];
	u32		sge_control;	/* shadow value of sge control reg */
	struct sge_intr_counts stats;
	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
	struct sched	*tx_sched;
	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};

/*
 * stop tasklet and free all pending skb's
 */
static void tx_sched_stop(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	int i;

	tasklet_kill(&s->sched_tsk);

	for (i = 0; i < MAX_NPORTS; i++)
		__skb_queue_purge(&s->p[i].skbq);
}

/*
 * t1_sched_update_parms() is called when the MTU or link speed changes. It
 * re-computes scheduler parameters to cope with the change.
 */
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
				   unsigned int mtu, unsigned int speed)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];
	unsigned int max_avail_segs;

	pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
	if (speed)
		p->speed = speed;
	if (mtu)
		p->mtu = mtu;

	if (speed || mtu) {
		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
		do_div(drain, (p->mtu + 50) * 1000);
		p->drain_bits_per_1024ns = (unsigned int) drain;

		if (p->speed < 1000)
			p->drain_bits_per_1024ns =
				90 * p->drain_bits_per_1024ns / 100;
	}

	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
		p->drain_bits_per_1024ns -= 16;
		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
	} else {
		s->max_avail = 16384;
		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
	}

	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
		 p->speed, s->max_avail, max_avail_segs,
		 p->drain_bits_per_1024ns);

	return max_avail_segs * (p->mtu - 40);
}

#if 0

/*
 * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
 * data that can be pushed per port.
 */
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
	struct sched *s = sge->tx_sched;
	unsigned int i;

	s->max_avail = val;
	for (i = 0; i < MAX_NPORTS; i++)
		t1_sched_update_parms(sge, i, 0, 0);
}

/*
 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 * is draining.
 */
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
				    unsigned int val)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];
	p->drain_bits_per_1024ns = val * 1024 / 1000;
	t1_sched_update_parms(sge, port, 0, 0);
}

#endif /* 0 */


/*
 * get_clock() implements a ns clock (see ktime_get)
 */
static inline ktime_t get_clock(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ktime(ts);
}

/*
 * tx_sched_init() allocates resources and does basic initialization.
 */
static int tx_sched_init(struct sge *sge)
{
	struct sched *s;
	int i;

	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	pr_debug("tx_sched_init\n");
	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
	sge->tx_sched = s;

	for (i = 0; i < MAX_NPORTS; i++) {
		skb_queue_head_init(&s->p[i].skbq);
		t1_sched_update_parms(sge, i, 1500, 1000);
	}

	return 0;
}

/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per port quota (number of bits that can be sent to any
 * port).
 */
static inline int sched_update_avail(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	ktime_t now = get_clock();
	unsigned int i;
	long long delta_time_ns;

	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
	if (delta_time_ns < 15000)
		return 0;

	for (i = 0; i < MAX_NPORTS; i++) {
		struct sched_port *p = &s->p[i];
		unsigned int delta_avail;

		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
		p->avail = min(p->avail + delta_avail, s->max_avail);
	}

	s->last_updated = now;

	return 1;
}

/*
 * sched_skb() is called from two different places. In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
				 unsigned int credits)
{
	struct sched *s = sge->tx_sched;
	struct sk_buff_head *skbq;
	unsigned int i, len, update = 1;

	pr_debug("sched_skb %p\n", skb);
	if (!skb) {
		if (!s->num)
			return NULL;
	} else {
		skbq = &s->p[skb->dev->if_port].skbq;
		__skb_queue_tail(skbq, skb);
		s->num++;
		skb = NULL;
	}

	if (credits < MAX_SKB_FRAGS + 1)
		goto out;

again:
	for (i = 0; i < MAX_NPORTS; i++) {
		s->port = (s->port + 1) & (MAX_NPORTS - 1);
		skbq = &s->p[s->port].skbq;

		skb = skb_peek(skbq);

		if (!skb)
			continue;

		len = skb->len;
		if (len <= s->p[s->port].avail) {
			s->p[s->port].avail -= len;
			s->num--;
			__skb_unlink(skb, skbq);
			goto out;
		}
		skb = NULL;
	}

	if (update-- && sched_update_avail(sge))
		goto again;

out:
	/* If there are more pending skbs, we use the hardware to schedule us
	 * again.
	 */
	if (s->num && !skb) {
		struct cmdQ *q = &sge->cmdQ[0];
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
	}
	pr_debug("sched_skb ret %p\n", skb);

	return skb;
}

/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
	wmb();
	writel(val, adapter->regs + A_SG_DOORBELL);
}

/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];

		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
				 dma_unmap_len(ce, dma_len),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(ce->skb);
		ce->skb = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}

/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	if (sge->respQ.entries) {
		size = sizeof(struct respQ_e) * sge->respQ.size;
		pci_free_consistent(pdev, size, sge->respQ.entries,
				    sge->respQ.dma_addr);
	}

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		if (q->centries) {
			free_freelQ_buffers(pdev, q);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct freelQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}

/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		q->genbit = 1;
		q->size = p->freelQ_size[i];
		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
		size = sizeof(struct freelQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct freelQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * Calculate the buffer sizes for the two free lists. FL0 accommodates
	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
	 * including all the sk_buff overhead.
	 *
	 * Note: For T2 FL0 and FL1 are reversed.
	 */
	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
		sizeof(struct cpl_rx_data) +
		sge->freelQ[!sge->jumbo_fl].dma_offset;

	size = (16 * 1024) -
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;

	/*
	 * Setup which skb recycle Q should be used when recycling buffers from
	 * each free list.
	 */
	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

	sge->respQ.genbit = 1;
	sge->respQ.size = SGE_RESPQ_E_N;
	sge->respQ.credits = 0;
	size = sizeof(struct respQ_e) * sge->respQ.size;
	sge->respQ.entries =
		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
	if (!sge->respQ.entries)
		goto err_no_mem;
	return 0;

err_no_mem:
	free_rx_resources(sge);
	return -ENOMEM;
}

/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
	struct cmdQ_ce *ce;
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int cidx = q->cidx;

	q->in_use -= n;
	ce = &q->centries[cidx];
	while (n--) {
		if (likely(dma_unmap_len(ce, dma_len))) {
			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
					 dma_unmap_len(ce, dma_len),
					 PCI_DMA_TODEVICE);
			if (q->sop)
				q->sop = 0;
		}
		if (ce->skb) {
			dev_kfree_skb_any(ce->skb);
			q->sop = 1;
		}
		ce++;
		if (++cidx == q->size) {
			cidx = 0;
			ce = q->centries;
		}
	}
	q->cidx = cidx;
}

/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (q->centries) {
			if (q->in_use)
				free_cmdQ_buffers(sge, q, q->in_use);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct cmdQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}

/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		q->genbit = 1;
		q->sop = 1;
		q->size = p->cmdQ_size[i];
		q->in_use = 0;
		q->status = 0;
		q->processed = q->cleaned = 0;
		q->stop_thres = 0;
		spin_lock_init(&q->lock);
		size = sizeof(struct cmdQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct cmdQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
	 * only. For queue 0 set the stop threshold so we can handle one more
	 * packet from each port, plus reserve an additional 24 entries for
	 * Ethernet packets only. Queue 1 never suspends nor do we reserve
	 * space for Ethernet packets.
	 */
	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
		(MAX_SKB_FRAGS + 1);
	return 0;

err_no_mem:
	free_tx_resources(sge);
	return -ENOMEM;
}

static inline void setup_ring_params(struct adapter *adapter, u64 addr,
				     u32 size, int base_reg_lo,
				     int base_reg_hi, int size_reg)
{
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
	struct sge *sge = adapter->sge;

	sge->sge_control &= ~F_VLAN_XTRACT;
	if (on_off)
		sge->sge_control |= F_VLAN_XTRACT;
	if (adapter->open_device_map) {
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL);	/* flush */
	}
}

/*
 * Programs the various SGE registers. However, the engine is not yet enabled,
 * but sge->sge_control is setup and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
	struct adapter *ap = sge->adapter;

	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);

	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

	t1_sge_set_coalesce_params(sge, p);
}

/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
}

/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
	int i;

	for_each_port(sge->adapter, i)
		free_percpu(sge->port_stats[i]);

	kfree(sge->tx_sched);
	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
}

/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

	while (q->credits < q->size) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
					 PCI_DMA_FROMDEVICE);
		skb_reserve(skb, sge->rx_pkt_pad);

		ce->skb = skb;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
		wmb();
		e->gen2 = V_CMD_GEN2(q->genbit);

		e++;
		ce++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->genbit ^= 1;
			ce = q->centries;
			e = q->entries;
		}
		q->credits++;
	}
}

/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
	u32 irqholdoff_reg;

	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
		irq_reg |= F_FL_EXHAUSTED;
		irqholdoff_reg = sge->fixed_intrtimer;
	} else {
		/* Clear the F_FL_EXHAUSTED interrupts for now */
		irq_reg &= ~F_FL_EXHAUSTED;
		irqholdoff_reg = sge->intrtimer_nres;
	}
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

	/* We reenable the Qs to force a freelist GTS interrupt later */
	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}

#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
	u32 en = SGE_INT_ENABLE;
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	if (sge->adapter->flags & TSO_CAPABLE)
		en &= ~F_PACKET_TOO_BIG;
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}

/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

	if (adapter->flags & TSO_CAPABLE)
		cause &= ~F_PACKET_TOO_BIG;
	if (cause & F_RESPQ_EXHAUSTED)
		sge->stats.respQ_empty++;
	if (cause & F_RESPQ_OVERFLOW) {
		sge->stats.respQ_overflow++;
		pr_alert("%s: SGE response queue overflow\n",
			 adapter->name);
	}
	if (cause & F_FL_EXHAUSTED) {
		sge->stats.freelistQ_empty++;
		freelQs_empty(sge);
	}
	if (cause & F_PACKET_TOO_BIG) {
		sge->stats.pkt_too_big++;
		pr_alert("%s: SGE max packet size exceeded\n",
			 adapter->name);
	}
	if (cause & F_PACKET_MISMATCH) {
		sge->stats.pkt_mismatch++;
		pr_alert("%s: SGE packet mismatch\n", adapter->name);
	}
	if (cause & SGE_INT_FATAL)
		t1_fatal_err(adapter);

	writel(cause, adapter->regs + A_SG_INT_CAUSE);
	return 0;
}

const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
	return &sge->stats;
}

void t1_sge_get_port_stats(const struct sge *sge, int port,
			   struct sge_port_stats *ss)
{
	int cpu;

	memset(ss, 0, sizeof(*ss));
	for_each_possible_cpu(cpu) {
		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);

		ss->rx_cso_good += st->rx_cso_good;
		ss->tx_cso += st->tx_cso;
		ss->tx_tso += st->tx_tso;
		ss->tx_need_hdrroom += st->tx_need_hdrroom;
		ss->vlan_xtract += st->vlan_xtract;
		ss->vlan_insert += st->vlan_insert;
	}
}

/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
	struct freelQ_e *from = &fl->entries[idx];
	struct freelQ_e *to = &fl->entries[fl->pidx];

	fl->centries[fl->pidx] = fl->centries[idx];
	to->addr_lo = from->addr_lo;
	to->addr_hi = from->addr_hi;
	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
	wmb();
	to->gen2 = V_CMD_GEN2(fl->genbit);
	fl->credits++;

	if (++fl->pidx == fl->size) {
		fl->pidx = 0;
		fl->genbit ^= 1;
	}
}

static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/**
 *	get_packet - return the next ingress packet buffer
 *	@pdev: the PCI device that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff. If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself. If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
					 struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct freelQ_ce *ce = &fl->centries[fl->cidx];

	if (len < copybreak) {
		skb = alloc_skb(len + 2, GFP_ATOMIC);
		if (!skb)
			goto use_orig_buf;

		skb_reserve(skb, 2);	/* align IP header */
		skb_put(skb, len);
		pci_dma_sync_single_for_cpu(pdev,
					    dma_unmap_addr(ce, dma_addr),
					    dma_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(ce->skb, skb->data, len);
		pci_dma_sync_single_for_device(pdev,
					       dma_unmap_addr(ce, dma_addr),
					       dma_unmap_len(ce, dma_len),
					       PCI_DMA_FROMDEVICE);
		recycle_fl_buf(fl, fl->cidx);
		return skb;
	}

use_orig_buf:
	if (fl->credits < 2) {
		recycle_fl_buf(fl, fl->cidx);
		return NULL;
	}

	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb = ce->skb;
	prefetch(skb->data);

	skb_put(skb, len);
	return skb;
}

/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC). Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
				    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	pr_err("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
}

/*
 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
 * Note that the *_large_page_tx_descs stuff will be optimized out when
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 *
 * compute_large_page_descs() computes how many additional descriptors are
 * required to break down the stack's request.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
	unsigned int count = 0;

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
		unsigned int i, len = skb_headlen(skb);
		while (len > SGE_TX_DESC_MAX_PLEN) {
			count++;
			len -= SGE_TX_DESC_MAX_PLEN;
		}
		for (i = 0; nfrags--; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			len = frag->size;
			while (len > SGE_TX_DESC_MAX_PLEN) {
				count++;
				len -= SGE_TX_DESC_MAX_PLEN;
			}
		}
	}
	return count;
}

/*
 * Write a cmdQ entry.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry.
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
				 unsigned int len, unsigned int gen,
				 unsigned int eop)
{
	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);

	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}

/*
 * See comment for previous function.
 *
 * write_large_page_tx_descs() writes additional SGE tx descriptors if
 * *desc_len exceeds HW's capability.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
						     struct cmdQ_e **e,
						     struct cmdQ_ce **ce,
						     unsigned int *gen,
						     dma_addr_t *desc_mapping,
						     unsigned int *desc_len,
						     unsigned int nfrags,
						     struct cmdQ *q)
{
	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		struct cmdQ_e *e1 = *e;
		struct cmdQ_ce *ce1 = *ce;

		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
			*desc_len -= SGE_TX_DESC_MAX_PLEN;
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
			ce1->skb = NULL;
			dma_unmap_len_set(ce1, dma_len, 0);
			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
			if (*desc_len) {
				ce1++;
				e1++;
				if (++pidx == q->size) {
					pidx = 0;
					*gen ^= 1;
					ce1 = q->centries;
					e1 = q->entries;
				}
			}
		}
		*e = e1;
		*ce = ce1;
	}
	return pidx;
}

/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping, desc_mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, first_desc_len, desc_len,
		     nfrags = skb_shinfo(skb)->nr_frags;

	e = e1 = &q->entries[pidx];
	ce = &q->centries[pidx];

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	desc_mapping = mapping;
	desc_len = skb_headlen(skb);

	flags = F_CMD_DATAVALID | F_CMD_SOP |
		V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
		V_CMD_GEN2(gen);
	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
		desc_len : SGE_TX_DESC_MAX_PLEN;
	e->addr_lo = (u32)desc_mapping;
	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
	dma_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_mapping += first_desc_len;
		desc_len -= first_desc_len;
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);

		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
	}

	ce->skb = NULL;
	dma_unmap_addr_set(ce, dma_addr, mapping);
	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}

		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
				       PCI_DMA_TODEVICE);
		desc_mapping = mapping;
		desc_len = frag->size;

		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
		ce->skb = NULL;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, frag->size);
	}
	ce->skb = skb;
	wmb();
	e->flags = flags;
}

/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
			 q->processed, q->cleaned);
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}

/*
 * Called from tasklet. Checks the scheduler for any
 * pending skbs that can be sent.
 */
static void restart_sched(unsigned long arg)
{
	struct sge *sge = (struct sge *) arg;
	struct adapter *adapter = sge->adapter;
	struct cmdQ *q = &sge->cmdQ[0];
	struct sk_buff *skb;
	unsigned int credits, queued_skb = 0;

	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);

	credits = q->size - q->in_use;
	pr_debug("restart_sched credits=%d\n", credits);
	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
		unsigned int genbit, pidx, count;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
		q->in_use += count;
		genbit = q->genbit;
		pidx = q->pidx;
		q->pidx += count;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->genbit ^= 1;
		}
		write_tx_descs(adapter, skb, pidx, genbit, q);
		credits = q->size - q->in_use;
		queued_skb = 1;
	}

	if (queued_skb) {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
	spin_unlock(&q->lock);
}

/**
 *	sge_rx - process an ingress ethernet packet
 *	@sge: the sge structure
 *	@fl: the free list that contains the packet buffer
 *	@len: the packet length
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;
	struct sge_port_stats *st;

	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
	if (unlikely(!skb)) {
		sge->stats.rx_drops++;
		return;
	}

	p = (const struct cpl_rx_pkt *) skb->data;
	if (p->iff >= adapter->params.nports) {
		kfree_skb(skb);
		return;
	}
	__skb_pull(skb, sizeof(*p));

	st = this_cpu_ptr(sge->port_stats[p->iff]);

	skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		++st->rx_cso_good;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
		st->vlan_xtract++;
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 ntohs(p->vlan));
	} else
		netif_receive_skb(skb);
}

/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;
	int i;

	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
		return;

	for_each_port(adap, i) {
		struct net_device *nd = adap->port[i].dev;

		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
		    netif_running(nd)) {
			sge->stats.cmdQ_restarted[2]++;
			netif_wake_queue(nd);
		}
	}
}

/*
 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
 * information.
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;
	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
		freelQs_empty(sge);
		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
	}
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}

/*
 * Process SGE responses, up to the supplied budget. Returns the number of
 * responses processed. A negative budget is effectively unlimited.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int done = 0;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (done < budget && e->GenerationBit == q->genbit) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}

		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}

		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			++done;

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			prefetch(fl->centries[fl->cidx].skb);

			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return done;
}

static inline int responses_pending(const struct adapter *adapter)
{
	const struct respQ *Q = &adapter->sge->respQ;
	const struct respQ_e *e = &Q->entries[Q->cidx];

	return (e->GenerationBit == Q->genbit);
}

/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses. Such responses are too light-weight to justify
 * calling a softirq when using NAPI, so we handle them specially in hard
 * interrupt context. The function is called with a pointer to a response,
 * which the caller must ensure is a valid pure response. Returns 1 if it
 * encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	prefetch(fl->centries[fl->cidx].skb);
	if (e->DataValid)
		return 1;

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}

/*
 * Handler for new data events when using NAPI. This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
int t1_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_responses(adapter, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
	}
	return work_done;
}

irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	int handled;

	if (likely(responses_pending(adapter))) {
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
			}
		}
		return IRQ_HANDLED;
	}

	spin_lock(&adapter->async_lock);
	handled = t1_slow_intr_handler(adapter);
	spin_unlock(&adapter->async_lock);

	if (!handled)
		sge->stats.unhandled_irqs++;

	return IRQ_RETVAL(handled != 0);
}

/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed. Then, it doesn't access the global structure anymore, but
 * uses the corresponding fields on the stack. In conjunction with a spinlock
 * around that code, we can make the function reentrant without holding the
 * lock when we actually enqueue (which might be expensive, especially on
 * architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}

#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))

/*
 *	eth_hdr_len - return the length of an Ethernet header
 *	@data: pointer to the start of the Ethernet header
 *
 *	Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}

/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early. Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
		    skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				pr_debug("%s: unable to do udp checksum\n", dev->name);
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* Hmmm, assuming to catch the gratuitous arp... and we'll use
		 * it to flush out stuck espi packets...
		 */
		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}

/*
 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}

/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers. Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;
	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] = {
					0x0, 0x7, 0x43, 0x0, 0x0, 0x0
				};

				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] =
					{0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;

		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ/100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;
nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}