Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.0-rc7, 2142 lines, 60 kB
1/***************************************************************************** 2 * * 3 * File: sge.c * 4 * $Revision: 1.26 $ * 5 * $Date: 2005/06/21 18:29:48 $ * 6 * Description: * 7 * DMA engine. * 8 * part of the Chelsio 10Gb Ethernet Driver. * 9 * * 10 * This program is free software; you can redistribute it and/or modify * 11 * it under the terms of the GNU General Public License, version 2, as * 12 * published by the Free Software Foundation. * 13 * * 14 * You should have received a copy of the GNU General Public License along * 15 * with this program; if not, write to the Free Software Foundation, Inc., * 16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * 17 * * 18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * 19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * 20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * 21 * * 22 * http://www.chelsio.com * 23 * * 24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * 25 * All rights reserved. * 26 * * 27 * Maintainers: maintainers@chelsio.com * 28 * * 29 * Authors: Dimitrios Michailidis <dm@chelsio.com> * 30 * Tina Yang <tainay@chelsio.com> * 31 * Felix Marti <felix@chelsio.com> * 32 * Scott Bardone <sbardone@chelsio.com> * 33 * Kurt Ottaway <kottaway@chelsio.com> * 34 * Frank DiMambro <frank@chelsio.com> * 35 * * 36 * History: * 37 * * 38 ****************************************************************************/ 39 40#include "common.h" 41 42#include <linux/types.h> 43#include <linux/errno.h> 44#include <linux/pci.h> 45#include <linux/ktime.h> 46#include <linux/netdevice.h> 47#include <linux/etherdevice.h> 48#include <linux/if_vlan.h> 49#include <linux/skbuff.h> 50#include <linux/init.h> 51#include <linux/mm.h> 52#include <linux/tcp.h> 53#include <linux/ip.h> 54#include <linux/in.h> 55#include <linux/if_arp.h> 56#include <linux/slab.h> 57#include <linux/prefetch.h> 58 59#include "cpl5_cmd.h" 60#include "sge.h" 61#include "regs.h" 62#include "espi.h" 63 64/* This belongs in if_ether.h */ 65#define ETH_P_CPL5 0xf 66 67#define SGE_CMDQ_N 2 68#define SGE_FREELQ_N 2 69#define SGE_CMDQ0_E_N 1024 70#define SGE_CMDQ1_E_N 128 71#define SGE_FREEL_SIZE 4096 72#define SGE_JUMBO_FREEL_SIZE 512 73#define SGE_FREEL_REFILL_THRESH 16 74#define SGE_RESPQ_E_N 1024 75#define SGE_INTRTIMER_NRES 1000 76#define SGE_RX_SM_BUF_SIZE 1536 77#define SGE_TX_DESC_MAX_PLEN 16384 78 79#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) 80 81/* 82 * Period of the TX buffer reclaim timer. This timer does not need to run 83 * frequently as TX buffers are usually reclaimed by new TX packets. 84 */ 85#define TX_RECLAIM_PERIOD (HZ / 4) 86 87#define M_CMD_LEN 0x7fffffff 88#define V_CMD_LEN(v) (v) 89#define G_CMD_LEN(v) ((v) & M_CMD_LEN) 90#define V_CMD_GEN1(v) ((v) << 31) 91#define V_CMD_GEN2(v) (v) 92#define F_CMD_DATAVALID (1 << 1) 93#define F_CMD_SOP (1 << 2) 94#define V_CMD_EOP(v) ((v) << 3) 95 96/* 97 * Command queue, receive buffer list, and response queue descriptors. 
98 */ 99#if defined(__BIG_ENDIAN_BITFIELD) 100struct cmdQ_e { 101 u32 addr_lo; 102 u32 len_gen; 103 u32 flags; 104 u32 addr_hi; 105}; 106 107struct freelQ_e { 108 u32 addr_lo; 109 u32 len_gen; 110 u32 gen2; 111 u32 addr_hi; 112}; 113 114struct respQ_e { 115 u32 Qsleeping : 4; 116 u32 Cmdq1CreditReturn : 5; 117 u32 Cmdq1DmaComplete : 5; 118 u32 Cmdq0CreditReturn : 5; 119 u32 Cmdq0DmaComplete : 5; 120 u32 FreelistQid : 2; 121 u32 CreditValid : 1; 122 u32 DataValid : 1; 123 u32 Offload : 1; 124 u32 Eop : 1; 125 u32 Sop : 1; 126 u32 GenerationBit : 1; 127 u32 BufferLength; 128}; 129#elif defined(__LITTLE_ENDIAN_BITFIELD) 130struct cmdQ_e { 131 u32 len_gen; 132 u32 addr_lo; 133 u32 addr_hi; 134 u32 flags; 135}; 136 137struct freelQ_e { 138 u32 len_gen; 139 u32 addr_lo; 140 u32 addr_hi; 141 u32 gen2; 142}; 143 144struct respQ_e { 145 u32 BufferLength; 146 u32 GenerationBit : 1; 147 u32 Sop : 1; 148 u32 Eop : 1; 149 u32 Offload : 1; 150 u32 DataValid : 1; 151 u32 CreditValid : 1; 152 u32 FreelistQid : 2; 153 u32 Cmdq0DmaComplete : 5; 154 u32 Cmdq0CreditReturn : 5; 155 u32 Cmdq1DmaComplete : 5; 156 u32 Cmdq1CreditReturn : 5; 157 u32 Qsleeping : 4; 158} ; 159#endif 160 161/* 162 * SW Context Command and Freelist Queue Descriptors 163 */ 164struct cmdQ_ce { 165 struct sk_buff *skb; 166 DEFINE_DMA_UNMAP_ADDR(dma_addr); 167 DEFINE_DMA_UNMAP_LEN(dma_len); 168}; 169 170struct freelQ_ce { 171 struct sk_buff *skb; 172 DEFINE_DMA_UNMAP_ADDR(dma_addr); 173 DEFINE_DMA_UNMAP_LEN(dma_len); 174}; 175 176/* 177 * SW command, freelist and response rings 178 */ 179struct cmdQ { 180 unsigned long status; /* HW DMA fetch status */ 181 unsigned int in_use; /* # of in-use command descriptors */ 182 unsigned int size; /* # of descriptors */ 183 unsigned int processed; /* total # of descs HW has processed */ 184 unsigned int cleaned; /* total # of descs SW has reclaimed */ 185 unsigned int stop_thres; /* SW TX queue suspend threshold */ 186 u16 pidx; /* producer index (SW) */ 187 u16 cidx; /* consumer index (HW) */ 188 u8 genbit; /* current generation (=valid) bit */ 189 u8 sop; /* is next entry start of packet? 
*/ 190 struct cmdQ_e *entries; /* HW command descriptor Q */ 191 struct cmdQ_ce *centries; /* SW command context descriptor Q */ 192 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ 193 spinlock_t lock; /* Lock to protect cmdQ enqueuing */ 194}; 195 196struct freelQ { 197 unsigned int credits; /* # of available RX buffers */ 198 unsigned int size; /* free list capacity */ 199 u16 pidx; /* producer index (SW) */ 200 u16 cidx; /* consumer index (HW) */ 201 u16 rx_buffer_size; /* Buffer size on this free list */ 202 u16 dma_offset; /* DMA offset to align IP headers */ 203 u16 recycleq_idx; /* skb recycle q to use */ 204 u8 genbit; /* current generation (=valid) bit */ 205 struct freelQ_e *entries; /* HW freelist descriptor Q */ 206 struct freelQ_ce *centries; /* SW freelist context descriptor Q */ 207 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */ 208}; 209 210struct respQ { 211 unsigned int credits; /* credits to be returned to SGE */ 212 unsigned int size; /* # of response Q descriptors */ 213 u16 cidx; /* consumer index (SW) */ 214 u8 genbit; /* current generation(=valid) bit */ 215 struct respQ_e *entries; /* HW response descriptor Q */ 216 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */ 217}; 218 219/* Bit flags for cmdQ.status */ 220enum { 221 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */ 222 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */ 223}; 224 225/* T204 TX SW scheduler */ 226 227/* Per T204 TX port */ 228struct sched_port { 229 unsigned int avail; /* available bits - quota */ 230 unsigned int drain_bits_per_1024ns; /* drain rate */ 231 unsigned int speed; /* drain rate, mbps */ 232 unsigned int mtu; /* mtu size */ 233 struct sk_buff_head skbq; /* pending skbs */ 234}; 235 236/* Per T204 device */ 237struct sched { 238 ktime_t last_updated; /* last time quotas were computed */ 239 unsigned int max_avail; /* max bits to be sent to any port */ 240 unsigned int port; /* port index (round robin ports) */ 241 unsigned int num; /* num skbs in per port queues */ 242 struct sched_port p[MAX_NPORTS]; 243 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */ 244}; 245static void restart_sched(unsigned long); 246 247 248/* 249 * Main SGE data structure 250 * 251 * Interrupts are handled by a single CPU and it is likely that on a MP system 252 * the application is migrated to another CPU. In that scenario, we try to 253 * separate the RX(in irq context) and TX state in order to decrease memory 254 * contention. 
 */
struct sge {
	struct adapter *adapter;	/* adapter backpointer */
	struct net_device *netdev;	/* netdevice backpointer */
	struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
	struct respQ respQ;		/* response Q */
	unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
	unsigned int rx_pkt_pad;	/* RX padding for L2 packets */
	unsigned int jumbo_fl;		/* jumbo freelist Q index */
	unsigned int intrtimer_nres;	/* no-resource interrupt timer */
	unsigned int fixed_intrtimer;	/* non-adaptive interrupt timer */
	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
	struct timer_list espibug_timer;
	unsigned long espibug_timeout;
	struct sk_buff *espibug_skb[MAX_NPORTS];
	u32 sge_control;		/* shadow value of sge control reg */
	struct sge_intr_counts stats;
	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
	struct sched *tx_sched;
	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};

static const u8 ch_mac_addr[ETH_ALEN] = {
	0x0, 0x7, 0x43, 0x0, 0x0, 0x0
};

/*
 * stop tasklet and free all pending skb's
 */
static void tx_sched_stop(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	int i;

	tasklet_kill(&s->sched_tsk);

	for (i = 0; i < MAX_NPORTS; i++)
		__skb_queue_purge(&s->p[s->port].skbq);
}

/*
 * t1_sched_update_parms() is called when the MTU or link speed changes. It
 * re-computes scheduler parameters to cope with the change.
 */
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
				   unsigned int mtu, unsigned int speed)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];
	unsigned int max_avail_segs;

	pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
	if (speed)
		p->speed = speed;
	if (mtu)
		p->mtu = mtu;

	if (speed || mtu) {
		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
		do_div(drain, (p->mtu + 50) * 1000);
		p->drain_bits_per_1024ns = (unsigned int) drain;

		if (p->speed < 1000)
			p->drain_bits_per_1024ns =
				90 * p->drain_bits_per_1024ns / 100;
	}

	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
		p->drain_bits_per_1024ns -= 16;
		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
	} else {
		s->max_avail = 16384;
		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
	}

	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
		 p->speed, s->max_avail, max_avail_segs,
		 p->drain_bits_per_1024ns);

	return max_avail_segs * (p->mtu - 40);
}

#if 0

/*
 * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
 * data that can be pushed per port.
 */
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
	struct sched *s = sge->tx_sched;
	unsigned int i;

	s->max_avail = val;
	for (i = 0; i < MAX_NPORTS; i++)
		t1_sched_update_parms(sge, i, 0, 0);
}

/*
 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 * is draining.
358 */ 359void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port, 360 unsigned int val) 361{ 362 struct sched *s = sge->tx_sched; 363 struct sched_port *p = &s->p[port]; 364 p->drain_bits_per_1024ns = val * 1024 / 1000; 365 t1_sched_update_parms(sge, port, 0, 0); 366} 367 368#endif /* 0 */ 369 370 371/* 372 * get_clock() implements a ns clock (see ktime_get) 373 */ 374static inline ktime_t get_clock(void) 375{ 376 struct timespec ts; 377 378 ktime_get_ts(&ts); 379 return timespec_to_ktime(ts); 380} 381 382/* 383 * tx_sched_init() allocates resources and does basic initialization. 384 */ 385static int tx_sched_init(struct sge *sge) 386{ 387 struct sched *s; 388 int i; 389 390 s = kzalloc(sizeof (struct sched), GFP_KERNEL); 391 if (!s) 392 return -ENOMEM; 393 394 pr_debug("tx_sched_init\n"); 395 tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge); 396 sge->tx_sched = s; 397 398 for (i = 0; i < MAX_NPORTS; i++) { 399 skb_queue_head_init(&s->p[i].skbq); 400 t1_sched_update_parms(sge, i, 1500, 1000); 401 } 402 403 return 0; 404} 405 406/* 407 * sched_update_avail() computes the delta since the last time it was called 408 * and updates the per port quota (number of bits that can be sent to the any 409 * port). 410 */ 411static inline int sched_update_avail(struct sge *sge) 412{ 413 struct sched *s = sge->tx_sched; 414 ktime_t now = get_clock(); 415 unsigned int i; 416 long long delta_time_ns; 417 418 delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated)); 419 420 pr_debug("sched_update_avail delta=%lld\n", delta_time_ns); 421 if (delta_time_ns < 15000) 422 return 0; 423 424 for (i = 0; i < MAX_NPORTS; i++) { 425 struct sched_port *p = &s->p[i]; 426 unsigned int delta_avail; 427 428 delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13; 429 p->avail = min(p->avail + delta_avail, s->max_avail); 430 } 431 432 s->last_updated = now; 433 434 return 1; 435} 436 437/* 438 * sched_skb() is called from two different places. In the tx path, any 439 * packet generating load on an output port will call sched_skb() 440 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq 441 * context (skb == NULL). 442 * The scheduler only returns a skb (which will then be sent) if the 443 * length of the skb is <= the current quota of the output port. 444 */ 445static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, 446 unsigned int credits) 447{ 448 struct sched *s = sge->tx_sched; 449 struct sk_buff_head *skbq; 450 unsigned int i, len, update = 1; 451 452 pr_debug("sched_skb %p\n", skb); 453 if (!skb) { 454 if (!s->num) 455 return NULL; 456 } else { 457 skbq = &s->p[skb->dev->if_port].skbq; 458 __skb_queue_tail(skbq, skb); 459 s->num++; 460 skb = NULL; 461 } 462 463 if (credits < MAX_SKB_FRAGS + 1) 464 goto out; 465 466again: 467 for (i = 0; i < MAX_NPORTS; i++) { 468 s->port = (s->port + 1) & (MAX_NPORTS - 1); 469 skbq = &s->p[s->port].skbq; 470 471 skb = skb_peek(skbq); 472 473 if (!skb) 474 continue; 475 476 len = skb->len; 477 if (len <= s->p[s->port].avail) { 478 s->p[s->port].avail -= len; 479 s->num--; 480 __skb_unlink(skb, skbq); 481 goto out; 482 } 483 skb = NULL; 484 } 485 486 if (update-- && sched_update_avail(sge)) 487 goto again; 488 489out: 490 /* If there are more pending skbs, we use the hardware to schedule us 491 * again. 
492 */ 493 if (s->num && !skb) { 494 struct cmdQ *q = &sge->cmdQ[0]; 495 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 496 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { 497 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 498 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); 499 } 500 } 501 pr_debug("sched_skb ret %p\n", skb); 502 503 return skb; 504} 505 506/* 507 * PIO to indicate that memory mapped Q contains valid descriptor(s). 508 */ 509static inline void doorbell_pio(struct adapter *adapter, u32 val) 510{ 511 wmb(); 512 writel(val, adapter->regs + A_SG_DOORBELL); 513} 514 515/* 516 * Frees all RX buffers on the freelist Q. The caller must make sure that 517 * the SGE is turned off before calling this function. 518 */ 519static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) 520{ 521 unsigned int cidx = q->cidx; 522 523 while (q->credits--) { 524 struct freelQ_ce *ce = &q->centries[cidx]; 525 526 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), 527 dma_unmap_len(ce, dma_len), 528 PCI_DMA_FROMDEVICE); 529 dev_kfree_skb(ce->skb); 530 ce->skb = NULL; 531 if (++cidx == q->size) 532 cidx = 0; 533 } 534} 535 536/* 537 * Free RX free list and response queue resources. 538 */ 539static void free_rx_resources(struct sge *sge) 540{ 541 struct pci_dev *pdev = sge->adapter->pdev; 542 unsigned int size, i; 543 544 if (sge->respQ.entries) { 545 size = sizeof(struct respQ_e) * sge->respQ.size; 546 pci_free_consistent(pdev, size, sge->respQ.entries, 547 sge->respQ.dma_addr); 548 } 549 550 for (i = 0; i < SGE_FREELQ_N; i++) { 551 struct freelQ *q = &sge->freelQ[i]; 552 553 if (q->centries) { 554 free_freelQ_buffers(pdev, q); 555 kfree(q->centries); 556 } 557 if (q->entries) { 558 size = sizeof(struct freelQ_e) * q->size; 559 pci_free_consistent(pdev, size, q->entries, 560 q->dma_addr); 561 } 562 } 563} 564 565/* 566 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a 567 * response queue. 568 */ 569static int alloc_rx_resources(struct sge *sge, struct sge_params *p) 570{ 571 struct pci_dev *pdev = sge->adapter->pdev; 572 unsigned int size, i; 573 574 for (i = 0; i < SGE_FREELQ_N; i++) { 575 struct freelQ *q = &sge->freelQ[i]; 576 577 q->genbit = 1; 578 q->size = p->freelQ_size[i]; 579 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; 580 size = sizeof(struct freelQ_e) * q->size; 581 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); 582 if (!q->entries) 583 goto err_no_mem; 584 585 size = sizeof(struct freelQ_ce) * q->size; 586 q->centries = kzalloc(size, GFP_KERNEL); 587 if (!q->centries) 588 goto err_no_mem; 589 } 590 591 /* 592 * Calculate the buffer sizes for the two free lists. FL0 accommodates 593 * regular sized Ethernet frames, FL1 is sized not to exceed 16K, 594 * including all the sk_buff overhead. 595 * 596 * Note: For T2 FL0 and FL1 are reversed. 597 */ 598 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + 599 sizeof(struct cpl_rx_data) + 600 sge->freelQ[!sge->jumbo_fl].dma_offset; 601 602 size = (16 * 1024) - 603 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 604 605 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; 606 607 /* 608 * Setup which skb recycle Q should be used when recycling buffers from 609 * each free list. 
610 */ 611 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; 612 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; 613 614 sge->respQ.genbit = 1; 615 sge->respQ.size = SGE_RESPQ_E_N; 616 sge->respQ.credits = 0; 617 size = sizeof(struct respQ_e) * sge->respQ.size; 618 sge->respQ.entries = 619 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); 620 if (!sge->respQ.entries) 621 goto err_no_mem; 622 return 0; 623 624err_no_mem: 625 free_rx_resources(sge); 626 return -ENOMEM; 627} 628 629/* 630 * Reclaims n TX descriptors and frees the buffers associated with them. 631 */ 632static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) 633{ 634 struct cmdQ_ce *ce; 635 struct pci_dev *pdev = sge->adapter->pdev; 636 unsigned int cidx = q->cidx; 637 638 q->in_use -= n; 639 ce = &q->centries[cidx]; 640 while (n--) { 641 if (likely(dma_unmap_len(ce, dma_len))) { 642 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), 643 dma_unmap_len(ce, dma_len), 644 PCI_DMA_TODEVICE); 645 if (q->sop) 646 q->sop = 0; 647 } 648 if (ce->skb) { 649 dev_kfree_skb_any(ce->skb); 650 q->sop = 1; 651 } 652 ce++; 653 if (++cidx == q->size) { 654 cidx = 0; 655 ce = q->centries; 656 } 657 } 658 q->cidx = cidx; 659} 660 661/* 662 * Free TX resources. 663 * 664 * Assumes that SGE is stopped and all interrupts are disabled. 665 */ 666static void free_tx_resources(struct sge *sge) 667{ 668 struct pci_dev *pdev = sge->adapter->pdev; 669 unsigned int size, i; 670 671 for (i = 0; i < SGE_CMDQ_N; i++) { 672 struct cmdQ *q = &sge->cmdQ[i]; 673 674 if (q->centries) { 675 if (q->in_use) 676 free_cmdQ_buffers(sge, q, q->in_use); 677 kfree(q->centries); 678 } 679 if (q->entries) { 680 size = sizeof(struct cmdQ_e) * q->size; 681 pci_free_consistent(pdev, size, q->entries, 682 q->dma_addr); 683 } 684 } 685} 686 687/* 688 * Allocates basic TX resources, consisting of memory mapped command Qs. 689 */ 690static int alloc_tx_resources(struct sge *sge, struct sge_params *p) 691{ 692 struct pci_dev *pdev = sge->adapter->pdev; 693 unsigned int size, i; 694 695 for (i = 0; i < SGE_CMDQ_N; i++) { 696 struct cmdQ *q = &sge->cmdQ[i]; 697 698 q->genbit = 1; 699 q->sop = 1; 700 q->size = p->cmdQ_size[i]; 701 q->in_use = 0; 702 q->status = 0; 703 q->processed = q->cleaned = 0; 704 q->stop_thres = 0; 705 spin_lock_init(&q->lock); 706 size = sizeof(struct cmdQ_e) * q->size; 707 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); 708 if (!q->entries) 709 goto err_no_mem; 710 711 size = sizeof(struct cmdQ_ce) * q->size; 712 q->centries = kzalloc(size, GFP_KERNEL); 713 if (!q->centries) 714 goto err_no_mem; 715 } 716 717 /* 718 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE 719 * only. For queue 0 set the stop threshold so we can handle one more 720 * packet from each port, plus reserve an additional 24 entries for 721 * Ethernet packets only. Queue 1 never suspends nor do we reserve 722 * space for Ethernet packets. 723 */ 724 sge->cmdQ[0].stop_thres = sge->adapter->params.nports * 725 (MAX_SKB_FRAGS + 1); 726 return 0; 727 728err_no_mem: 729 free_tx_resources(sge); 730 return -ENOMEM; 731} 732 733static inline void setup_ring_params(struct adapter *adapter, u64 addr, 734 u32 size, int base_reg_lo, 735 int base_reg_hi, int size_reg) 736{ 737 writel((u32)addr, adapter->regs + base_reg_lo); 738 writel(addr >> 32, adapter->regs + base_reg_hi); 739 writel(size, adapter->regs + size_reg); 740} 741 742/* 743 * Enable/disable VLAN acceleration. 
744 */ 745void t1_set_vlan_accel(struct adapter *adapter, int on_off) 746{ 747 struct sge *sge = adapter->sge; 748 749 sge->sge_control &= ~F_VLAN_XTRACT; 750 if (on_off) 751 sge->sge_control |= F_VLAN_XTRACT; 752 if (adapter->open_device_map) { 753 writel(sge->sge_control, adapter->regs + A_SG_CONTROL); 754 readl(adapter->regs + A_SG_CONTROL); /* flush */ 755 } 756} 757 758/* 759 * Programs the various SGE registers. However, the engine is not yet enabled, 760 * but sge->sge_control is setup and ready to go. 761 */ 762static void configure_sge(struct sge *sge, struct sge_params *p) 763{ 764 struct adapter *ap = sge->adapter; 765 766 writel(0, ap->regs + A_SG_CONTROL); 767 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, 768 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); 769 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size, 770 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE); 771 setup_ring_params(ap, sge->freelQ[0].dma_addr, 772 sge->freelQ[0].size, A_SG_FL0BASELWR, 773 A_SG_FL0BASEUPR, A_SG_FL0SIZE); 774 setup_ring_params(ap, sge->freelQ[1].dma_addr, 775 sge->freelQ[1].size, A_SG_FL1BASELWR, 776 A_SG_FL1BASEUPR, A_SG_FL1SIZE); 777 778 /* The threshold comparison uses <. */ 779 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD); 780 781 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size, 782 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE); 783 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT); 784 785 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | 786 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | 787 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | 788 V_RX_PKT_OFFSET(sge->rx_pkt_pad); 789 790#if defined(__BIG_ENDIAN_BITFIELD) 791 sge->sge_control |= F_ENABLE_BIG_ENDIAN; 792#endif 793 794 /* Initialize no-resource timer */ 795 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap); 796 797 t1_sge_set_coalesce_params(sge, p); 798} 799 800/* 801 * Return the payload capacity of the jumbo free-list buffers. 802 */ 803static inline unsigned int jumbo_payload_capacity(const struct sge *sge) 804{ 805 return sge->freelQ[sge->jumbo_fl].rx_buffer_size - 806 sge->freelQ[sge->jumbo_fl].dma_offset - 807 sizeof(struct cpl_rx_data); 808} 809 810/* 811 * Frees all SGE related resources and the sge structure itself 812 */ 813void t1_sge_destroy(struct sge *sge) 814{ 815 int i; 816 817 for_each_port(sge->adapter, i) 818 free_percpu(sge->port_stats[i]); 819 820 kfree(sge->tx_sched); 821 free_tx_resources(sge); 822 free_rx_resources(sge); 823 kfree(sge); 824} 825 826/* 827 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist 828 * context Q) until the Q is full or alloc_skb fails. 829 * 830 * It is possible that the generation bits already match, indicating that the 831 * buffer is already valid and nothing needs to be done. This happens when we 832 * copied a received buffer into a new sk_buff during the interrupt processing. 833 * 834 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad), 835 * we specify a RX_OFFSET in order to make sure that the IP header is 4B 836 * aligned. 
837 */ 838static void refill_free_list(struct sge *sge, struct freelQ *q) 839{ 840 struct pci_dev *pdev = sge->adapter->pdev; 841 struct freelQ_ce *ce = &q->centries[q->pidx]; 842 struct freelQ_e *e = &q->entries[q->pidx]; 843 unsigned int dma_len = q->rx_buffer_size - q->dma_offset; 844 845 while (q->credits < q->size) { 846 struct sk_buff *skb; 847 dma_addr_t mapping; 848 849 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC); 850 if (!skb) 851 break; 852 853 skb_reserve(skb, q->dma_offset); 854 mapping = pci_map_single(pdev, skb->data, dma_len, 855 PCI_DMA_FROMDEVICE); 856 skb_reserve(skb, sge->rx_pkt_pad); 857 858 ce->skb = skb; 859 dma_unmap_addr_set(ce, dma_addr, mapping); 860 dma_unmap_len_set(ce, dma_len, dma_len); 861 e->addr_lo = (u32)mapping; 862 e->addr_hi = (u64)mapping >> 32; 863 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); 864 wmb(); 865 e->gen2 = V_CMD_GEN2(q->genbit); 866 867 e++; 868 ce++; 869 if (++q->pidx == q->size) { 870 q->pidx = 0; 871 q->genbit ^= 1; 872 ce = q->centries; 873 e = q->entries; 874 } 875 q->credits++; 876 } 877} 878 879/* 880 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4 881 * of both rings, we go into 'few interrupt mode' in order to give the system 882 * time to free up resources. 883 */ 884static void freelQs_empty(struct sge *sge) 885{ 886 struct adapter *adapter = sge->adapter; 887 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE); 888 u32 irqholdoff_reg; 889 890 refill_free_list(sge, &sge->freelQ[0]); 891 refill_free_list(sge, &sge->freelQ[1]); 892 893 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && 894 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { 895 irq_reg |= F_FL_EXHAUSTED; 896 irqholdoff_reg = sge->fixed_intrtimer; 897 } else { 898 /* Clear the F_FL_EXHAUSTED interrupts for now */ 899 irq_reg &= ~F_FL_EXHAUSTED; 900 irqholdoff_reg = sge->intrtimer_nres; 901 } 902 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER); 903 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE); 904 905 /* We reenable the Qs to force a freelist GTS interrupt later */ 906 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE); 907} 908 909#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA) 910#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) 911#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \ 912 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) 913 914/* 915 * Disable SGE Interrupts 916 */ 917void t1_sge_intr_disable(struct sge *sge) 918{ 919 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); 920 921 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); 922 writel(0, sge->adapter->regs + A_SG_INT_ENABLE); 923} 924 925/* 926 * Enable SGE interrupts. 927 */ 928void t1_sge_intr_enable(struct sge *sge) 929{ 930 u32 en = SGE_INT_ENABLE; 931 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); 932 933 if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO) 934 en &= ~F_PACKET_TOO_BIG; 935 writel(en, sge->adapter->regs + A_SG_INT_ENABLE); 936 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); 937} 938 939/* 940 * Clear SGE interrupts. 
941 */ 942void t1_sge_intr_clear(struct sge *sge) 943{ 944 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); 945 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); 946} 947 948/* 949 * SGE 'Error' interrupt handler 950 */ 951int t1_sge_intr_error_handler(struct sge *sge) 952{ 953 struct adapter *adapter = sge->adapter; 954 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE); 955 956 if (adapter->port[0].dev->hw_features & NETIF_F_TSO) 957 cause &= ~F_PACKET_TOO_BIG; 958 if (cause & F_RESPQ_EXHAUSTED) 959 sge->stats.respQ_empty++; 960 if (cause & F_RESPQ_OVERFLOW) { 961 sge->stats.respQ_overflow++; 962 pr_alert("%s: SGE response queue overflow\n", 963 adapter->name); 964 } 965 if (cause & F_FL_EXHAUSTED) { 966 sge->stats.freelistQ_empty++; 967 freelQs_empty(sge); 968 } 969 if (cause & F_PACKET_TOO_BIG) { 970 sge->stats.pkt_too_big++; 971 pr_alert("%s: SGE max packet size exceeded\n", 972 adapter->name); 973 } 974 if (cause & F_PACKET_MISMATCH) { 975 sge->stats.pkt_mismatch++; 976 pr_alert("%s: SGE packet mismatch\n", adapter->name); 977 } 978 if (cause & SGE_INT_FATAL) 979 t1_fatal_err(adapter); 980 981 writel(cause, adapter->regs + A_SG_INT_CAUSE); 982 return 0; 983} 984 985const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) 986{ 987 return &sge->stats; 988} 989 990void t1_sge_get_port_stats(const struct sge *sge, int port, 991 struct sge_port_stats *ss) 992{ 993 int cpu; 994 995 memset(ss, 0, sizeof(*ss)); 996 for_each_possible_cpu(cpu) { 997 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); 998 999 ss->rx_cso_good += st->rx_cso_good; 1000 ss->tx_cso += st->tx_cso; 1001 ss->tx_tso += st->tx_tso; 1002 ss->tx_need_hdrroom += st->tx_need_hdrroom; 1003 ss->vlan_xtract += st->vlan_xtract; 1004 ss->vlan_insert += st->vlan_insert; 1005 } 1006} 1007 1008/** 1009 * recycle_fl_buf - recycle a free list buffer 1010 * @fl: the free list 1011 * @idx: index of buffer to recycle 1012 * 1013 * Recycles the specified buffer on the given free list by adding it at 1014 * the next available slot on the list. 1015 */ 1016static void recycle_fl_buf(struct freelQ *fl, int idx) 1017{ 1018 struct freelQ_e *from = &fl->entries[idx]; 1019 struct freelQ_e *to = &fl->entries[fl->pidx]; 1020 1021 fl->centries[fl->pidx] = fl->centries[idx]; 1022 to->addr_lo = from->addr_lo; 1023 to->addr_hi = from->addr_hi; 1024 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); 1025 wmb(); 1026 to->gen2 = V_CMD_GEN2(fl->genbit); 1027 fl->credits++; 1028 1029 if (++fl->pidx == fl->size) { 1030 fl->pidx = 0; 1031 fl->genbit ^= 1; 1032 } 1033} 1034 1035static int copybreak __read_mostly = 256; 1036module_param(copybreak, int, 0); 1037MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 1038 1039/** 1040 * get_packet - return the next ingress packet buffer 1041 * @pdev: the PCI device that received the packet 1042 * @fl: the SGE free list holding the packet 1043 * @len: the actual packet length, excluding any SGE padding 1044 * 1045 * Get the next packet from a free list and complete setup of the 1046 * sk_buff. If the packet is small we make a copy and recycle the 1047 * original buffer, otherwise we use the original buffer itself. If a 1048 * positive drop threshold is supplied packets are dropped and their 1049 * buffers recycled if (a) the number of remaining buffers is under the 1050 * threshold and the packet is too big to copy, or (b) the packet should 1051 * be copied but there is no memory for the copy. 
1052 */ 1053static inline struct sk_buff *get_packet(struct pci_dev *pdev, 1054 struct freelQ *fl, unsigned int len) 1055{ 1056 struct sk_buff *skb; 1057 const struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1058 1059 if (len < copybreak) { 1060 skb = alloc_skb(len + 2, GFP_ATOMIC); 1061 if (!skb) 1062 goto use_orig_buf; 1063 1064 skb_reserve(skb, 2); /* align IP header */ 1065 skb_put(skb, len); 1066 pci_dma_sync_single_for_cpu(pdev, 1067 dma_unmap_addr(ce, dma_addr), 1068 dma_unmap_len(ce, dma_len), 1069 PCI_DMA_FROMDEVICE); 1070 skb_copy_from_linear_data(ce->skb, skb->data, len); 1071 pci_dma_sync_single_for_device(pdev, 1072 dma_unmap_addr(ce, dma_addr), 1073 dma_unmap_len(ce, dma_len), 1074 PCI_DMA_FROMDEVICE); 1075 recycle_fl_buf(fl, fl->cidx); 1076 return skb; 1077 } 1078 1079use_orig_buf: 1080 if (fl->credits < 2) { 1081 recycle_fl_buf(fl, fl->cidx); 1082 return NULL; 1083 } 1084 1085 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), 1086 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1087 skb = ce->skb; 1088 prefetch(skb->data); 1089 1090 skb_put(skb, len); 1091 return skb; 1092} 1093 1094/** 1095 * unexpected_offload - handle an unexpected offload packet 1096 * @adapter: the adapter 1097 * @fl: the free list that received the packet 1098 * 1099 * Called when we receive an unexpected offload packet (e.g., the TOE 1100 * function is disabled or the card is a NIC). Prints a message and 1101 * recycles the buffer. 1102 */ 1103static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) 1104{ 1105 struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1106 struct sk_buff *skb = ce->skb; 1107 1108 pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr), 1109 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1110 pr_err("%s: unexpected offload packet, cmd %u\n", 1111 adapter->name, *skb->data); 1112 recycle_fl_buf(fl, fl->cidx); 1113} 1114 1115/* 1116 * T1/T2 SGE limits the maximum DMA size per TX descriptor to 1117 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the 1118 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner. 1119 * Note that the *_large_page_tx_descs stuff will be optimized out when 1120 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN. 1121 * 1122 * compute_large_page_descs() computes how many additional descriptors are 1123 * required to break down the stack's request. 1124 */ 1125static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) 1126{ 1127 unsigned int count = 0; 1128 1129 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1130 unsigned int nfrags = skb_shinfo(skb)->nr_frags; 1131 unsigned int i, len = skb_headlen(skb); 1132 while (len > SGE_TX_DESC_MAX_PLEN) { 1133 count++; 1134 len -= SGE_TX_DESC_MAX_PLEN; 1135 } 1136 for (i = 0; nfrags--; i++) { 1137 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1138 len = frag->size; 1139 while (len > SGE_TX_DESC_MAX_PLEN) { 1140 count++; 1141 len -= SGE_TX_DESC_MAX_PLEN; 1142 } 1143 } 1144 } 1145 return count; 1146} 1147 1148/* 1149 * Write a cmdQ entry. 1150 * 1151 * Since this function writes the 'flags' field, it must not be used to 1152 * write the first cmdQ entry. 
1153 */ 1154static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping, 1155 unsigned int len, unsigned int gen, 1156 unsigned int eop) 1157{ 1158 BUG_ON(len > SGE_TX_DESC_MAX_PLEN); 1159 1160 e->addr_lo = (u32)mapping; 1161 e->addr_hi = (u64)mapping >> 32; 1162 e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen); 1163 e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen); 1164} 1165 1166/* 1167 * See comment for previous function. 1168 * 1169 * write_tx_descs_large_page() writes additional SGE tx descriptors if 1170 * *desc_len exceeds HW's capability. 1171 */ 1172static inline unsigned int write_large_page_tx_descs(unsigned int pidx, 1173 struct cmdQ_e **e, 1174 struct cmdQ_ce **ce, 1175 unsigned int *gen, 1176 dma_addr_t *desc_mapping, 1177 unsigned int *desc_len, 1178 unsigned int nfrags, 1179 struct cmdQ *q) 1180{ 1181 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1182 struct cmdQ_e *e1 = *e; 1183 struct cmdQ_ce *ce1 = *ce; 1184 1185 while (*desc_len > SGE_TX_DESC_MAX_PLEN) { 1186 *desc_len -= SGE_TX_DESC_MAX_PLEN; 1187 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN, 1188 *gen, nfrags == 0 && *desc_len == 0); 1189 ce1->skb = NULL; 1190 dma_unmap_len_set(ce1, dma_len, 0); 1191 *desc_mapping += SGE_TX_DESC_MAX_PLEN; 1192 if (*desc_len) { 1193 ce1++; 1194 e1++; 1195 if (++pidx == q->size) { 1196 pidx = 0; 1197 *gen ^= 1; 1198 ce1 = q->centries; 1199 e1 = q->entries; 1200 } 1201 } 1202 } 1203 *e = e1; 1204 *ce = ce1; 1205 } 1206 return pidx; 1207} 1208 1209/* 1210 * Write the command descriptors to transmit the given skb starting at 1211 * descriptor pidx with the given generation. 1212 */ 1213static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, 1214 unsigned int pidx, unsigned int gen, 1215 struct cmdQ *q) 1216{ 1217 dma_addr_t mapping, desc_mapping; 1218 struct cmdQ_e *e, *e1; 1219 struct cmdQ_ce *ce; 1220 unsigned int i, flags, first_desc_len, desc_len, 1221 nfrags = skb_shinfo(skb)->nr_frags; 1222 1223 e = e1 = &q->entries[pidx]; 1224 ce = &q->centries[pidx]; 1225 1226 mapping = pci_map_single(adapter->pdev, skb->data, 1227 skb_headlen(skb), PCI_DMA_TODEVICE); 1228 1229 desc_mapping = mapping; 1230 desc_len = skb_headlen(skb); 1231 1232 flags = F_CMD_DATAVALID | F_CMD_SOP | 1233 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | 1234 V_CMD_GEN2(gen); 1235 first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ? 
1236 desc_len : SGE_TX_DESC_MAX_PLEN; 1237 e->addr_lo = (u32)desc_mapping; 1238 e->addr_hi = (u64)desc_mapping >> 32; 1239 e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen); 1240 ce->skb = NULL; 1241 dma_unmap_len_set(ce, dma_len, 0); 1242 1243 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN && 1244 desc_len > SGE_TX_DESC_MAX_PLEN) { 1245 desc_mapping += first_desc_len; 1246 desc_len -= first_desc_len; 1247 e1++; 1248 ce++; 1249 if (++pidx == q->size) { 1250 pidx = 0; 1251 gen ^= 1; 1252 e1 = q->entries; 1253 ce = q->centries; 1254 } 1255 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, 1256 &desc_mapping, &desc_len, 1257 nfrags, q); 1258 1259 if (likely(desc_len)) 1260 write_tx_desc(e1, desc_mapping, desc_len, gen, 1261 nfrags == 0); 1262 } 1263 1264 ce->skb = NULL; 1265 dma_unmap_addr_set(ce, dma_addr, mapping); 1266 dma_unmap_len_set(ce, dma_len, skb_headlen(skb)); 1267 1268 for (i = 0; nfrags--; i++) { 1269 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1270 e1++; 1271 ce++; 1272 if (++pidx == q->size) { 1273 pidx = 0; 1274 gen ^= 1; 1275 e1 = q->entries; 1276 ce = q->centries; 1277 } 1278 1279 mapping = pci_map_page(adapter->pdev, frag->page, 1280 frag->page_offset, frag->size, 1281 PCI_DMA_TODEVICE); 1282 desc_mapping = mapping; 1283 desc_len = frag->size; 1284 1285 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, 1286 &desc_mapping, &desc_len, 1287 nfrags, q); 1288 if (likely(desc_len)) 1289 write_tx_desc(e1, desc_mapping, desc_len, gen, 1290 nfrags == 0); 1291 ce->skb = NULL; 1292 dma_unmap_addr_set(ce, dma_addr, mapping); 1293 dma_unmap_len_set(ce, dma_len, frag->size); 1294 } 1295 ce->skb = skb; 1296 wmb(); 1297 e->flags = flags; 1298} 1299 1300/* 1301 * Clean up completed Tx buffers. 1302 */ 1303static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) 1304{ 1305 unsigned int reclaim = q->processed - q->cleaned; 1306 1307 if (reclaim) { 1308 pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n", 1309 q->processed, q->cleaned); 1310 free_cmdQ_buffers(sge, q, reclaim); 1311 q->cleaned += reclaim; 1312 } 1313} 1314 1315/* 1316 * Called from tasklet. Checks the scheduler for any 1317 * pending skbs that can be sent. 
 */
static void restart_sched(unsigned long arg)
{
	struct sge *sge = (struct sge *) arg;
	struct adapter *adapter = sge->adapter;
	struct cmdQ *q = &sge->cmdQ[0];
	struct sk_buff *skb;
	unsigned int credits, queued_skb = 0;

	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);

	credits = q->size - q->in_use;
	pr_debug("restart_sched credits=%d\n", credits);
	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
		unsigned int genbit, pidx, count;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
		q->in_use += count;
		genbit = q->genbit;
		pidx = q->pidx;
		q->pidx += count;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->genbit ^= 1;
		}
		write_tx_descs(adapter, skb, pidx, genbit, q);
		credits = q->size - q->in_use;
		queued_skb = 1;
	}

	if (queued_skb) {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
	spin_unlock(&q->lock);
}

/**
 * sge_rx - process an ingress ethernet packet
 * @sge: the sge structure
 * @fl: the free list that contains the packet buffer
 * @len: the packet length
 *
 * Process an ingress ethernet packet and deliver it to the stack.
 */
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;
	struct sge_port_stats *st;
	struct net_device *dev;

	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
	if (unlikely(!skb)) {
		sge->stats.rx_drops++;
		return;
	}

	p = (const struct cpl_rx_pkt *) skb->data;
	if (p->iff >= adapter->params.nports) {
		kfree_skb(skb);
		return;
	}
	__skb_pull(skb, sizeof(*p));

	st = this_cpu_ptr(sge->port_stats[p->iff]);
	dev = adapter->port[p->iff].dev;

	skb->protocol = eth_type_trans(skb, dev);
	if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		++st->rx_cso_good;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb_checksum_none_assert(skb);

	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
		st->vlan_xtract++;
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 ntohs(p->vlan));
	} else
		netif_receive_skb(skb);
}

/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
1422 */ 1423static void restart_tx_queues(struct sge *sge) 1424{ 1425 struct adapter *adap = sge->adapter; 1426 int i; 1427 1428 if (!enough_free_Tx_descs(&sge->cmdQ[0])) 1429 return; 1430 1431 for_each_port(adap, i) { 1432 struct net_device *nd = adap->port[i].dev; 1433 1434 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) && 1435 netif_running(nd)) { 1436 sge->stats.cmdQ_restarted[2]++; 1437 netif_wake_queue(nd); 1438 } 1439 } 1440} 1441 1442/* 1443 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 1444 * information. 1445 */ 1446static unsigned int update_tx_info(struct adapter *adapter, 1447 unsigned int flags, 1448 unsigned int pr0) 1449{ 1450 struct sge *sge = adapter->sge; 1451 struct cmdQ *cmdq = &sge->cmdQ[0]; 1452 1453 cmdq->processed += pr0; 1454 if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) { 1455 freelQs_empty(sge); 1456 flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE); 1457 } 1458 if (flags & F_CMDQ0_ENABLE) { 1459 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status); 1460 1461 if (cmdq->cleaned + cmdq->in_use != cmdq->processed && 1462 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) { 1463 set_bit(CMDQ_STAT_RUNNING, &cmdq->status); 1464 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); 1465 } 1466 if (sge->tx_sched) 1467 tasklet_hi_schedule(&sge->tx_sched->sched_tsk); 1468 1469 flags &= ~F_CMDQ0_ENABLE; 1470 } 1471 1472 if (unlikely(sge->stopped_tx_queues != 0)) 1473 restart_tx_queues(sge); 1474 1475 return flags; 1476} 1477 1478/* 1479 * Process SGE responses, up to the supplied budget. Returns the number of 1480 * responses processed. A negative budget is effectively unlimited. 1481 */ 1482static int process_responses(struct adapter *adapter, int budget) 1483{ 1484 struct sge *sge = adapter->sge; 1485 struct respQ *q = &sge->respQ; 1486 struct respQ_e *e = &q->entries[q->cidx]; 1487 int done = 0; 1488 unsigned int flags = 0; 1489 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1490 1491 while (done < budget && e->GenerationBit == q->genbit) { 1492 flags |= e->Qsleeping; 1493 1494 cmdq_processed[0] += e->Cmdq0CreditReturn; 1495 cmdq_processed[1] += e->Cmdq1CreditReturn; 1496 1497 /* We batch updates to the TX side to avoid cacheline 1498 * ping-pong of TX state information on MP where the sender 1499 * might run on a different CPU than this function... 1500 */ 1501 if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) { 1502 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1503 cmdq_processed[0] = 0; 1504 } 1505 1506 if (unlikely(cmdq_processed[1] > 16)) { 1507 sge->cmdQ[1].processed += cmdq_processed[1]; 1508 cmdq_processed[1] = 0; 1509 } 1510 1511 if (likely(e->DataValid)) { 1512 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; 1513 1514 BUG_ON(!e->Sop || !e->Eop); 1515 if (unlikely(e->Offload)) 1516 unexpected_offload(adapter, fl); 1517 else 1518 sge_rx(sge, fl, e->BufferLength); 1519 1520 ++done; 1521 1522 /* 1523 * Note: this depends on each packet consuming a 1524 * single free-list buffer; cf. the BUG above. 
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			prefetch(fl->centries[fl->cidx].skb);

			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return done;
}

static inline int responses_pending(const struct adapter *adapter)
{
	const struct respQ *Q = &adapter->sge->respQ;
	const struct respQ_e *e = &Q->entries[Q->cidx];

	return e->GenerationBit == Q->genbit;
}

/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses. Such responses are too light-weight to justify
 * calling a softirq when using NAPI, so we handle them specially in hard
 * interrupt context. The function is called with a pointer to a response,
 * which the caller must ensure is a valid pure response. Returns 1 if it
 * encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	prefetch(fl->centries[fl->cidx].skb);
	if (e->DataValid)
		return 1;

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}

/*
 * Handler for new data events when using NAPI. This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
1616 */ 1617int t1_poll(struct napi_struct *napi, int budget) 1618{ 1619 struct adapter *adapter = container_of(napi, struct adapter, napi); 1620 int work_done = process_responses(adapter, budget); 1621 1622 if (likely(work_done < budget)) { 1623 napi_complete(napi); 1624 writel(adapter->sge->respQ.cidx, 1625 adapter->regs + A_SG_SLEEPING); 1626 } 1627 return work_done; 1628} 1629 1630irqreturn_t t1_interrupt(int irq, void *data) 1631{ 1632 struct adapter *adapter = data; 1633 struct sge *sge = adapter->sge; 1634 int handled; 1635 1636 if (likely(responses_pending(adapter))) { 1637 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); 1638 1639 if (napi_schedule_prep(&adapter->napi)) { 1640 if (process_pure_responses(adapter)) 1641 __napi_schedule(&adapter->napi); 1642 else { 1643 /* no data, no NAPI needed */ 1644 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1645 /* undo schedule_prep */ 1646 napi_enable(&adapter->napi); 1647 } 1648 } 1649 return IRQ_HANDLED; 1650 } 1651 1652 spin_lock(&adapter->async_lock); 1653 handled = t1_slow_intr_handler(adapter); 1654 spin_unlock(&adapter->async_lock); 1655 1656 if (!handled) 1657 sge->stats.unhandled_irqs++; 1658 1659 return IRQ_RETVAL(handled != 0); 1660} 1661 1662/* 1663 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. 1664 * 1665 * The code figures out how many entries the sk_buff will require in the 1666 * cmdQ and updates the cmdQ data structure with the state once the enqueue 1667 * has complete. Then, it doesn't access the global structure anymore, but 1668 * uses the corresponding fields on the stack. In conjunction with a spinlock 1669 * around that code, we can make the function reentrant without holding the 1670 * lock when we actually enqueue (which might be expensive, especially on 1671 * architectures with IO MMUs). 1672 * 1673 * This runs with softirqs disabled. 1674 */ 1675static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, 1676 unsigned int qid, struct net_device *dev) 1677{ 1678 struct sge *sge = adapter->sge; 1679 struct cmdQ *q = &sge->cmdQ[qid]; 1680 unsigned int credits, pidx, genbit, count, use_sched_skb = 0; 1681 1682 if (!spin_trylock(&q->lock)) 1683 return NETDEV_TX_LOCKED; 1684 1685 reclaim_completed_tx(sge, q); 1686 1687 pidx = q->pidx; 1688 credits = q->size - q->in_use; 1689 count = 1 + skb_shinfo(skb)->nr_frags; 1690 count += compute_large_page_tx_descs(skb); 1691 1692 /* Ethernet packet */ 1693 if (unlikely(credits < count)) { 1694 if (!netif_queue_stopped(dev)) { 1695 netif_stop_queue(dev); 1696 set_bit(dev->if_port, &sge->stopped_tx_queues); 1697 sge->stats.cmdQ_full[2]++; 1698 pr_err("%s: Tx ring full while queue awake!\n", 1699 adapter->name); 1700 } 1701 spin_unlock(&q->lock); 1702 return NETDEV_TX_BUSY; 1703 } 1704 1705 if (unlikely(credits - count < q->stop_thres)) { 1706 netif_stop_queue(dev); 1707 set_bit(dev->if_port, &sge->stopped_tx_queues); 1708 sge->stats.cmdQ_full[2]++; 1709 } 1710 1711 /* T204 cmdQ0 skbs that are destined for a certain port have to go 1712 * through the scheduler. 1713 */ 1714 if (sge->tx_sched && !qid && skb->dev) { 1715use_sched: 1716 use_sched_skb = 1; 1717 /* Note that the scheduler might return a different skb than 1718 * the one passed in. 
1719 */ 1720 skb = sched_skb(sge, skb, credits); 1721 if (!skb) { 1722 spin_unlock(&q->lock); 1723 return NETDEV_TX_OK; 1724 } 1725 pidx = q->pidx; 1726 count = 1 + skb_shinfo(skb)->nr_frags; 1727 count += compute_large_page_tx_descs(skb); 1728 } 1729 1730 q->in_use += count; 1731 genbit = q->genbit; 1732 pidx = q->pidx; 1733 q->pidx += count; 1734 if (q->pidx >= q->size) { 1735 q->pidx -= q->size; 1736 q->genbit ^= 1; 1737 } 1738 spin_unlock(&q->lock); 1739 1740 write_tx_descs(adapter, skb, pidx, genbit, q); 1741 1742 /* 1743 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring 1744 * the doorbell if the Q is asleep. There is a natural race, where 1745 * the hardware is going to sleep just after we checked, however, 1746 * then the interrupt handler will detect the outstanding TX packet 1747 * and ring the doorbell for us. 1748 */ 1749 if (qid) 1750 doorbell_pio(adapter, F_CMDQ1_ENABLE); 1751 else { 1752 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 1753 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { 1754 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 1755 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); 1756 } 1757 } 1758 1759 if (use_sched_skb) { 1760 if (spin_trylock(&q->lock)) { 1761 credits = q->size - q->in_use; 1762 skb = NULL; 1763 goto use_sched; 1764 } 1765 } 1766 return NETDEV_TX_OK; 1767} 1768 1769#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) 1770 1771/* 1772 * eth_hdr_len - return the length of an Ethernet header 1773 * @data: pointer to the start of the Ethernet header 1774 * 1775 * Returns the length of an Ethernet header, including optional VLAN tag. 1776 */ 1777static inline int eth_hdr_len(const void *data) 1778{ 1779 const struct ethhdr *e = data; 1780 1781 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; 1782} 1783 1784/* 1785 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. 1786 */ 1787netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) 1788{ 1789 struct adapter *adapter = dev->ml_priv; 1790 struct sge *sge = adapter->sge; 1791 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); 1792 struct cpl_tx_pkt *cpl; 1793 struct sk_buff *orig_skb = skb; 1794 int ret; 1795 1796 if (skb->protocol == htons(ETH_P_CPL5)) 1797 goto send; 1798 1799 /* 1800 * We are using a non-standard hard_header_len. 1801 * Allocate more header room in the rare cases it is not big enough. 1802 */ 1803 if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { 1804 skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso)); 1805 ++st->tx_need_hdrroom; 1806 dev_kfree_skb_any(orig_skb); 1807 if (!skb) 1808 return NETDEV_TX_OK; 1809 } 1810 1811 if (skb_shinfo(skb)->gso_size) { 1812 int eth_type; 1813 struct cpl_tx_pkt_lso *hdr; 1814 1815 ++st->tx_tso; 1816 1817 eth_type = skb_network_offset(skb) == ETH_HLEN ? 1818 CPL_ETH_II : CPL_ETH_II_VLAN; 1819 1820 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); 1821 hdr->opcode = CPL_TX_PKT_LSO; 1822 hdr->ip_csum_dis = hdr->l4_csum_dis = 0; 1823 hdr->ip_hdr_words = ip_hdr(skb)->ihl; 1824 hdr->tcp_hdr_words = tcp_hdr(skb)->doff; 1825 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, 1826 skb_shinfo(skb)->gso_size)); 1827 hdr->len = htonl(skb->len - sizeof(*hdr)); 1828 cpl = (struct cpl_tx_pkt *)hdr; 1829 } else { 1830 /* 1831 * Packets shorter than ETH_HLEN can break the MAC, drop them 1832 * early. 
		 * Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				pr_debug("%s: unable to do udp checksum\n", dev->name);
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* Hmmm, assuming to catch the gratuitous ARP... and we'll use
		 * it to flush out stuck espi packets...
		 */
		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;    /* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}

/*
 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {    /* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}

/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers. Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;
	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr,
						ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr,
						ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
2034 */ 2035 skb = skb_get(skb); 2036 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev); 2037 } 2038 } 2039 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 2040} 2041 2042static void espibug_workaround(unsigned long data) 2043{ 2044 struct adapter *adapter = (struct adapter *)data; 2045 struct sge *sge = adapter->sge; 2046 2047 if (netif_running(adapter->port[0].dev)) { 2048 struct sk_buff *skb = sge->espibug_skb[0]; 2049 u32 seop = t1_espi_get_mon(adapter, 0x930, 0); 2050 2051 if ((seop & 0xfff0fff) == 0xfff && skb) { 2052 if (!skb->cb[0]) { 2053 skb_copy_to_linear_data_offset(skb, 2054 sizeof(struct cpl_tx_pkt), 2055 ch_mac_addr, 2056 ETH_ALEN); 2057 skb_copy_to_linear_data_offset(skb, 2058 skb->len - 10, 2059 ch_mac_addr, 2060 ETH_ALEN); 2061 skb->cb[0] = 0xff; 2062 } 2063 2064 /* bump the reference count to avoid freeing of the 2065 * skb once the DMA has completed. 2066 */ 2067 skb = skb_get(skb); 2068 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); 2069 } 2070 } 2071 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 2072} 2073 2074/* 2075 * Creates a t1_sge structure and returns suggested resource parameters. 2076 */ 2077struct sge * __devinit t1_sge_create(struct adapter *adapter, 2078 struct sge_params *p) 2079{ 2080 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); 2081 int i; 2082 2083 if (!sge) 2084 return NULL; 2085 2086 sge->adapter = adapter; 2087 sge->netdev = adapter->port[0].dev; 2088 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; 2089 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; 2090 2091 for_each_port(adapter, i) { 2092 sge->port_stats[i] = alloc_percpu(struct sge_port_stats); 2093 if (!sge->port_stats[i]) 2094 goto nomem_port; 2095 } 2096 2097 init_timer(&sge->tx_reclaim_timer); 2098 sge->tx_reclaim_timer.data = (unsigned long)sge; 2099 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; 2100 2101 if (is_T2(sge->adapter)) { 2102 init_timer(&sge->espibug_timer); 2103 2104 if (adapter->params.nports > 1) { 2105 tx_sched_init(sge); 2106 sge->espibug_timer.function = espibug_workaround_t204; 2107 } else 2108 sge->espibug_timer.function = espibug_workaround; 2109 sge->espibug_timer.data = (unsigned long)sge->adapter; 2110 2111 sge->espibug_timeout = 1; 2112 /* for T204, every 10ms */ 2113 if (adapter->params.nports > 1) 2114 sge->espibug_timeout = HZ/100; 2115 } 2116 2117 2118 p->cmdQ_size[0] = SGE_CMDQ0_E_N; 2119 p->cmdQ_size[1] = SGE_CMDQ1_E_N; 2120 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; 2121 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; 2122 if (sge->tx_sched) { 2123 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) 2124 p->rx_coalesce_usecs = 15; 2125 else 2126 p->rx_coalesce_usecs = 50; 2127 } else 2128 p->rx_coalesce_usecs = 50; 2129 2130 p->coalesce_enable = 0; 2131 p->sample_interval_usecs = 0; 2132 2133 return sge; 2134nomem_port: 2135 while (i >= 0) { 2136 free_percpu(sge->port_stats[i]); 2137 --i; 2138 } 2139 kfree(sge); 2140 return NULL; 2141 2142}