
Merge git://github.com/davem330/net

* git://github.com/davem330/net: (62 commits)
ipv6: don't use inetpeer to store metrics for routes.
can: ti_hecc: include linux/io.h
IRDA: Fix global type conflicts in net/irda/irsysctl.c v2
net: Handle different key sizes between address families in flow cache
net: Align AF-specific flowi structs to long
ipv4: Fix fib_info->fib_metrics leak
caif: fix a potential NULL dereference
sctp: deal with multiple COOKIE_ECHO chunks
ibmveth: Fix checksum offload failure handling
ibmveth: Checksum offload is always disabled
ibmveth: Fix issue with DMA mapping failure
ibmveth: Fix DMA unmap error
pch_gbe: support ML7831 IOH
pch_gbe: added the process of FIFO over run error
pch_gbe: fixed the issue which receives an unnecessary packet.
sfc: Use 64-bit writes for TX push where possible
Revert "sfc: Use write-combining to reduce TX latency" and follow-ups
bnx2x: Fix ethtool advertisement
bnx2x: Fix 578xx link LED
bnx2x: Fix XMAC loopback test
...

+824 -505
+2 -1
Documentation/networking/dmfe.txt
···
+ Note: This driver doesn't have a maintainer.
+
  Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux.

  This program is free software; you can redistribute it and/or
···
  Authors:

  Sten Wang <sten_wang@davicom.com.tw > : Original Author
- Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer

  Contributors:

+2 -4
MAINTAINERS
···
  ATLX ETHERNET DRIVERS
  M:	Jay Cliburn <jcliburn@gmail.com>
  M:	Chris Snook <chris.snook@gmail.com>
- M:	Jie Yang <jie.yang@atheros.com>
  L:	netdev@vger.kernel.org
  W:	http://sourceforge.net/projects/atl1
  W:	http://atl1.sourceforge.net
···

  BROCADE BNA 10 GIGABIT ETHERNET DRIVER
  M:	Rasesh Mody <rmody@brocade.com>
- M:	Debashis Dutt <ddutt@brocade.com>
  L:	netdev@vger.kernel.org
  S:	Supported
  F:	drivers/net/bna/
···

  CISCO VIC ETHERNET NIC DRIVER
  M:	Christian Benvenuti <benve@cisco.com>
- M:	Vasanthy Kolluri <vkolluri@cisco.com>
  M:	Roopa Prabhu <roprabhu@cisco.com>
  M:	David Wang <dwang2@cisco.com>
  S:	Supported
···
  L:	coreteam@netfilter.org
  W:	http://www.netfilter.org/
  W:	http://www.iptables.org/
- T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
  S:	Supported
  F:	include/linux/netfilter*
  F:	include/linux/netfilter/
+6 -5
drivers/net/Kconfig
···
  source "drivers/net/stmmac/Kconfig"

  config PCH_GBE
- 	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
+ 	tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
  	depends on PCI
  	select MII
  	---help---
···
  	  This driver enables Gigabit Ethernet function.

  	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- 	  Output Hub), ML7223.
- 	  ML7223 IOH is for MP(Media Phone) use.
- 	  ML7223 is companion chip for Intel Atom E6xx series.
- 	  ML7223 is completely compatible for Intel EG20T PCH.
+ 	  Output Hub), ML7223/ML7831.
+ 	  ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
+ 	  purpose use.
+ 	  ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+ 	  ML7223/ML7831 is completely compatible for Intel EG20T PCH.

  config FTGMAC100
  	tristate "Faraday FTGMAC100 Gigabit Ethernet support"
+91 -31
drivers/net/bnx2x/bnx2x.h
···
  	u32 raw;
  };

+ /* dropless fc FW/HW related params */
+ #define BRB_SIZE(bp)		(CHIP_IS_E3(bp) ? 1024 : 512)
+ #define MAX_AGG_QS(bp)		(CHIP_IS_E1(bp) ? \
+ 					ETH_MAX_AGGREGATION_QUEUES_E1 :\
+ 					ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+ #define FW_DROP_LEVEL(bp)	(3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+ #define FW_PREFETCH_CNT		16
+ #define DROPLESS_FC_HEADROOM	100

  /* MC hsi */
  #define BCM_PAGE_SHIFT		12
···
  /* SGE ring related macros */
  #define NUM_RX_SGE_PAGES	2
  #define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
- #define MAX_RX_SGE_CNT		(RX_SGE_CNT - 2)
+ #define NEXT_PAGE_SGE_DESC_CNT	2
+ #define MAX_RX_SGE_CNT		(RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
  /* RX_SGE_CNT is promised to be a power of 2 */
  #define RX_SGE_MASK		(RX_SGE_CNT - 1)
  #define NUM_RX_SGE		(RX_SGE_CNT * NUM_RX_SGE_PAGES)
  #define MAX_RX_SGE		(NUM_RX_SGE - 1)
  #define NEXT_SGE_IDX(x)	((((x) & RX_SGE_MASK) == \
- 				  (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+ 				  (MAX_RX_SGE_CNT - 1)) ? \
+ 					(x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+ 					(x) + 1)
  #define RX_SGE(x)		((x) & MAX_RX_SGE)
+
+ /*
+  * Number of required SGEs is the sum of two:
+  * 1. Number of possible opened aggregations (next packet for
+  *    these aggregations will probably consume SGE immidiatelly)
+  * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
+  *    after placement on BD for new TPA aggregation)
+  *
+  * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
+  */
+ #define NUM_SGE_REQ		(MAX_AGG_QS(bp) + \
+ 					(BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+ #define NUM_SGE_PG_REQ		((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+ 					MAX_RX_SGE_CNT)
+ #define SGE_TH_LO(bp)		(NUM_SGE_REQ + \
+ 				 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+ #define SGE_TH_HI(bp)		(SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)

  /* Manipulate a bit vector defined as an array of u64 */
···
  #define NUM_TX_RINGS		16
  #define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
- #define MAX_TX_DESC_CNT	(TX_DESC_CNT - 1)
+ #define NEXT_PAGE_TX_DESC_CNT	1
+ #define MAX_TX_DESC_CNT	(TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
  #define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)
  #define MAX_TX_BD		(NUM_TX_BD - 1)
  #define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
  #define NEXT_TX_IDX(x)	((((x) & MAX_TX_DESC_CNT) == \
- 				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+ 				  (MAX_TX_DESC_CNT - 1)) ? \
+ 					(x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+ 					(x) + 1)
  #define TX_BD(x)		((x) & MAX_TX_BD)
  #define TX_BD_POFF(x)		((x) & MAX_TX_DESC_CNT)

  /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
  #define NUM_RX_RINGS		8
  #define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
- #define MAX_RX_DESC_CNT	(RX_DESC_CNT - 2)
+ #define NEXT_PAGE_RX_DESC_CNT	2
+ #define MAX_RX_DESC_CNT	(RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
  #define RX_DESC_MASK		(RX_DESC_CNT - 1)
  #define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)
  #define MAX_RX_BD		(NUM_RX_BD - 1)
  #define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
- #define MIN_RX_AVAIL		128
+
+ /* dropless fc calculations for BDs
+  *
+  * Number of BDs should as number of buffers in BRB:
+  * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
+  * "next" elements on each page
+  */
+ #define NUM_BD_REQ		BRB_SIZE(bp)
+ #define NUM_BD_PG_REQ		((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+ 					MAX_RX_DESC_CNT)
+ #define BD_TH_LO(bp)		(NUM_BD_REQ + \
+ 				 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+ 				 FW_DROP_LEVEL(bp))
+ #define BD_TH_HI(bp)		(BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+ #define MIN_RX_AVAIL		((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)

  #define MIN_RX_SIZE_TPA_HW	(CHIP_IS_E1(bp) ? \
  					ETH_MIN_RX_CQES_WITH_TPA_E1 : \
···
  					MIN_RX_AVAIL))

  #define NEXT_RX_IDX(x)	((((x) & RX_DESC_MASK) == \
- 				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+ 				  (MAX_RX_DESC_CNT - 1)) ? \
+ 					(x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+ 					(x) + 1)
  #define RX_BD(x)		((x) & MAX_RX_BD)

  /*
···
  #define CQE_BD_REL	(sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
  #define NUM_RCQ_RINGS		(NUM_RX_RINGS * CQE_BD_REL)
  #define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
- #define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - 1)
+ #define NEXT_PAGE_RCQ_DESC_CNT	1
+ #define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
  #define NUM_RCQ_BD		(RCQ_DESC_CNT * NUM_RCQ_RINGS)
  #define MAX_RCQ_BD		(NUM_RCQ_BD - 1)
  #define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
  #define NEXT_RCQ_IDX(x)	((((x) & MAX_RCQ_DESC_CNT) == \
- 				  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+ 				  (MAX_RCQ_DESC_CNT - 1)) ? \
+ 					(x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+ 					(x) + 1)
  #define RCQ_BD(x)		((x) & MAX_RCQ_BD)
+
+ /* dropless fc calculations for RCQs
+  *
+  * Number of RCQs should be as number of buffers in BRB:
+  * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
+  * "next" elements on each page
+  */
+ #define NUM_RCQ_REQ		BRB_SIZE(bp)
+ #define NUM_RCQ_PG_REQ		((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
+ 					MAX_RCQ_DESC_CNT)
+ #define RCQ_TH_LO(bp)		(NUM_RCQ_REQ + \
+ 				 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+ 				 FW_DROP_LEVEL(bp))
+ #define RCQ_TH_HI(bp)		(RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)


  /* This is needed for determining of last_max */
···
  #define FP_CSB_FUNC_OFF \
  	offsetof(struct cstorm_status_block_c, func)

- #define HC_INDEX_TOE_RX_CQ_CONS		0 /* Formerly Ustorm TOE CQ index */
- 					  /* (HC_INDEX_U_TOE_RX_CQ_CONS)  */
- #define HC_INDEX_ETH_RX_CQ_CONS		1 /* Formerly Ustorm ETH CQ index */
- 					  /* (HC_INDEX_U_ETH_RX_CQ_CONS)  */
- #define HC_INDEX_ETH_RX_BD_CONS		2 /* Formerly Ustorm ETH BD index */
- 					  /* (HC_INDEX_U_ETH_RX_BD_CONS)  */
+ #define HC_INDEX_ETH_RX_CQ_CONS		1

- #define HC_INDEX_TOE_TX_CQ_CONS		4 /* Formerly Cstorm TOE CQ index */
- 					  /* (HC_INDEX_C_TOE_TX_CQ_CONS)  */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS0	5 /* Formerly Cstorm ETH CQ index */
- 					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)  */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS1	6 /* Formerly Cstorm ETH CQ index */
- 					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)  */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS2	7 /* Formerly Cstorm ETH CQ index */
- 					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)  */
+ #define HC_INDEX_OOO_TX_CQ_CONS		4
+
+ #define HC_INDEX_ETH_TX_CQ_CONS_COS0	5
+
+ #define HC_INDEX_ETH_TX_CQ_CONS_COS1	6
+
+ #define HC_INDEX_ETH_TX_CQ_CONS_COS2	7

  #define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0
-
  #define BNX2X_RX_SB_INDEX \
  	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
···
  #define BP_PORT(bp)			(bp->pfid & 1)
  #define BP_FUNC(bp)			(bp->pfid)
  #define BP_ABS_FUNC(bp)		(bp->pf_num)
- #define BP_E1HVN(bp)			(bp->pfid >> 1)
- #define BP_VN(bp)			(BP_E1HVN(bp)) /*remove when approved*/
- #define BP_L_ID(bp)			(BP_E1HVN(bp) << 2)
- #define BP_FW_MB_IDX(bp)		(BP_PORT(bp) +\
- 	  BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+ #define BP_VN(bp)			((bp)->pfid >> 1)
+ #define BP_MAX_VN_NUM(bp)		(CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+ #define BP_L_ID(bp)			(BP_VN(bp) << 2)
+ #define BP_FW_MB_IDX_VN(bp, vn)	(BP_PORT(bp) +\
+ 	  (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+ #define BP_FW_MB_IDX(bp)		BP_FW_MB_IDX_VN(bp, BP_VN(bp))

  	struct net_device	*dev;
  	struct pci_dev		*pdev;
···
  #define MAX_DMAE_C_PER_PORT		8
  #define INIT_DMAE_C(bp)		(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
- 					 BP_E1HVN(bp))
+ 					 BP_VN(bp))
  #define PMF_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
  					 E1HVN_MAX)
···
  /* must be used on a CID before placing it on a HW ring */
  #define HW_CID(bp, x)			((BP_PORT(bp) << 23) | \
- 					 (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+ 					 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
  					 (x))

  #define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe))
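The new dropless flow-control thresholds above are pure integer arithmetic, so they can be sanity-checked outside the kernel. Below is a minimal standalone sketch of the SGE threshold math; the HSI constants (aggregation-queue count, SPQ depth, SGE size) are illustrative assumptions of this sketch, not values copied from the firmware headers — only the formulas come from the patch.

    #include <stdio.h>

    /* Assumed stand-ins for firmware HSI values (see lead-in note). */
    #define BRB_SIZE                1024    /* E3 chips; older parts use 512 */
    #define MAX_AGG_QS              64      /* aggregation queues, assumed */
    #define MAX_SPQ_PENDING         8       /* assumed */
    #define FW_DROP_LEVEL           (3 + MAX_SPQ_PENDING + MAX_AGG_QS)
    #define DROPLESS_FC_HEADROOM    100

    #define RX_SGE_CNT              512     /* 4 KiB page / 8-byte SGE, assumed */
    #define NEXT_PAGE_SGE_DESC_CNT  2
    #define MAX_RX_SGE_CNT          (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)

    /* One SGE per possible open aggregation plus one per two remaining
     * BRB blocks, exactly as the NUM_SGE_REQ comment in the diff says. */
    #define NUM_SGE_REQ    (MAX_AGG_QS + (BRB_SIZE - MAX_AGG_QS) / 2)
    #define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / MAX_RX_SGE_CNT)
    #define SGE_TH_LO      (NUM_SGE_REQ + NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
    #define SGE_TH_HI      (SGE_TH_LO + DROPLESS_FC_HEADROOM)

    int main(void)
    {
        /* With these inputs: NUM_SGE_REQ = 64 + 480 = 544, two SGE pages
         * are needed, so TH_LO = 548 and TH_HI = 648. */
        printf("FW_DROP_LEVEL=%d SGE_TH_LO=%d SGE_TH_HI=%d\n",
               FW_DROP_LEVEL, SGE_TH_LO, SGE_TH_HI);
        return 0;
    }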
+14 -13
drivers/net/bnx2x/bnx2x_cmn.c
···
  void bnx2x_init_rx_rings(struct bnx2x *bp)
  {
  	int func = BP_FUNC(bp);
- 	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- 					      ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
  	u16 ring_prod;
  	int i, j;
···
  		if (!fp->disable_tpa) {
  			/* Fill the per-aggregtion pool */
- 			for (i = 0; i < max_agg_queues; i++) {
+ 			for (i = 0; i < MAX_AGG_QS(bp); i++) {
  				struct bnx2x_agg_info *tpa_info =
  					&fp->tpa_info[i];
  				struct sw_rx_bd *first_buf =
···
  					bnx2x_free_rx_sge_range(bp, fp,
  								ring_prod);
  					bnx2x_free_tpa_pool(bp, fp,
- 							    max_agg_queues);
+ 							    MAX_AGG_QS(bp));
  					fp->disable_tpa = 1;
  					ring_prod = 0;
  					break;
···
  		bnx2x_free_rx_bds(fp);

  		if (!fp->disable_tpa)
- 			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
- 					    ETH_MAX_AGGREGATION_QUEUES_E1 :
- 					    ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+ 			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
  	}
  }
···
  	struct bnx2x_fastpath *fp = &bp->fp[index];
  	int ring_size = 0;
  	u8 cos;
+ 	int rx_ring_size = 0;

  	/* if rx_ring_size specified - use it */
- 	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
- 			   MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+ 	if (!bp->rx_ring_size) {

- 	/* allocate at least number of buffers required by FW */
- 	rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
- 						    MIN_RX_SIZE_TPA,
- 				  rx_ring_size);
+ 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+ 		/* allocate at least number of buffers required by FW */
+ 		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ 				     MIN_RX_SIZE_TPA, rx_ring_size);
+
+ 		bp->rx_ring_size = rx_ring_size;
+ 	} else
+ 		rx_ring_size = bp->rx_ring_size;

  	/* Common */
  	sb = &bnx2x_fp(bp, index, status_blk);
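The point of the reworked sizing is that the derived default is now stored back into bp->rx_ring_size, so the later dropless-FC threshold checks see the ring size actually in use. A small standalone sketch of the decision, with placeholder numbers rather than the driver's real MIN_RX_SIZE_* constants:

    #include <stdio.h>

    /* Placeholder limits; the real driver derives these from chip type. */
    #define MAX_RX_AVAIL 8184
    #define FW_MIN       139

    /* Mirrors the patched flow: an ethtool-configured size wins, otherwise
     * split MAX_RX_AVAIL across queues but never go below the FW minimum. */
    static int pick_rx_ring_size(int configured, int num_queues)
    {
        if (configured)
            return configured;
        int size = MAX_RX_AVAIL / num_queues;
        return size > FW_MIN ? size : FW_MIN;
    }

    int main(void)
    {
        printf("default, 8 queues: %d\n", pick_rx_ring_size(0, 8));
        printf("user-set 4096:     %d\n", pick_rx_ring_size(4096, 8));
        return 0;
    }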
+41 -7
drivers/net/bnx2x/bnx2x_ethtool.c
···
  	}

  	/* advertise the requested speed and duplex if supported */
- 	cmd->advertising &= bp->port.supported[cfg_idx];
+ 	if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+ 		DP(NETIF_MSG_LINK, "Advertisement parameters "
+ 				   "are not supported\n");
+ 		return -EINVAL;
+ 	}

  	bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
- 	bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
- 	bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
+ 	bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ 	bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
  					 cmd->advertising);
+ 	if (cmd->advertising) {

+ 		bp->link_params.speed_cap_mask[cfg_idx] = 0;
+ 		if (cmd->advertising & ADVERTISED_10baseT_Half) {
+ 			bp->link_params.speed_cap_mask[cfg_idx] |=
+ 			PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+ 		}
+ 		if (cmd->advertising & ADVERTISED_10baseT_Full)
+ 			bp->link_params.speed_cap_mask[cfg_idx] |=
+ 			PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
+
+ 		if (cmd->advertising & ADVERTISED_100baseT_Full)
+ 			bp->link_params.speed_cap_mask[cfg_idx] |=
+ 			PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+ 		if (cmd->advertising & ADVERTISED_100baseT_Half) {
+ 			bp->link_params.speed_cap_mask[cfg_idx] |=
+ 			PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+ 		}
+ 		if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+ 			bp->link_params.speed_cap_mask[cfg_idx] |=
+ 				PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+ 		}
+ 		if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+ 					ADVERTISED_1000baseKX_Full))
+ 			bp->link_params.speed_cap_mask[cfg_idx] |=
+ 				PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+ 		if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+ 					ADVERTISED_10000baseKX4_Full |
+ 					ADVERTISED_10000baseKR_Full))
+ 			bp->link_params.speed_cap_mask[cfg_idx] |=
+ 				PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+ 	}
  } else { /* forced speed */
  	/* advertise the requested speed and duplex if supported */
  	switch (speed) {
···
  	if (bp->rx_ring_size)
  		ering->rx_pending = bp->rx_ring_size;
  	else
- 		if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
- 			ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
- 		else
- 			ering->rx_pending = MAX_RX_AVAIL;
+ 		ering->rx_pending = MAX_RX_AVAIL;

  	ering->rx_mini_pending = 0;
  	ering->rx_jumbo_pending = 0;
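The behavioural change here is that an advertisement mask containing unsupported modes now fails with -EINVAL instead of being silently ANDed with the supported set. A compact standalone model of just that check — the bit names are invented for the sketch, not the ethtool ABI:

    #include <errno.h>
    #include <stdio.h>

    #define ADV_10_FULL   (1u << 0)   /* illustrative bit layout */
    #define ADV_100_FULL  (1u << 1)
    #define ADV_1000_FULL (1u << 2)

    /* New behaviour: refuse the whole request rather than mask it. */
    static int validate_advertising(unsigned requested, unsigned supported)
    {
        if (requested & ~supported)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        unsigned supported = ADV_100_FULL | ADV_1000_FULL;
        printf("%d\n", validate_advertising(ADV_1000_FULL, supported)); /* 0 */
        printf("%d\n", validate_advertising(ADV_10_FULL, supported));   /* -EINVAL */
        return 0;
    }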
+23 -23
drivers/net/bnx2x/bnx2x_link.c
···
  {
  	u32 nig_reg_adress_crd_weight = 0;
  	u32 pbf_reg_adress_crd_weight = 0;
- 	/* Calculate and set BW for this COS*/
- 	const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
- 	const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+ 	/* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+ 	const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+ 	const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;

  	switch (cos_entry) {
  	case 0:
···
  	/* Calculate total BW requested */
  	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
  		if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
-
- 			if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
- 				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
- 						   "was set to 0\n");
- 				return -EINVAL;
+ 			*total_bw +=
+ 				ets_params->cos[cos_idx].params.bw_params.bw;
  			}
- 			*total_bw +=
- 				ets_params->cos[cos_idx].params.bw_params.bw;
- 		}
  	}

- 	/*Check taotl BW is valid */
+ 	/* Check total BW is valid */
  	if ((100 != *total_bw) || (0 == *total_bw)) {
  		if (0 == *total_bw) {
  			DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW"
···
  	/* Check loopback mode */
  	if (lb)
- 		val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+ 		val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
  	REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
  	bnx2x_set_xumac_nig(params,
  			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
···
  	/* Advertised speeds */
  	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
  			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+
+ 	/* Advertised and set FEC (Forward Error Correction) */
+ 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ 			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+ 			 (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+ 			  MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));

  	/* Enable CL37 BAM */
  	if (REG_RD(bp, params->shmem_base +
···
  				(tmp | EMAC_LED_OVERRIDE));
  			/*
  			 * return here without enabling traffic
- 			 * LED blink andsetting rate in ON mode.
+ 			 * LED blink and setting rate in ON mode.
  			 * In oper mode, enabling LED blink
  			 * and setting rate is needed.
  			 */
···
  		 * This is a work-around for HW issue found when link
  		 * is up in CL73
  		 */
- 		REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+ 		if ((!CHIP_IS_E3(bp)) ||
+ 		    (CHIP_IS_E3(bp) &&
+ 		     mode == LED_MODE_ON))
+ 			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
  		if (CHIP_IS_E1x(bp) ||
  		    CHIP_IS_E2(bp) ||
  		    (mode == LED_MODE_ON))
···
  	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
  	.addr		= 0xff,
  	.def_md_devad	= 0,
- 	.flags		= (FLAGS_HW_LOCK_REQUIRED |
- 			   FLAGS_TX_ERROR_CHECK),
+ 	.flags		= FLAGS_HW_LOCK_REQUIRED,
  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
  	.mdio_ctrl	= 0,
···
  	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
  	.addr		= 0xff,
  	.def_md_devad	= 0,
- 	.flags		= (FLAGS_INIT_XGXS_FIRST |
- 			   FLAGS_TX_ERROR_CHECK),
+ 	.flags		= FLAGS_INIT_XGXS_FIRST,
  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
  	.mdio_ctrl	= 0,
···
  	.addr		= 0xff,
  	.def_md_devad	= 0,
  	.flags		= (FLAGS_HW_LOCK_REQUIRED |
- 			   FLAGS_INIT_XGXS_FIRST |
- 			   FLAGS_TX_ERROR_CHECK),
+ 			   FLAGS_INIT_XGXS_FIRST),
  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
  	.mdio_ctrl	= 0,
···
  	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
  	.addr		= 0xff,
  	.def_md_devad	= 0,
- 	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
- 			   FLAGS_TX_ERROR_CHECK),
+ 	.flags		= FLAGS_FAN_FAILURE_DET_REQ,
  	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
  	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
  	.mdio_ctrl	= 0,
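The "(bw ? bw : 1)" change guards the ETS credit-weight computation: a class configured with 0% bandwidth previously produced a zero weight (and the old code rejected such a configuration outright); clamping to 1 keeps a zero-BW class schedulable. A standalone sketch with a made-up weight base:

    #include <stdio.h>

    /* Mirrors the patched cos_bw_nig/cos_bw_pbf expression; 0xff stands
     * in for min_w_val, which the driver actually derives from link speed. */
    static unsigned cos_credit_weight(unsigned bw, unsigned min_w_val,
                                      unsigned total_bw)
    {
        return ((bw ? bw : 1) * min_w_val) / total_bw;
    }

    int main(void)
    {
        printf("bw=0%%  -> weight %u\n", cos_credit_weight(0, 0xff, 100));
        printf("bw=50%% -> weight %u\n", cos_credit_weight(50, 0xff, 100));
        return 0;
    }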
+115 -47
drivers/net/bnx2x/bnx2x_main.c
···
  		opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

  	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
- 	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
- 		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+ 	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+ 		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
  	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

  #ifdef __BIG_ENDIAN
···
  	if (!CHIP_IS_E1(bp)) {
  		/* init leading/trailing edge */
  		if (IS_MF(bp)) {
- 			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+ 			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
  			if (bp->port.pmf)
  				/* enable nig and gpio3 attention */
  				val |= 0x1100;
···
  	/* init leading/trailing edge */
  	if (IS_MF(bp)) {
- 		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+ 		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
  		if (bp->port.pmf)
  			/* enable nig and gpio3 attention */
  			val |= 0x1100;
···
  	int vn;

  	bp->vn_weight_sum = 0;
- 	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ 	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
  		u32 vn_cfg = bp->mf_config[vn];
  		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
  				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
···
  		CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
  }

+ /* returns func by VN for current port */
+ static inline int func_by_vn(struct bnx2x *bp, int vn)
+ {
+ 	return 2 * vn + BP_PORT(bp);
+ }
+
  static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
  {
  	struct rate_shaping_vars_per_vn m_rs_vn;
  	struct fairness_vars_per_vn m_fair_vn;
  	u32 vn_cfg = bp->mf_config[vn];
- 	int func = 2*vn + BP_PORT(bp);
+ 	int func = func_by_vn(bp, vn);
  	u16 vn_min_rate, vn_max_rate;
  	int i;
···
  	 *
  	 *      and there are 2 functions per port
  	 */
- 	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ 	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
  		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

  		if (func >= E1H_FUNC_MAX)
···
  	/* calculate and set min-max rate for each vn */
  	if (bp->port.pmf)
- 		for (vn = VN_0; vn < E1HVN_MAX; vn++)
+ 		for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
  			bnx2x_init_vn_minmax(bp, vn);

  	/* always enable rate shaping and fairness */
···
  static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
  {
- 	int port = BP_PORT(bp);
  	int func;
  	int vn;

  	/* Set the attention towards other drivers on the same port */
- 	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- 		if (vn == BP_E1HVN(bp))
+ 	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ 		if (vn == BP_VN(bp))
  			continue;

- 		func = ((vn << 1) | port);
+ 		func = func_by_vn(bp, vn);
  		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
  		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
  	}
···
  		bnx2x_dcbx_pmf_update(bp);

  	/* enable nig attention */
- 	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+ 	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
  	if (bp->common.int_block == INT_BLOCK_HC) {
  		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
  		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
···
  	u16 tpa_agg_size = 0;

  	if (!fp->disable_tpa) {
- 		pause->sge_th_hi = 250;
- 		pause->sge_th_lo = 150;
+ 		pause->sge_th_lo = SGE_TH_LO(bp);
+ 		pause->sge_th_hi = SGE_TH_HI(bp);
+
+ 		/* validate SGE ring has enough to cross high threshold */
+ 		WARN_ON(bp->dropless_fc &&
+ 			pause->sge_th_hi + FW_PREFETCH_CNT >
+ 			MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
  		tpa_agg_size = min_t(u32,
  			(min_t(u32, 8, MAX_SKB_FRAGS) *
  			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
···
  	/* pause - not for e1 */
  	if (!CHIP_IS_E1(bp)) {
- 		pause->bd_th_hi = 350;
- 		pause->bd_th_lo = 250;
- 		pause->rcq_th_hi = 350;
- 		pause->rcq_th_lo = 250;
+ 		pause->bd_th_lo = BD_TH_LO(bp);
+ 		pause->bd_th_hi = BD_TH_HI(bp);
+
+ 		pause->rcq_th_lo = RCQ_TH_LO(bp);
+ 		pause->rcq_th_hi = RCQ_TH_HI(bp);
+ 		/*
+ 		 * validate that rings have enough entries to cross
+ 		 * high thresholds
+ 		 */
+ 		WARN_ON(bp->dropless_fc &&
+ 			pause->bd_th_hi + FW_PREFETCH_CNT >
+ 			bp->rx_ring_size);
+ 		WARN_ON(bp->dropless_fc &&
+ 			pause->rcq_th_hi + FW_PREFETCH_CNT >
+ 			NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);

  		pause->pri_map = 1;
  	}
···
  	 * For PF Clients it should be the maximum avaliable number.
  	 * VF driver(s) may want to define it to a smaller value.
  	 */
- 	rxq_init->max_tpa_queues =
- 		(CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- 		 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+ 	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);

  	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
  	rxq_init->fw_sb_id = fp->fw_sb_id;
···
  	hc_sm->time_to_expire = 0xFFFFFFFF;
  }

+
+ /* allocates state machine ids. */
+ static inline
+ void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+ {
+ 	/* zero out state machine indices */
+ 	/* rx indices */
+ 	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ 	/* tx indices */
+ 	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+ 	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+ 	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+ 	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ 	/* map indices */
+ 	/* rx indices */
+ 	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+ 		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+ 	/* tx indices */
+ 	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+ 		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ 	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+ 		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ 	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+ 		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ 	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+ 		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ }
+
  static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
  			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
  {
···
  		hc_sm_p = sb_data_e2.common.state_machine;
  		sb_data_p = (u32 *)&sb_data_e2;
  		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ 		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
  	} else {
  		memset(&sb_data_e1x, 0,
  		       sizeof(struct hc_status_block_data_e1x));
···
  		hc_sm_p = sb_data_e1x.common.state_machine;
  		sb_data_p = (u32 *)&sb_data_e1x;
  		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ 		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
  	}

  	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
···
  	 * take the UNDI lock to protect undi_unload flow from accessing
  	 * registers while we're resetting the chip
  	 */
- 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

  	bnx2x_reset_common(bp);
  	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
···
  	}
  	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

- 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

  	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
···
  		if (CHIP_MODE_IS_4_PORT(bp))
  			dsb_idx = BP_FUNC(bp);
  		else
- 			dsb_idx = BP_E1HVN(bp);
+ 			dsb_idx = BP_VN(bp);

  		prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
  			       IGU_BC_BASE_DSB_PROD + dsb_idx :
  			       IGU_NORM_BASE_DSB_PROD + dsb_idx);

+ 		/*
+ 		 * igu prods come in chunks of E1HVN_MAX (4) -
+ 		 * does not matters what is the current chip mode
+ 		 */
  		for (i = 0; i < (num_segs * E1HVN_MAX);
  		     i += E1HVN_MAX) {
  			addr = IGU_REG_PROD_CONS_MEMORY +
···
  		u32 val;
  		/* The mac address is written to entries 1-4 to
  		   preserve entry 0 which is used by the PMF */
- 		u8 entry = (BP_E1HVN(bp) + 1)*8;
+ 		u8 entry = (BP_VN(bp) + 1)*8;

  		val = (mac_addr[0] << 8) | mac_addr[1];
  		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
···
  	/* Check if there is any driver already loaded */
  	val = REG_RD(bp, MISC_REG_UNPREPARED);
  	if (val == 0x1) {
- 		/* Check if it is the UNDI driver
+
+ 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ 		/*
+ 		 * Check if it is the UNDI driver
  		 * UNDI driver initializes CID offset for normal bell to 0x7
  		 */
- 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
  		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
  		if (val == 0x7) {
  			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
···
  			bnx2x_fw_command(bp, reset_code, 0);
  		}
-
- 		/* now it's safe to release the lock */
- 		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

  		bnx2x_undi_int_disable(bp);
  		port = BP_PORT(bp);
···
  			bp->fw_seq =
  			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
  			       DRV_MSG_SEQ_NUMBER_MASK);
- 		} else
- 			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ 		}
+
+ 		/* now it's safe to release the lock */
+ 		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
  	}
  }
···
  static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
  {
  	int pfid = BP_FUNC(bp);
- 	int vn = BP_E1HVN(bp);
  	int igu_sb_id;
  	u32 val;
  	u8 fid, igu_sb_cnt = 0;

  	bp->igu_base_sb = 0xff;
  	if (CHIP_INT_MODE_IS_BC(bp)) {
+ 		int vn = BP_VN(bp);
  		igu_sb_cnt = bp->igu_sb_cnt;
  		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
  			FP_SB_MAX_E1x;
···
  		bp->igu_base_sb = 0;
  	} else {
  		bp->common.int_block = INT_BLOCK_IGU;
+
+ 		/* do not allow device reset during IGU info preocessing */
+ 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
  		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);

  		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
···

  		bnx2x_get_igu_cam_info(bp);

+ 		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
  	}

  /*
···
  	bp->mf_ov = 0;
  	bp->mf_mode = 0;
- 	vn = BP_E1HVN(bp);
+ 	vn = BP_VN(bp);

  	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
  		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
···
  	/* port info */
  	bnx2x_get_port_hwinfo(bp);
-
- 	if (!BP_NOMCP(bp)) {
- 		bp->fw_seq =
- 		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- 		     DRV_MSG_SEQ_NUMBER_MASK);
- 		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- 	}

  	/* Get MAC addresses */
  	bnx2x_get_mac_hwinfo(bp);
···
  	/* need to reset chip if undi was active */
  	if (!BP_NOMCP(bp))
  		bnx2x_undi_unload(bp);
+
+ 	/* init fw_seq after undi_unload! */
+ 	if (!BP_NOMCP(bp)) {
+ 		bp->fw_seq =
+ 		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ 		     DRV_MSG_SEQ_NUMBER_MASK);
+ 		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+ 	}

  	if (CHIP_REV_IS_FPGA(bp))
  		dev_err(&bp->pdev->dev, "FPGA detected\n");
···
  	/* clean indirect addresses */
  	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
  			       PCICFG_VENDOR_ID_OFFSET);
- 	/* Clean the following indirect addresses for all functions since it
+ 	/*
+ 	 * Clean the following indirect addresses for all functions since it
  	 * is not used by the driver.
  	 */
  	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
  	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
  	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
  	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
- 	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
- 	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
- 	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
- 	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+
+ 	if (CHIP_IS_E1x(bp)) {
+ 		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ 		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ 		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ 		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ 	}

  	/*
  	 * Enable internal target-read (in case we are probed after PF FLR).
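Several hunks above replace the open-coded "(vn << 1) | port" with the new func_by_vn() helper; the mapping itself is unchanged. It interleaves virtual networks across the two ports, which the standalone sketch below simply prints out:

    #include <stdio.h>

    /* Same arithmetic as the helper added in bnx2x_main.c: PCI function
     * numbers alternate between port 0 and port 1 as the VN increases. */
    static int func_by_vn(int port, int vn)
    {
        return 2 * vn + port;
    }

    int main(void)
    {
        for (int vn = 0; vn < 4; vn++)
            printf("vn%d: port0 -> func %d, port1 -> func %d\n",
                   vn, func_by_vn(0, vn), func_by_vn(1, vn));
        return 0;
    }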
+5 -2
drivers/net/bnx2x/bnx2x_reg.h
···
  #define XCM_REG_XX_OVFL_EVNT_ID					 0x20058
  #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS	 (0x1<<0)
  #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS	 (0x1<<1)
- #define XMAC_CTRL_REG_CORE_LOCAL_LPBK				 (0x1<<3)
+ #define XMAC_CTRL_REG_LINE_LOCAL_LPBK				 (0x1<<2)
  #define XMAC_CTRL_REG_RX_EN					 (0x1<<1)
  #define XMAC_CTRL_REG_SOFT_RESET				 (0x1<<6)
  #define XMAC_CTRL_REG_TX_EN					 (0x1<<0)
···
  #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0			 8
  #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1			 9
  #define HW_LOCK_RESOURCE_SPIO					 2
- #define HW_LOCK_RESOURCE_UNDI					 5
+ #define HW_LOCK_RESOURCE_RESET					 5
  #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT			 (0x1<<4)
  #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR			 (0x1<<5)
  #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR			 (0x1<<18)
···
  #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP			0x7
  #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0	0x10
  #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1	0x11
+ #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2	0x12
+ #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY	0x4000
+ #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ		0x8000
  #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150	0x96
  #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL		0x8000
  #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1		0x800e
+4 -3
drivers/net/bnx2x/bnx2x_stats.c
···
  		break;

  	case MAC_TYPE_NONE: /* unreached */
- 		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+ 		DP(BNX2X_MSG_STATS,
+ 		   "stats updated by DMAE but no MAC active\n");
  		return -1;

  	default: /* unreached */
···
  static void bnx2x_func_stats_base_init(struct bnx2x *bp)
  {
- 	int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
+ 	int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
  	u32 func_stx;

  	/* sanity */
···
  	func_stx = bp->func_stx;

  	for (vn = VN_0; vn < vn_max; vn++) {
- 		int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
+ 		int mb_idx = BP_FW_MB_IDX_VN(bp, vn);

  		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
  		bnx2x_func_stats_init(bp);
+1
drivers/net/can/ti_hecc.c
···
  #include <linux/skbuff.h>
  #include <linux/platform_device.h>
  #include <linux/clk.h>
+ #include <linux/io.h>

  #include <linux/can/dev.h>
  #include <linux/can/error.h>
+6
drivers/net/e1000/e1000_hw.c
···
  		checksum += eeprom_data;
  	}

+ #ifdef CONFIG_PARISC
+ 	/* This is a signature and not a checksum on HP c8000 */
+ 	if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+ 		return E1000_SUCCESS;
+
+ #endif
  	if (checksum == (u16) EEPROM_SUM)
  		return E1000_SUCCESS;
  	else {
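For context, an e1000 EEPROM image is considered valid when the checksummed words, including the stored checksum word itself, sum to 0xBABA (EEPROM_SUM); the PA-RISC hunk above accepts a fixed signature word instead on the HP c8000. A userspace model of the summation — the word count here is an assumption of the sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define EEPROM_SUM   0xBABAu
    #define EEPROM_WORDS 0x40   /* words covered by the checksum (assumed) */

    /* Sum all words, checksum word included; valid images sum to 0xBABA. */
    static int e1000_checksum_ok(const uint16_t *eeprom)
    {
        uint16_t sum = 0;
        for (int i = 0; i < EEPROM_WORDS; i++)
            sum += eeprom[i];
        return sum == EEPROM_SUM;
    }

    int main(void)
    {
        uint16_t image[EEPROM_WORDS] = {0};
        image[EEPROM_WORDS - 1] = EEPROM_SUM;   /* checksum over zero data */
        printf("valid: %d\n", e1000_checksum_ok(image));
        return 0;
    }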
+31 -17
drivers/net/ibmveth.c
···
  	struct ibmveth_adapter *adapter = netdev_priv(dev);
  	unsigned long set_attr, clr_attr, ret_attr;
  	unsigned long set_attr6, clr_attr6;
- 	long ret, ret6;
+ 	long ret, ret4, ret6;
  	int rc1 = 0, rc2 = 0;
  	int restart = 0;
···
  	set_attr = 0;
  	clr_attr = 0;
+ 	set_attr6 = 0;
+ 	clr_attr6 = 0;

  	if (data) {
  		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
···
  	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
  	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
  	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
- 		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+ 		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
  					 set_attr, &ret_attr);

- 		if (ret != H_SUCCESS) {
+ 		if (ret4 != H_SUCCESS) {
  			netdev_err(dev, "unable to change IPv4 checksum "
  					"offload settings. %d rc=%ld\n",
- 					data, ret);
+ 					data, ret4);

- 			ret = h_illan_attributes(adapter->vdev->unit_address,
- 						 set_attr, clr_attr, &ret_attr);
+ 			h_illan_attributes(adapter->vdev->unit_address,
+ 					   set_attr, clr_attr, &ret_attr);
+
+ 			if (data == 1)
+ 				dev->features &= ~NETIF_F_IP_CSUM;
+
  		} else {
  			adapter->fw_ipv4_csum_support = data;
  		}
···
  		if (ret6 != H_SUCCESS) {
  			netdev_err(dev, "unable to change IPv6 checksum "
  					"offload settings. %d rc=%ld\n",
- 					data, ret);
+ 					data, ret6);

- 			ret = h_illan_attributes(adapter->vdev->unit_address,
- 						 set_attr6, clr_attr6,
- 						 &ret_attr);
+ 			h_illan_attributes(adapter->vdev->unit_address,
+ 					   set_attr6, clr_attr6, &ret_attr);
+
+ 			if (data == 1)
+ 				dev->features &= ~NETIF_F_IPV6_CSUM;
+
  		} else
  			adapter->fw_ipv6_csum_support = data;

- 	if (ret != H_SUCCESS || ret6 != H_SUCCESS)
+ 	if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
  		adapter->rx_csum = data;
  	else
  		rc1 = -EIO;
···
  	union ibmveth_buf_desc descs[6];
  	int last, i;
  	int force_bounce = 0;
+ 	dma_addr_t dma_addr;

  	/*
  	 * veth handles a maximum of 6 segments including the header, so
···
  	}

  	/* Map the header */
- 	descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
- 						 skb_headlen(skb),
- 						 DMA_TO_DEVICE);
- 	if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+ 	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ 				  skb_headlen(skb), DMA_TO_DEVICE);
+ 	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
  		goto map_failed;

  	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+ 	descs[0].fields.address = dma_addr;

  	/* Map the frags */
  	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- 		unsigned long dma_addr;
  		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

  		dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
···
  		netdev->stats.tx_bytes += skb->len;
  	}

- 	for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ 	dma_unmap_single(&adapter->vdev->dev,
+ 			 descs[0].fields.address,
+ 			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ 			 DMA_TO_DEVICE);
+
+ 	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
  		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
  			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
  			       DMA_TO_DEVICE);
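The unmap fix above restores DMA API symmetry: descriptor 0 is mapped with dma_map_single(), so it must be unmapped with dma_unmap_single(), while the fragments keep using dma_unmap_page(). The stubbed sketch below only illustrates that pairing; the print functions stand in for the real DMA API.

    #include <stdio.h>

    /* Stand-ins for the kernel DMA API, to show the map/unmap pairing. */
    static void unmap_single(int desc) { printf("dma_unmap_single(desc %d)\n", desc); }
    static void unmap_page(int desc)   { printf("dma_unmap_page(desc %d)\n", desc); }

    static void unmap_tx_descs(int nr_frags)
    {
        /* Descriptor 0 holds the linear header, mapped as "single". */
        unmap_single(0);
        /* Descriptors 1..nr_frags hold page fragments. */
        for (int i = 1; i < nr_frags + 1; i++)
            unmap_page(i);
    }

    int main(void)
    {
        unmap_tx_descs(3);
        return 0;
    }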
+10 -2
drivers/net/pch_gbe/pch_gbe.h
···

  /* Reset */
  #define PCH_GBE_ALL_RST		0x80000000 /* All reset */
- #define PCH_GBE_TX_RST		0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
- #define PCH_GBE_RX_RST		0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
+ #define PCH_GBE_TX_RST		0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
+ #define PCH_GBE_RX_RST		0x00004000 /* RX MAC, RX FIFO, RX DMA reset */

  /* TCP/IP Accelerator Control */
  #define PCH_GBE_EX_LIST_EN	0x00000008 /* External List Enable */
···
  /* DMA Control */
  #define PCH_GBE_RX_DMA_EN	0x00000002 /* Enables Receive DMA */
  #define PCH_GBE_TX_DMA_EN	0x00000001 /* Enables Transmission DMA */
+
+ /* RX DMA STATUS */
+ #define PCH_GBE_IDLE_CHECK	0xFFFFFFFE

  /* Wake On LAN Status */
  #define PCH_GBE_WLS_BR		0x00000008 /* Broadcas Address */
···
  struct pch_gbe_buffer {
  	struct sk_buff *skb;
  	dma_addr_t dma;
+ 	unsigned char *rx_buffer;
  	unsigned long time_stamp;
  	u16 length;
  	bool mapped;
···
  struct pch_gbe_rx_ring {
  	struct pch_gbe_rx_desc *desc;
  	dma_addr_t dma;
+ 	unsigned char *rx_buff_pool;
+ 	dma_addr_t rx_buff_pool_logic;
+ 	unsigned int rx_buff_pool_size;
  	unsigned int size;
  	unsigned int count;
  	unsigned int next_to_use;
···
  	unsigned long rx_buffer_len;
  	unsigned long tx_queue_len;
  	bool have_msi;
+ 	bool rx_stop_flag;
  };

  extern const char pch_driver_version[];
+192 -108
drivers/net/pch_gbe/pch_gbe_main.c
···

  #include "pch_gbe.h"
  #include "pch_gbe_api.h"
- #include <linux/prefetch.h>

  #define DRV_VERSION     "1.00"
  const char pch_driver_version[] = DRV_VERSION;
···
  #define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */
  #define PCH_GBE_COPYBREAK_DEFAULT	256
  #define PCH_GBE_PCI_BAR			1
+ #define PCH_GBE_RESERVE_MEMORY		0x200000	/* 2MB */

  /* Macros for ML7223 */
  #define PCI_VENDOR_ID_ROHM			0x10db
  #define PCI_DEVICE_ID_ROHM_ML7223_GBE		0x8013
+
+ /* Macros for ML7831 */
+ #define PCI_DEVICE_ID_ROHM_ML7831_GBE		0x8802

  #define PCH_GBE_TX_WEIGHT         64
  #define PCH_GBE_RX_WEIGHT         64
···
  )

  /* Ethertype field values */
+ #define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
  #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
  #define PCH_GBE_FRAME_SIZE_2048         2048
  #define PCH_GBE_FRAME_SIZE_4096         4096
···
  #define PCH_GBE_INT_ENABLE_MASK ( \
  	PCH_GBE_INT_RX_DMA_CMPLT | \
  	PCH_GBE_INT_RX_DSC_EMP | \
+ 	PCH_GBE_INT_RX_FIFO_ERR | \
  	PCH_GBE_INT_WOL_DET | \
  	PCH_GBE_INT_TX_CMPLT \
  )

+ #define PCH_GBE_INT_DISABLE_ALL		0

  static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
···
  	if (!tmp)
  		pr_err("Error: busy bit is not cleared\n");
  }
+
+ /**
+  * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
+  * @reg:	Pointer of register
+  * @busy:	Busy bit
+  */
+ static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
+ {
+ 	u32 tmp;
+ 	int ret = -1;
+ 	/* wait busy */
+ 	tmp = 20;
+ 	while ((ioread32(reg) & bit) && --tmp)
+ 		udelay(5);
+ 	if (!tmp)
+ 		pr_err("Error: busy bit is not cleared\n");
+ 	else
+ 		ret = 0;
+ 	return ret;
+ }
+
  /**
   * pch_gbe_mac_mar_set - Set MAC address register
   * @hw:	    Pointer to the HW structure
···
  #endif
  	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
  	/* Setup the receive address */
+ 	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+ 	return;
+ }
+
+ static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+ {
+ 	/* Read the MAC address. and store to the private data */
+ 	pch_gbe_mac_read_mac_addr(hw);
+ 	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
+ 	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
+ 	/* Setup the MAC address */
  	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
  	return;
  }
···
  	tcpip = ioread32(&hw->reg->TCPIP_ACC);

- 	if (netdev->features & NETIF_F_RXCSUM) {
- 		tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
- 		tcpip |= PCH_GBE_RX_TCPIPACC_EN;
- 	} else {
- 		tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
- 		tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
- 	}
+ 	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+ 	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
  	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
  	return;
  }
···
  	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
  	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
  	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-
- 	/* Enables Receive DMA */
- 	rxdma = ioread32(&hw->reg->DMA_CTRL);
- 	rxdma |= PCH_GBE_RX_DMA_EN;
- 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
- 	/* Enables Receive */
- 	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
  }
···
  	spin_unlock_irqrestore(&adapter->stats_lock, flags);
  }

+ static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+ {
+ 	struct pch_gbe_hw *hw = &adapter->hw;
+ 	u32 rxdma;
+ 	u16 value;
+ 	int ret;
+
+ 	/* Disable Receive DMA */
+ 	rxdma = ioread32(&hw->reg->DMA_CTRL);
+ 	rxdma &= ~PCH_GBE_RX_DMA_EN;
+ 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ 	/* Wait Rx DMA BUS is IDLE */
+ 	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
+ 	if (ret) {
+ 		/* Disable Bus master */
+ 		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+ 		value &= ~PCI_COMMAND_MASTER;
+ 		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ 		/* Stop Receive */
+ 		pch_gbe_mac_reset_rx(hw);
+ 		/* Enable Bus master */
+ 		value |= PCI_COMMAND_MASTER;
+ 		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ 	} else {
+ 		/* Stop Receive */
+ 		pch_gbe_mac_reset_rx(hw);
+ 	}
+ }
+
+ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+ {
+ 	u32 rxdma;
+
+ 	/* Enables Receive DMA */
+ 	rxdma = ioread32(&hw->reg->DMA_CTRL);
+ 	rxdma |= PCH_GBE_RX_DMA_EN;
+ 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ 	/* Enables Receive */
+ 	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+ 	return;
+ }
+
  /**
   * pch_gbe_intr - Interrupt Handler
   * @irq:   Interrupt number
···
  	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
  		adapter->stats.intr_rx_frame_err_count++;
  	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
- 		adapter->stats.intr_rx_fifo_err_count++;
+ 		if (!adapter->rx_stop_flag) {
+ 			adapter->stats.intr_rx_fifo_err_count++;
+ 			pr_debug("Rx fifo over run\n");
+ 			adapter->rx_stop_flag = true;
+ 			int_en = ioread32(&hw->reg->INT_EN);
+ 			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
+ 				  &hw->reg->INT_EN);
+ 			pch_gbe_stop_receive(adapter);
+ 		}
  	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
  		adapter->stats.intr_rx_dma_err_count++;
  	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
···
  	/* When Rx descriptor is empty  */
  	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
  		adapter->stats.intr_rx_dsc_empty_count++;
- 		pr_err("Rx descriptor is empty\n");
+ 		pr_debug("Rx descriptor is empty\n");
  		int_en = ioread32(&hw->reg->INT_EN);
  		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
  		if (hw->mac.tx_fc_enable) {
···
  	unsigned int i;
  	unsigned int bufsz;

- 	bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
+ 	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
  	i = rx_ring->next_to_use;

  	while ((cleaned_count--)) {
  		buffer_info = &rx_ring->buffer_info[i];
- 		skb = buffer_info->skb;
- 		if (skb) {
- 			skb_trim(skb, 0);
- 		} else {
- 			skb = netdev_alloc_skb(netdev, bufsz);
- 			if (unlikely(!skb)) {
- 				/* Better luck next round */
- 				adapter->stats.rx_alloc_buff_failed++;
- 				break;
- 			}
- 			/* 64byte align */
- 			skb_reserve(skb, PCH_GBE_DMA_ALIGN);
-
- 			buffer_info->skb = skb;
- 			buffer_info->length = adapter->rx_buffer_len;
+ 		skb = netdev_alloc_skb(netdev, bufsz);
+ 		if (unlikely(!skb)) {
+ 			/* Better luck next round */
+ 			adapter->stats.rx_alloc_buff_failed++;
+ 			break;
  		}
+ 		/* align */
+ 		skb_reserve(skb, NET_IP_ALIGN);
+ 		buffer_info->skb = skb;
+
  		buffer_info->dma = dma_map_single(&pdev->dev,
- 						  skb->data,
+ 						  buffer_info->rx_buffer,
  						  buffer_info->length,
  						  DMA_FROM_DEVICE);
  		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
···
  				  &hw->reg->RX_DSC_SW_P);
  	}
  	return;
+ }
+
+ static int
+ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
+ 			      struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+ {
+ 	struct pci_dev *pdev = adapter->pdev;
+ 	struct pch_gbe_buffer *buffer_info;
+ 	unsigned int i;
+ 	unsigned int bufsz;
+ 	unsigned int size;
+
+ 	bufsz = adapter->rx_buffer_len;
+
+ 	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
+ 	rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
+ 						   &rx_ring->rx_buff_pool_logic,
+ 						   GFP_KERNEL);
+ 	if (!rx_ring->rx_buff_pool) {
+ 		pr_err("Unable to allocate memory for the receive poll buffer\n");
+ 		return -ENOMEM;
+ 	}
+ 	memset(rx_ring->rx_buff_pool, 0, size);
+ 	rx_ring->rx_buff_pool_size = size;
+ 	for (i = 0; i < rx_ring->count; i++) {
+ 		buffer_info = &rx_ring->buffer_info[i];
+ 		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
+ 		buffer_info->length = bufsz;
+ 	}
+ 	return 0;
  }

  /**
···
  	unsigned int i;
  	unsigned int cleaned_count = 0;
  	bool cleaned = false;
- 	struct sk_buff *skb, *new_skb;
+ 	struct sk_buff *skb;
  	u8 dma_status;
  	u16 gbec_status;
  	u32 tcp_ip_status;
···
  		rx_desc->gbec_status = DSC_INIT16;
  		buffer_info = &rx_ring->buffer_info[i];
  		skb = buffer_info->skb;
+ 		buffer_info->skb = NULL;

  		/* unmap dma */
  		dma_unmap_single(&pdev->dev, buffer_info->dma,
  				 buffer_info->length, DMA_FROM_DEVICE);
  		buffer_info->mapped = false;
- 		/* Prefetch the packet */
- 		prefetch(skb->data);

  		pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
  			 "TCP:0x%08x]  BufInf = 0x%p\n",
···
  			pr_err("Receive CRC Error\n");
  		} else {
  			/* get receive length */
- 			/* length convert[-3] */
- 			length = (rx_desc->rx_words_eob) - 3;
+ 			/* length convert[-3], length includes FCS length */
+ 			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
+ 			if (rx_desc->rx_words_eob & 0x02)
+ 				length = length - 4;
+ 			/*
+ 			 * buffer_info->rx_buffer: [Header:14][payload]
+ 			 * skb->data: [Reserve:2][Header:14][payload]
+ 			 */
+ 			memcpy(skb->data, buffer_info->rx_buffer, length);

- 			/* Decide the data conversion method */
- 			if (!(netdev->features & NETIF_F_RXCSUM)) {
- 				/* [Header:14][payload] */
- 				if (NET_IP_ALIGN) {
- 					/* Because alignment differs,
- 					 * the new_skb is newly allocated,
- 					 * and data is copied to new_skb.*/
- 					new_skb = netdev_alloc_skb(netdev,
- 							 length + NET_IP_ALIGN);
- 					if (!new_skb) {
- 						/* dorrop error */
- 						pr_err("New skb allocation "
- 							"Error\n");
- 						goto dorrop;
- 					}
- 					skb_reserve(new_skb, NET_IP_ALIGN);
- 					memcpy(new_skb->data, skb->data,
- 					       length);
- 					skb = new_skb;
- 				} else {
- 					/* DMA buffer is used as SKB as it is.*/
- 					buffer_info->skb = NULL;
- 				}
- 			} else {
- 				/* [Header:14][padding:2][payload] */
- 				/* The length includes padding length */
- 				length = length - PCH_GBE_DMA_PADDING;
- 				if ((length < copybreak) ||
- 				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
- 					/* Because alignment differs,
- 					 * the new_skb is newly allocated,
- 					 * and data is copied to new_skb.
- 					 * Padding data is deleted
- 					 * at the time of a copy.*/
- 					new_skb = netdev_alloc_skb(netdev,
- 							 length + NET_IP_ALIGN);
- 					if (!new_skb) {
- 						/* dorrop error */
- 						pr_err("New skb allocation "
- 							"Error\n");
- 						goto dorrop;
- 					}
- 					skb_reserve(new_skb, NET_IP_ALIGN);
- 					memcpy(new_skb->data, skb->data,
- 					       ETH_HLEN);
- 					memcpy(&new_skb->data[ETH_HLEN],
- 					       &skb->data[ETH_HLEN +
- 							  PCH_GBE_DMA_PADDING],
- 					       length - ETH_HLEN);
- 					skb = new_skb;
- 				} else {
- 					/* Padding data is deleted
- 					 * by moving header data.*/
- 					memmove(&skb->data[PCH_GBE_DMA_PADDING],
- 						&skb->data[0], ETH_HLEN);
- 					skb_reserve(skb, NET_IP_ALIGN);
- 					buffer_info->skb = NULL;
- 				}
- 			}
- 			/* The length includes FCS length */
- 			length = length - ETH_FCS_LEN;
  			/* update status of driver */
  			adapter->stats.rx_bytes += length;
  			adapter->stats.rx_packets++;
···
  			pr_debug("Receive skb->ip_summed: %d length: %d\n",
  				 skb->ip_summed, length);
  		}
- dorrop:
  		/* return some buffers to hardware, one at a time is too slow */
  		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
  			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
···
  		pr_err("Error: can't bring device up\n");
  		return err;
  	}
+ 	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
+ 	if (err) {
+ 		pr_err("Error: can't bring device up\n");
+ 		return err;
+ 	}
  	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
  	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
  	adapter->tx_queue_len = netdev->tx_queue_len;
+ 	pch_gbe_start_receive(&adapter->hw);

  	mod_timer(&adapter->watchdog_timer, jiffies);
···
  void pch_gbe_down(struct pch_gbe_adapter *adapter)
  {
  	struct net_device *netdev = adapter->netdev;
+ 	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;

  	/* signal that we're down so the interrupt handler does not
  	 * reschedule our watchdog timer */
···
  	pch_gbe_reset(adapter);
  	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
  	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+
+ 	pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
+ 			    rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+ 	rx_ring->rx_buff_pool_logic = 0;
+ 	rx_ring->rx_buff_pool_size = 0;
+ 	rx_ring->rx_buff_pool = NULL;
  }

  /**
···
  {
  	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
  	int max_frame;
+ 	unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
+ 	int err;

  	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
  	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
···
  	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
  		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
  	else
- 		adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
- 	netdev->mtu = new_mtu;
- 	adapter->hw.mac.max_frame_size = max_frame;
+ 		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;

- 	if (netif_running(netdev))
- 		pch_gbe_reinit_locked(adapter);
- 	else
+ 	if (netif_running(netdev)) {
+ 		pch_gbe_down(adapter);
+ 		err = pch_gbe_up(adapter);
+ 		if (err) {
+ 			adapter->rx_buffer_len = old_rx_buffer_len;
+ 			pch_gbe_up(adapter);
+ 			return -ENOMEM;
+ 		} else {
+ 			netdev->mtu = new_mtu;
+ 			adapter->hw.mac.max_frame_size = max_frame;
+ 		}
+ 	} else {
  		pch_gbe_reset(adapter);
+ 		netdev->mtu = new_mtu;
+ 		adapter->hw.mac.max_frame_size = max_frame;
+ 	}

  	pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
  		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
···
  	int work_done = 0;
  	bool poll_end_flag = false;
  	bool cleaned = false;
+ 	u32 int_en;

  	pr_debug("budget : %d\n", budget);

···
  	if (!netif_carrier_ok(netdev)) {
  		poll_end_flag = true;
  	} else {
- 		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
  		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+ 		if (adapter->rx_stop_flag) {
+ 			adapter->rx_stop_flag = false;
+ 			pch_gbe_start_receive(&adapter->hw);
+ 			int_en = ioread32(&adapter->hw.reg->INT_EN);
+ 			iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
+ 				  &adapter->hw.reg->INT_EN);
+ 		}
+ 		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

  		if (cleaned)
  			work_done = budget;
···
  	 },
  	{.vendor = PCI_VENDOR_ID_ROHM,
  	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
+ 	 .subvendor = PCI_ANY_ID,
+ 	 .subdevice = PCI_ANY_ID,
+ 	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ 	 .class_mask = (0xFFFF00)
+ 	 },
+ 	{.vendor = PCI_VENDOR_ID_ROHM,
+ 	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
  	 .subvendor = PCI_ANY_ID,
  	 .subdevice = PCI_ANY_ID,
  	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
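The FIFO-overrun handling added above splits recovery between interrupt and polling context: the ISR masks the error interrupt and stops RX DMA, and the NAPI poll restarts the receiver once the ring has been drained. A condensed standalone model of that handshake around the rx_stop_flag flag (the printed steps stand in for the register writes):

    #include <stdbool.h>
    #include <stdio.h>

    static bool rx_stop_flag;

    /* ISR side: runs the recovery once per overrun burst thanks to the flag. */
    static void on_rx_fifo_overrun(void)
    {
        if (!rx_stop_flag) {
            rx_stop_flag = true;
            printf("mask RX_FIFO_ERR interrupt, stop RX DMA\n");
        }
    }

    /* NAPI side: restart only after the ring has been drained. */
    static void napi_poll(void)
    {
        printf("drain RX ring\n");
        if (rx_stop_flag) {
            rx_stop_flag = false;
            printf("restart RX DMA, unmask RX_FIFO_ERR interrupt\n");
        }
    }

    int main(void)
    {
        on_rx_fifo_overrun();
        on_rx_fifo_overrun();   /* second overrun is absorbed by the flag */
        napi_poll();
        return 0;
    }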
+2 -16
drivers/net/sfc/efx.c
··· 1050 1050 { 1051 1051 struct pci_dev *pci_dev = efx->pci_dev; 1052 1052 dma_addr_t dma_mask = efx->type->max_dma_mask; 1053 - bool use_wc; 1054 1053 int rc; 1055 1054 1056 1055 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); ··· 1100 1101 rc = -EIO; 1101 1102 goto fail3; 1102 1103 } 1103 - 1104 - /* bug22643: If SR-IOV is enabled then tx push over a write combined 1105 - * mapping is unsafe. We need to disable write combining in this case. 1106 - * MSI is unsupported when SR-IOV is enabled, and the firmware will 1107 - * have removed the MSI capability. So write combining is safe if 1108 - * there is an MSI capability. 1109 - */ 1110 - use_wc = (!EFX_WORKAROUND_22643(efx) || 1111 - pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); 1112 - if (use_wc) 1113 - efx->membase = ioremap_wc(efx->membase_phys, 1114 - efx->type->mem_map_size); 1115 - else 1116 - efx->membase = ioremap_nocache(efx->membase_phys, 1117 - efx->type->mem_map_size); 1104 + efx->membase = ioremap_nocache(efx->membase_phys, 1105 + efx->type->mem_map_size); 1118 1106 if (!efx->membase) { 1119 1107 netif_err(efx, probe, efx->net_dev, 1120 1108 "could not map memory BAR at %llx+%x\n",
-6
drivers/net/sfc/io.h
··· 103 103 _efx_writed(efx, value->u32[2], reg + 8); 104 104 _efx_writed(efx, value->u32[3], reg + 12); 105 105 #endif 106 - wmb(); 107 106 mmiowb(); 108 107 spin_unlock_irqrestore(&efx->biu_lock, flags); 109 108 } ··· 125 126 __raw_writel((__force u32)value->u32[0], membase + addr); 126 127 __raw_writel((__force u32)value->u32[1], membase + addr + 4); 127 128 #endif 128 - wmb(); 129 129 mmiowb(); 130 130 spin_unlock_irqrestore(&efx->biu_lock, flags); 131 131 } ··· 139 141 140 142 /* No lock required */ 141 143 _efx_writed(efx, value->u32[0], reg); 142 - wmb(); 143 144 } 144 145 145 146 /* Read a 128-bit CSR, locking as appropriate. */ ··· 149 152 150 153 spin_lock_irqsave(&efx->biu_lock, flags); 151 154 value->u32[0] = _efx_readd(efx, reg + 0); 152 - rmb(); 153 155 value->u32[1] = _efx_readd(efx, reg + 4); 154 156 value->u32[2] = _efx_readd(efx, reg + 8); 155 157 value->u32[3] = _efx_readd(efx, reg + 12); ··· 171 175 value->u64[0] = (__force __le64)__raw_readq(membase + addr); 172 176 #else 173 177 value->u32[0] = (__force __le32)__raw_readl(membase + addr); 174 - rmb(); 175 178 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); 176 179 #endif 177 180 spin_unlock_irqrestore(&efx->biu_lock, flags); ··· 244 249 _efx_writed(efx, value->u32[2], reg + 8); 245 250 _efx_writed(efx, value->u32[3], reg + 12); 246 251 #endif 247 - wmb(); 248 252 } 249 253 #define efx_writeo_page(efx, value, reg, page) \ 250 254 _efx_writeo_page(efx, value, \
+17 -29
drivers/net/sfc/mcdi.c
··· 50 50 return &nic_data->mcdi; 51 51 } 52 52 53 - static inline void 54 - efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) 55 - { 56 - struct siena_nic_data *nic_data = efx->nic_data; 57 - value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); 58 - } 59 - 60 - static inline void 61 - efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) 62 - { 63 - struct siena_nic_data *nic_data = efx->nic_data; 64 - __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); 65 - } 66 - 67 53 void efx_mcdi_init(struct efx_nic *efx) 68 54 { 69 55 struct efx_mcdi_iface *mcdi; ··· 70 84 const u8 *inbuf, size_t inlen) 71 85 { 72 86 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 73 - unsigned pdu = MCDI_PDU(efx); 74 - unsigned doorbell = MCDI_DOORBELL(efx); 87 + unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 88 + unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); 75 89 unsigned int i; 76 90 efx_dword_t hdr; 77 91 u32 xflags, seqno; ··· 92 106 MCDI_HEADER_SEQ, seqno, 93 107 MCDI_HEADER_XFLAGS, xflags); 94 108 95 - efx_mcdi_writed(efx, &hdr, pdu); 109 + efx_writed(efx, &hdr, pdu); 96 110 97 111 for (i = 0; i < inlen; i += 4) 98 - efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), 99 - pdu + 4 + i); 112 + _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 113 + 114 + /* Ensure the payload is written out before the header */ 115 + wmb(); 100 116 101 117 /* ring the doorbell with a distinctive value */ 102 - EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); 103 - efx_mcdi_writed(efx, &hdr, doorbell); 118 + _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 104 119 } 105 120 106 121 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 107 122 { 108 123 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 109 - unsigned int pdu = MCDI_PDU(efx); 124 + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 110 125 int i; 111 126 112 127 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 113 128 BUG_ON(outlen & 3 || outlen >= 0x100); 114 129 115 130 for (i = 0; i < outlen; i += 4) 116 - efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); 131 + *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 117 132 } 118 133 119 134 static int efx_mcdi_poll(struct efx_nic *efx) ··· 122 135 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 123 136 unsigned int time, finish; 124 137 unsigned int respseq, respcmd, error; 125 - unsigned int pdu = MCDI_PDU(efx); 138 + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 126 139 unsigned int rc, spins; 127 140 efx_dword_t reg; 128 141 ··· 148 161 149 162 time = get_seconds(); 150 163 151 - efx_mcdi_readd(efx, &reg, pdu); 164 + rmb(); 165 + efx_readd(efx, &reg, pdu); 152 166 153 167 /* All 1's indicates that shared memory is in reset (and is 154 168 * not a valid header). 
Wait for it to come out reset before ··· 176 188 respseq, mcdi->seqno); 177 189 rc = EIO; 178 190 } else if (error) { 179 - efx_mcdi_readd(efx, &reg, pdu + 4); 191 + efx_readd(efx, &reg, pdu + 4); 180 192 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 181 193 #define TRANSLATE_ERROR(name) \ 182 194 case MC_CMD_ERR_ ## name: \ ··· 210 222 /* Test and clear MC-rebooted flag for this port/function */ 211 223 int efx_mcdi_poll_reboot(struct efx_nic *efx) 212 224 { 213 - unsigned int addr = MCDI_REBOOT_FLAG(efx); 225 + unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); 214 226 efx_dword_t reg; 215 227 uint32_t value; 216 228 217 229 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 218 230 return false; 219 231 220 - efx_mcdi_readd(efx, &reg, addr); 232 + efx_readd(efx, &reg, addr); 221 233 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 222 234 223 235 if (value == 0) 224 236 return 0; 225 237 226 238 EFX_ZERO_DWORD(reg); 227 - efx_mcdi_writed(efx, &reg, addr); 239 + efx_writed(efx, &reg, addr); 228 240 229 241 if (value == MC_STATUS_DWORD_ASSERT) 230 242 return -EINTR;
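Note: with write-combining reverted in efx.c above and MC_TREG_SMEM folded back into the main uncached BAR mapping (the siena.c hunk below extends mem_map_size to cover it), the wmb()/rmb() calls dropped from io.h appear to be redundant for ordinary register access; the one ordering that still matters is made explicit here in efx_mcdi_copyin(), which writes the header and payload with raw 32-bit writes and issues a wmb() before ringing the doorbell, so the MC, which is kicked by the doorbell, never reads a half-written request. The same publish-after-payload ordering sketched with C11 release/acquire in place of the kernel barriers (layout and names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned payload[4];
static atomic_uint doorbell;	/* 0 = idle, nonzero = request ready */

static void *firmware_side(void *arg)
{
	(void)arg;
	/* Spin until the doorbell is rung (acquire pairs with release). */
	while (atomic_load_explicit(&doorbell, memory_order_acquire) == 0)
		;
	/* The acquire guarantees the payload writes are visible here. */
	printf("request seen: %u %u %u %u\n",
	       payload[0], payload[1], payload[2], payload[3]);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, firmware_side, NULL);

	for (unsigned i = 0; i < 4; i++)	/* write the payload first... */
		payload[i] = i + 1;
	/* ...then ring the doorbell; the release store orders the payload
	 * before it, playing the role of the wmb() in the patch. */
	atomic_store_explicit(&doorbell, 0x45789abc, memory_order_release);

	pthread_join(t, NULL);
	return 0;
}

Build with cc -pthread; 0x45789abc is the same distinctive doorbell value the patch uses.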
-7
drivers/net/sfc/nic.c
··· 1936 1936 1937 1937 size = min_t(size_t, table->step, 16); 1938 1938 1939 - if (table->offset >= efx->type->mem_map_size) { 1940 - /* No longer mapped; return dummy data */ 1941 - memcpy(buf, "\xde\xc0\xad\xde", 4); 1942 - buf += table->rows * size; 1943 - continue; 1944 - } 1945 - 1946 1939 for (i = 0; i < table->rows; i++) { 1947 1940 switch (table->step) { 1948 1941 case 4: /* 32-bit register or SRAM */
-2
drivers/net/sfc/nic.h
··· 143 143 /** 144 144 * struct siena_nic_data - Siena NIC state 145 145 * @mcdi: Management-Controller-to-Driver Interface 146 - * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. 147 146 * @wol_filter_id: Wake-on-LAN packet filter id 148 147 */ 149 148 struct siena_nic_data { 150 149 struct efx_mcdi_iface mcdi; 151 - void __iomem *mcdi_smem; 152 150 int wol_filter_id; 153 151 }; 154 152
+4 -21
drivers/net/sfc/siena.c
··· 250 250 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 251 251 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 252 252 253 - /* Initialise MCDI */ 254 - nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + 255 - FR_CZ_MC_TREG_SMEM, 256 - FR_CZ_MC_TREG_SMEM_STEP * 257 - FR_CZ_MC_TREG_SMEM_ROWS); 258 - if (!nic_data->mcdi_smem) { 259 - netif_err(efx, probe, efx->net_dev, 260 - "could not map MCDI at %llx+%x\n", 261 - (unsigned long long)efx->membase_phys + 262 - FR_CZ_MC_TREG_SMEM, 263 - FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); 264 - rc = -ENOMEM; 265 - goto fail1; 266 - } 267 253 efx_mcdi_init(efx); 268 254 269 255 /* Recover from a failed assertion before probing */ 270 256 rc = efx_mcdi_handle_assertion(efx); 271 257 if (rc) 272 - goto fail2; 258 + goto fail1; 273 259 274 260 /* Let the BMC know that the driver is now in charge of link and 275 261 * filter settings. We must do this before we reset the NIC */ ··· 310 324 fail3: 311 325 efx_mcdi_drv_attach(efx, false, NULL); 312 326 fail2: 313 - iounmap(nic_data->mcdi_smem); 314 327 fail1: 315 328 kfree(efx->nic_data); 316 329 return rc; ··· 389 404 390 405 static void siena_remove_nic(struct efx_nic *efx) 391 406 { 392 - struct siena_nic_data *nic_data = efx->nic_data; 393 - 394 407 efx_nic_free_buffer(efx, &efx->irq_status); 395 408 396 409 siena_reset_hw(efx, RESET_TYPE_ALL); ··· 398 415 efx_mcdi_drv_attach(efx, false, NULL); 399 416 400 417 /* Tear down the private nic state */ 401 - iounmap(nic_data->mcdi_smem); 402 - kfree(nic_data); 418 + kfree(efx->nic_data); 403 419 efx->nic_data = NULL; 404 420 } 405 421 ··· 638 656 .default_mac_ops = &efx_mcdi_mac_operations, 639 657 640 658 .revision = EFX_REV_SIENA_A0, 641 - .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ 659 + .mem_map_size = (FR_CZ_MC_TREG_SMEM + 660 + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), 642 661 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 643 662 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 644 663 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
-2
drivers/net/sfc/workarounds.h
··· 38 38 #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 39 39 /* Legacy interrupt storm when interrupt fifo fills */ 40 40 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 41 - /* Write combining and sriov=enabled are incompatible */ 42 - #define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA 43 41 44 42 /* Spurious parity errors in TSORT buffers */ 45 43 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
+5
drivers/net/usb/ipheth.c
··· 59 59 #define USB_PRODUCT_IPHONE_3G 0x1292 60 60 #define USB_PRODUCT_IPHONE_3GS 0x1294 61 61 #define USB_PRODUCT_IPHONE_4 0x1297 62 + #define USB_PRODUCT_IPHONE_4_VZW 0x129c 62 63 63 64 #define IPHETH_USBINTF_CLASS 255 64 65 #define IPHETH_USBINTF_SUBCLASS 253 ··· 97 96 IPHETH_USBINTF_PROTO) }, 98 97 { USB_DEVICE_AND_INTERFACE_INFO( 99 98 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, 99 + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 100 + IPHETH_USBINTF_PROTO) }, 101 + { USB_DEVICE_AND_INTERFACE_INFO( 102 + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, 100 103 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 101 104 IPHETH_USBINTF_PROTO) }, 102 105 { }
+2 -1
drivers/net/wireless/ath/ath9k/ar9002_calib.c
··· 41 41 case ADC_DC_CAL: 42 42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ 43 43 if (!IS_CHAN_B(chan) && 44 - !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) 44 + !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) && 45 + IS_CHAN_HT20(chan))) 45 46 supported = true; 46 47 break; 47 48 }
+1 -1
drivers/net/wireless/ath/ath9k/ar9003_phy.c
··· 671 671 REG_WRITE_ARRAY(&ah->iniModesAdditional, 672 672 modesIndex, regWrites); 673 673 674 - if (AR_SREV_9300(ah)) 674 + if (AR_SREV_9330(ah)) 675 675 REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); 676 676 677 677 if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
+6
drivers/net/wireless/ath/ath9k/main.c
··· 2303 2303 mutex_lock(&sc->mutex); 2304 2304 cancel_delayed_work_sync(&sc->tx_complete_work); 2305 2305 2306 + if (ah->ah_flags & AH_UNPLUGGED) { 2307 + ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n"); 2308 + mutex_unlock(&sc->mutex); 2309 + return; 2310 + } 2311 + 2306 2312 if (sc->sc_flags & SC_OP_INVALID) { 2307 2313 ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); 2308 2314 mutex_unlock(&sc->mutex);
+8 -5
drivers/net/wireless/iwlegacy/iwl-3945-rs.c
··· 822 822 823 823 out: 824 824 825 - rs_sta->last_txrate_idx = index; 826 - if (sband->band == IEEE80211_BAND_5GHZ) 827 - info->control.rates[0].idx = rs_sta->last_txrate_idx - 828 - IWL_FIRST_OFDM_RATE; 829 - else 825 + if (sband->band == IEEE80211_BAND_5GHZ) { 826 + if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE)) 827 + index = IWL_FIRST_OFDM_RATE; 828 + rs_sta->last_txrate_idx = index; 829 + info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE; 830 + } else { 831 + rs_sta->last_txrate_idx = index; 830 832 info->control.rates[0].idx = rs_sta->last_txrate_idx; 833 + } 831 834 832 835 IWL_DEBUG_RATE(priv, "leave: %d\n", index); 833 836 }
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
··· 167 167 168 168 memset(&cmd, 0, sizeof(cmd)); 169 169 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 170 - memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); 170 + memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib)); 171 171 if (!(cmd.radio_sensor_offset)) 172 172 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; 173 173
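Note: the iwl-agn-ucode fix above is the classic sizeof(pointer) vs sizeof(object) slip: offset_calib is a pointer, so sizeof(offset_calib) is 4 or 8 bytes regardless of how large the calibration payload is, while sizeof(*offset_calib) measures the pointed-to data. A standalone demonstration (the struct is made up for illustration):

#include <stdio.h>
#include <string.h>

struct radio_offset { unsigned int raw[4]; };	/* 16 bytes */

int main(void)
{
	struct radio_offset src = { { 1, 2, 3, 4 } }, dst = { { 0 } };
	struct radio_offset *offset_calib = &src;

	/* Buggy: copies sizeof(a pointer) bytes -- 8 of 16 on 64-bit. */
	memcpy(&dst, offset_calib, sizeof(offset_calib));
	printf("buggy copy: %u %u %u %u\n",
	       dst.raw[0], dst.raw[1], dst.raw[2], dst.raw[3]);

	/* Fixed: sizeof(*offset_calib) is the pointed-to object's size. */
	memcpy(&dst, offset_calib, sizeof(*offset_calib));
	printf("fixed copy: %u %u %u %u\n",
	       dst.raw[0], dst.raw[1], dst.raw[2], dst.raw[3]);
	return 0;
}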
+2
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
··· 771 771 cmd = txq->cmd[cmd_index]; 772 772 meta = &txq->meta[cmd_index]; 773 773 774 + txq->time_stamp = jiffies; 775 + 774 776 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); 775 777 776 778 /* Input error checking is done when commands are added to queue. */
+8
drivers/net/wireless/rtlwifi/core.c
··· 610 610 611 611 mac->link_state = MAC80211_NOLINK; 612 612 memset(mac->bssid, 0, 6); 613 + 614 + /* reset sec info */ 615 + rtl_cam_reset_sec_info(hw); 616 + 617 + rtl_cam_reset_all_entry(hw); 613 618 mac->vendor = PEER_UNKNOWN; 614 619 615 620 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, ··· 1068 1063 *or clear all entry here. 1069 1064 */ 1070 1065 rtl_cam_delete_one_entry(hw, mac_addr, key_idx); 1066 + 1067 + rtl_cam_reset_sec_info(hw); 1068 + 1071 1069 break; 1072 1070 default: 1073 1071 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+6 -5
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
··· 549 549 (tcb_desc->rts_use_shortpreamble ? 1 : 0) 550 550 : (tcb_desc->rts_use_shortgi ? 1 : 0))); 551 551 if (mac->bw_40) { 552 - if (tcb_desc->packet_bw) { 552 553 if (rate_flag & IEEE80211_TX_RC_DUP_DATA) { 553 554 SET_TX_DESC_DATA_BW(txdesc, 1); 554 555 SET_TX_DESC_DATA_SC(txdesc, 3); 555 + } else if (rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH) { 556 + SET_TX_DESC_DATA_BW(txdesc, 1); 557 + SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc); 555 558 } else { 556 559 SET_TX_DESC_DATA_BW(txdesc, 0); 557 - if (rate_flag & IEEE80211_TX_RC_DUP_DATA) 558 - SET_TX_DESC_DATA_SC(txdesc, 559 - mac->cur_40_prime_sc); 560 - } 560 + SET_TX_DESC_DATA_SC(txdesc, 0); 561 + } 561 562 } else { 562 563 SET_TX_DESC_DATA_BW(txdesc, 0); 563 564 SET_TX_DESC_DATA_SC(txdesc, 0);
+1
include/linux/skbuff.h
··· 524 524 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); 525 525 526 526 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 527 + extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 527 528 extern struct sk_buff *skb_clone(struct sk_buff *skb, 528 529 gfp_t priority); 529 530 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
+2
include/linux/snmp.h
··· 231 231 LINUX_MIB_TCPDEFERACCEPTDROP, 232 232 LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ 233 233 LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */ 234 + LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */ 235 + LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */ 234 236 __LINUX_MIB_MAX 235 237 }; 236 238
+22 -3
include/net/flow.h
··· 7 7 #ifndef _NET_FLOW_H 8 8 #define _NET_FLOW_H 9 9 10 + #include <linux/socket.h> 10 11 #include <linux/in6.h> 11 12 #include <linux/atomic.h> 12 13 ··· 69 68 #define fl4_ipsec_spi uli.spi 70 69 #define fl4_mh_type uli.mht.type 71 70 #define fl4_gre_key uli.gre_key 72 - }; 71 + } __attribute__((__aligned__(BITS_PER_LONG/8))); 73 72 74 73 static inline void flowi4_init_output(struct flowi4 *fl4, int oif, 75 74 __u32 mark, __u8 tos, __u8 scope, ··· 113 112 #define fl6_ipsec_spi uli.spi 114 113 #define fl6_mh_type uli.mht.type 115 114 #define fl6_gre_key uli.gre_key 116 - }; 115 + } __attribute__((__aligned__(BITS_PER_LONG/8))); 117 116 118 117 struct flowidn { 119 118 struct flowi_common __fl_common; ··· 128 127 union flowi_uli uli; 129 128 #define fld_sport uli.ports.sport 130 129 #define fld_dport uli.ports.dport 131 - }; 130 + } __attribute__((__aligned__(BITS_PER_LONG/8))); 132 131 133 132 struct flowi { 134 133 union { ··· 160 159 static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) 161 160 { 162 161 return container_of(fldn, struct flowi, u.dn); 162 + } 163 + 164 + typedef unsigned long flow_compare_t; 165 + 166 + static inline size_t flow_key_size(u16 family) 167 + { 168 + switch (family) { 169 + case AF_INET: 170 + BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t)); 171 + return sizeof(struct flowi4) / sizeof(flow_compare_t); 172 + case AF_INET6: 173 + BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t)); 174 + return sizeof(struct flowi6) / sizeof(flow_compare_t); 175 + case AF_DECnet: 176 + BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t)); 177 + return sizeof(struct flowidn) / sizeof(flow_compare_t); 178 + } 179 + return 0; 163 180 } 164 181 165 182 #define FLOW_DIR_IN 0
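Note: the flow.h hunk pairs each AF-specific key with __aligned__(BITS_PER_LONG/8) and adds flow_key_size(), whose BUILD_BUG_ON()s prove at compile time that every key is an exact multiple of unsigned long words; together these let the flow cache hash and compare per-family keys word-by-word (see net/core/flow.c below) without touching the unused tail of the union. The same invariant in portable C11 (the struct is a cut-down stand-in, not the kernel layout):

#include <stdio.h>

typedef unsigned long flow_compare_t;

struct flowi4_like {
	unsigned int  mark;
	unsigned char tos, scope, proto, flags;
	unsigned int  daddr, saddr;
	unsigned int  uli;
} __attribute__((aligned(sizeof(flow_compare_t))));

/* Compile-time check: the padded size is a whole number of words,
 * so a word-wise walk never reads past the structure. */
_Static_assert(sizeof(struct flowi4_like) % sizeof(flow_compare_t) == 0,
	       "flow key must be a multiple of flow_compare_t");

int main(void)
{
	printf("key words: %zu\n",
	       sizeof(struct flowi4_like) / sizeof(flow_compare_t));
	return 0;
}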
+2 -1
include/net/request_sock.h
··· 96 96 */ 97 97 struct listen_sock { 98 98 u8 max_qlen_log; 99 - /* 3 bytes hole, try to use */ 99 + u8 synflood_warned; 100 + /* 2 bytes hole, try to use */ 100 101 int qlen; 101 102 int qlen_young; 102 103 int clock_hand;
+1
include/net/sctp/command.h
··· 109 109 SCTP_CMD_SEND_MSG, /* Send the whole use message */ 110 110 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ 111 111 SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/ 112 + SCTP_CMD_SET_ASOC, /* Restore association context */ 112 113 SCTP_CMD_LAST 113 114 } sctp_verb_t; 114 115
+3
include/net/tcp.h
··· 460 460 extern void tcp_send_fin(struct sock *sk); 461 461 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); 462 462 extern int tcp_send_synack(struct sock *); 463 + extern int tcp_syn_flood_action(struct sock *sk, 464 + const struct sk_buff *skb, 465 + const char *proto); 463 466 extern void tcp_push_one(struct sock *, unsigned int mss_now); 464 467 extern void tcp_send_ack(struct sock *sk); 465 468 extern void tcp_send_delayed_ack(struct sock *sk);
+1
include/net/transp_v6.h
··· 39 39 struct sk_buff *skb); 40 40 41 41 extern int datagram_send_ctl(struct net *net, 42 + struct sock *sk, 42 43 struct msghdr *msg, 43 44 struct flowi6 *fl6, 44 45 struct ipv6_txoptions *opt,
+1 -1
net/bridge/netfilter/Kconfig
··· 4 4 5 5 menuconfig BRIDGE_NF_EBTABLES 6 6 tristate "Ethernet Bridge tables (ebtables) support" 7 - depends on BRIDGE && BRIDGE_NETFILTER 7 + depends on BRIDGE && NETFILTER 8 8 select NETFILTER_XTABLES 9 9 help 10 10 ebtables is a general, extensible frame/packet identification
+5 -1
net/caif/caif_dev.c
··· 93 93 caifdevs = caif_device_list(dev_net(dev)); 94 94 BUG_ON(!caifdevs); 95 95 96 - caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); 96 + caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); 97 97 if (!caifd) 98 98 return NULL; 99 99 caifd->pcpu_refcnt = alloc_percpu(int); 100 + if (!caifd->pcpu_refcnt) { 101 + kfree(caifd); 102 + return NULL; 103 + } 100 104 caifd->netdev = dev; 101 105 dev_hold(dev); 102 106 return caifd;
+1 -1
net/can/af_can.c
··· 857 857 struct net_device *dev; 858 858 859 859 if (stats_timer) 860 - del_timer(&can_stattimer); 860 + del_timer_sync(&can_stattimer); 861 861 862 862 can_remove_proc(); 863 863
+8
net/core/dev.c
··· 1515 1515 */ 1516 1516 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1517 1517 { 1518 + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 1519 + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { 1520 + atomic_long_inc(&dev->rx_dropped); 1521 + kfree_skb(skb); 1522 + return NET_RX_DROP; 1523 + } 1524 + } 1525 + 1518 1526 skb_orphan(skb); 1519 1527 nf_reset(skb); 1520 1528
+21 -15
net/core/flow.c
··· 30 30 struct hlist_node hlist; 31 31 struct list_head gc_list; 32 32 } u; 33 + struct net *net; 33 34 u16 family; 34 35 u8 dir; 35 36 u32 genid; ··· 173 172 174 173 static u32 flow_hash_code(struct flow_cache *fc, 175 174 struct flow_cache_percpu *fcp, 176 - const struct flowi *key) 175 + const struct flowi *key, 176 + size_t keysize) 177 177 { 178 178 const u32 *k = (const u32 *) key; 179 + const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32); 179 180 180 - return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) 181 + return jhash2(k, length, fcp->hash_rnd) 181 182 & (flow_cache_hash_size(fc) - 1); 182 183 } 183 184 184 - typedef unsigned long flow_compare_t; 185 - 186 185 /* I hear what you're saying, use memcmp. But memcmp cannot make 187 - * important assumptions that we can here, such as alignment and 188 - * constant size. 186 + * important assumptions that we can here, such as alignment. 189 187 */ 190 - static int flow_key_compare(const struct flowi *key1, const struct flowi *key2) 188 + static int flow_key_compare(const struct flowi *key1, const struct flowi *key2, 189 + size_t keysize) 191 190 { 192 191 const flow_compare_t *k1, *k1_lim, *k2; 193 - const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t); 194 - 195 - BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t)); 196 192 197 193 k1 = (const flow_compare_t *) key1; 198 - k1_lim = k1 + n_elem; 194 + k1_lim = k1 + keysize; 199 195 200 196 k2 = (const flow_compare_t *) key2; 201 197 ··· 213 215 struct flow_cache_entry *fle, *tfle; 214 216 struct hlist_node *entry; 215 217 struct flow_cache_object *flo; 218 + size_t keysize; 216 219 unsigned int hash; 217 220 218 221 local_bh_disable(); ··· 221 222 222 223 fle = NULL; 223 224 flo = NULL; 225 + 226 + keysize = flow_key_size(family); 227 + if (!keysize) 228 + goto nocache; 229 + 224 230 /* Packet really early in init? Making flow_cache_init a 225 231 * pre-smp initcall would solve this. --RR */ 226 232 if (!fcp->hash_table) ··· 234 230 if (fcp->hash_rnd_recalc) 235 231 flow_new_hash_rnd(fc, fcp); 236 232 237 - hash = flow_hash_code(fc, fcp, key); 233 + hash = flow_hash_code(fc, fcp, key, keysize); 238 234 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { 239 - if (tfle->family == family && 235 + if (tfle->net == net && 236 + tfle->family == family && 240 237 tfle->dir == dir && 241 - flow_key_compare(key, &tfle->key) == 0) { 238 + flow_key_compare(key, &tfle->key, keysize) == 0) { 242 239 fle = tfle; 243 240 break; 244 241 } ··· 251 246 252 247 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); 253 248 if (fle) { 249 + fle->net = net; 254 250 fle->family = family; 255 251 fle->dir = dir; 256 - memcpy(&fle->key, key, sizeof(*key)); 252 + memcpy(&fle->key, key, keysize * sizeof(flow_compare_t)); 257 253 fle->object = NULL; 258 254 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); 259 255 fcp->hash_count++;
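Note: flow_hash_code() and flow_key_compare() now take the per-family keysize from flow_key_size() in flow.h above instead of assuming sizeof(struct flowi), so an IPv4 lookup no longer hashes or compares the unused tail of the union; lookups also match the new fle->net field so identical keys from different namespaces cannot collide. A toy version of the word-wise compare and hash (the kernel uses jhash2() over 32-bit words; the FNV-style hash below is only a stand-in):

#include <stddef.h>
#include <stdio.h>

typedef unsigned long flow_compare_t;

/* Compare exactly 'keysize' words -- the aligned attribute on the key
 * structs guarantees word loads are safe. */
static int key_equal(const flow_compare_t *k1, const flow_compare_t *k2,
		     size_t keysize)
{
	for (size_t i = 0; i < keysize; i++)
		if (k1[i] != k2[i])
			return 0;
	return 1;
}

/* Toy hash over the same 'keysize' words, for illustration only. */
static unsigned long key_hash(const flow_compare_t *k, size_t keysize)
{
	unsigned long h = 1469598103934665603UL;
	for (size_t i = 0; i < keysize; i++)
		h = (h ^ k[i]) * 1099511628211UL;
	return h;
}

int main(void)
{
	flow_compare_t a[3] = { 1, 2, 3 }, b[3] = { 1, 2, 4 };
	printf("equal over 2 words: %d\n", key_equal(a, b, 2));	/* 1 */
	printf("equal over 3 words: %d\n", key_equal(a, b, 3));	/* 0 */
	printf("hashes: %lx vs %lx\n", key_hash(a, 3), key_hash(b, 3));
	return 0;
}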
+17 -5
net/core/skbuff.c
··· 611 611 } 612 612 EXPORT_SYMBOL_GPL(skb_morph); 613 613 614 - /* skb frags copy userspace buffers to kernel */ 615 - static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 614 + /* skb_copy_ubufs - copy userspace skb frags buffers to kernel 615 + * @skb: the skb to modify 616 + * @gfp_mask: allocation priority 617 + * 618 + * This must be called on SKBTX_DEV_ZEROCOPY skb. 619 + * It will copy all frags into kernel and drop the reference 620 + * to userspace pages. 621 + * 622 + * If this function is called from an interrupt gfp_mask() must be 623 + * %GFP_ATOMIC. 624 + * 625 + * Returns 0 on success or a negative error code on failure 626 + * to allocate kernel memory to copy to. 627 + */ 628 + int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 616 629 { 617 630 int i; 618 631 int num_frags = skb_shinfo(skb)->nr_frags; ··· 665 652 skb_shinfo(skb)->frags[i - 1].page = head; 666 653 head = (struct page *)head->private; 667 654 } 655 + 656 + skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 668 657 return 0; 669 658 } 670 659 ··· 692 677 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 693 678 if (skb_copy_ubufs(skb, gfp_mask)) 694 679 return NULL; 695 - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 696 680 } 697 681 698 682 n = skb + 1; ··· 817 803 n = NULL; 818 804 goto out; 819 805 } 820 - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 821 806 } 822 807 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 823 808 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; ··· 909 896 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 910 897 if (skb_copy_ubufs(skb, gfp_mask)) 911 898 goto nofrags; 912 - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 913 899 } 914 900 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 915 901 get_page(skb_shinfo(skb)->frags[i].page);
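Note: skb_copy_ubufs() is promoted to a documented, exported helper (the skbuff.h hunk above adds the prototype) and now clears SKBTX_DEV_ZEROCOPY itself, so the three existing call sites stop repeating the flag-clear and new callers such as dev_forward_skb() in net/core/dev.c above cannot forget it. The "callee finishes the state change" shape in miniature (names are illustrative):

#include <stdio.h>

#define TX_ZEROCOPY 0x1

struct buf { unsigned int flags; int frags_are_private; };

/* Copy shared frags and drop the zerocopy flag in ONE place, so no
 * caller can copy the data but forget to clear the flag. */
static int copy_ubufs(struct buf *b)
{
	b->frags_are_private = 1;	/* pretend we copied the user pages */
	b->flags &= ~TX_ZEROCOPY;	/* callee keeps the state consistent */
	return 0;
}

static int forward(struct buf *b)
{
	if (b->flags & TX_ZEROCOPY)
		if (copy_ubufs(b) != 0)
			return -1;
	return 0;	/* safe: frags private, flag already clear */
}

int main(void)
{
	struct buf b = { .flags = TX_ZEROCOPY };
	forward(&b);
	printf("flags=%#x private=%d\n", b.flags, b.frags_are_private);
	return 0;
}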
+1 -1
net/ethernet/eth.c
··· 340 340 dev->addr_len = ETH_ALEN; 341 341 dev->tx_queue_len = 1000; /* Ethernet wants good queues */ 342 342 dev->flags = IFF_BROADCAST|IFF_MULTICAST; 343 - dev->priv_flags = IFF_TX_SKB_SHARING; 343 + dev->priv_flags |= IFF_TX_SKB_SHARING; 344 344 345 345 memset(dev->broadcast, 0xFF, ETH_ALEN); 346 346
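Note: the one-character eth.c change matters because ether_setup() can run after a caller has already set bits in dev->priv_flags; plain assignment silently wiped them, while |= only adds IFF_TX_SKB_SHARING. In miniature (bit values are illustrative):

#include <stdio.h>

#define IFF_EBRIDGE        0x2		/* illustrative values */
#define IFF_TX_SKB_SHARING 0x10000

int main(void)
{
	unsigned int priv_flags = IFF_EBRIDGE;	/* set earlier by a caller */
	unsigned int wiped = priv_flags;

	wiped = IFF_TX_SKB_SHARING;		/* old code: bridge bit lost */
	priv_flags |= IFF_TX_SKB_SHARING;	/* fixed: both bits survive */

	printf("assign: %#x  or-assign: %#x\n", wiped, priv_flags);
	return 0;
}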
+6 -1
net/ipv4/af_inet.c
··· 466 466 goto out; 467 467 468 468 if (addr->sin_family != AF_INET) { 469 + /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET) 470 + * only if s_addr is INADDR_ANY. 471 + */ 469 472 err = -EAFNOSUPPORT; 470 - goto out; 473 + if (addr->sin_family != AF_UNSPEC || 474 + addr->sin_addr.s_addr != htonl(INADDR_ANY)) 475 + goto out; 471 476 } 472 477 473 478 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
+9 -1
net/ipv4/fib_semantics.c
··· 142 142 }; 143 143 144 144 /* Release a nexthop info record */ 145 + static void free_fib_info_rcu(struct rcu_head *head) 146 + { 147 + struct fib_info *fi = container_of(head, struct fib_info, rcu); 148 + 149 + if (fi->fib_metrics != (u32 *) dst_default_metrics) 150 + kfree(fi->fib_metrics); 151 + kfree(fi); 152 + } 145 153 146 154 void free_fib_info(struct fib_info *fi) 147 155 { ··· 164 156 } endfor_nexthops(fi); 165 157 fib_info_cnt--; 166 158 release_net(fi->fib_net); 167 - kfree_rcu(fi, rcu); 159 + call_rcu(&fi->rcu, free_fib_info_rcu); 168 160 } 169 161 170 162 void fib_release_info(struct fib_info *fi)
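Note: kfree_rcu() can only kfree() the structure containing the rcu_head, so once fib_info references a separately allocated fib_metrics array, freeing it that way leaks the array; the fix above restores call_rcu() with a callback that frees both, skipping the shared read-only dst_default_metrics table. A userspace sketch of that custom-destructor shape (the RCU deferral is elided; in the kernel the callback runs after a grace period, and all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

static unsigned int dst_default_metrics[4];	/* shared, never freed */

struct fib_info_like {
	unsigned int *fib_metrics;
	void (*free_cb)(struct fib_info_like *);  /* plays the rcu callback */
};

/* Custom destructor: free the metrics array too, unless it points at
 * the shared default table (the leak the patch fixes). */
static void free_fib_info_cb(struct fib_info_like *fi)
{
	if (fi->fib_metrics != dst_default_metrics)
		free(fi->fib_metrics);
	free(fi);
}

static struct fib_info_like *make_fib_info(int private_metrics)
{
	struct fib_info_like *fi = malloc(sizeof(*fi));
	fi->fib_metrics = private_metrics
		? calloc(4, sizeof(unsigned int))
		: dst_default_metrics;
	fi->free_cb = free_fib_info_cb;
	return fi;
}

int main(void)
{
	struct fib_info_like *a = make_fib_info(1), *b = make_fib_info(0);
	a->free_cb(a);	/* frees both allocations */
	b->free_cb(b);	/* frees only the struct */
	puts("both paths freed correctly");
	return 0;
}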
+5 -7
net/ipv4/netfilter/ip_queue.c
··· 218 218 return skb; 219 219 220 220 nlmsg_failure: 221 + kfree_skb(skb); 221 222 *errp = -EINVAL; 222 223 printk(KERN_ERR "ip_queue: error creating packet message\n"); 223 224 return NULL; ··· 314 313 { 315 314 struct nf_queue_entry *entry; 316 315 317 - if (vmsg->value > NF_MAX_VERDICT) 316 + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) 318 317 return -EINVAL; 319 318 320 319 entry = ipq_find_dequeue_entry(vmsg->id); ··· 359 358 break; 360 359 361 360 case IPQM_VERDICT: 362 - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 363 - status = -EINVAL; 364 - else 365 - status = ipq_set_verdict(&pmsg->msg.verdict, 366 - len - sizeof(*pmsg)); 367 - break; 361 + status = ipq_set_verdict(&pmsg->msg.verdict, 362 + len - sizeof(*pmsg)); 363 + break; 368 364 default: 369 365 status = -EINVAL; 370 366 }
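Note: two independent fixes in the ip_queue hunk above: the nlmsg_failure path now frees the partially built skb it used to leak, and verdict handling rejects NF_STOLEN (userspace must not claim ownership of a queued packet), dropping the duplicated range check at the IPQM_VERDICT case because ipq_set_verdict() already performs it. The leak-on-error shape, reduced to plain C:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct msg { char *buf; size_t len; };

/* Build a message; on any failure, release everything allocated so far. */
static struct msg *build_msg(const char *payload, int *errp)
{
	struct msg *m = malloc(sizeof(*m));
	if (!m)
		goto fail;
	m->len = strlen(payload);
	m->buf = malloc(m->len + 1);
	if (!m->buf)
		goto fail_free_msg;	/* without this, 'm' would leak */
	memcpy(m->buf, payload, m->len + 1);
	return m;

fail_free_msg:
	free(m);
fail:
	*errp = -1;
	return NULL;
}

int main(void)
{
	int err = 0;
	struct msg *m = build_msg("hello", &err);
	if (m) {
		printf("built: %s\n", m->buf);
		free(m->buf);
		free(m);
	}
	return 0;
}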
+2
net/ipv4/proc.c
··· 254 254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), 255 255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), 256 256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), 257 + SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), 258 + SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), 257 259 SNMP_MIB_SENTINEL 258 260 }; 259 261
+28 -21
net/ipv4/tcp_ipv4.c
··· 808 808 kfree(inet_rsk(req)->opt); 809 809 } 810 810 811 - static void syn_flood_warning(const struct sk_buff *skb) 811 + /* 812 + * Return 1 if a syncookie should be sent 813 + */ 814 + int tcp_syn_flood_action(struct sock *sk, 815 + const struct sk_buff *skb, 816 + const char *proto) 812 817 { 813 - const char *msg; 818 + const char *msg = "Dropping request"; 819 + int want_cookie = 0; 820 + struct listen_sock *lopt; 821 + 822 + 814 823 815 824 #ifdef CONFIG_SYN_COOKIES 816 - if (sysctl_tcp_syncookies) 825 + if (sysctl_tcp_syncookies) { 817 826 msg = "Sending cookies"; 818 - else 827 + want_cookie = 1; 828 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); 829 + } else 819 830 #endif 820 - msg = "Dropping request"; 831 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); 821 832 822 - pr_info("TCP: Possible SYN flooding on port %d. %s.\n", 823 - ntohs(tcp_hdr(skb)->dest), msg); 833 + lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; 834 + if (!lopt->synflood_warned) { 835 + lopt->synflood_warned = 1; 836 + pr_info("%s: Possible SYN flooding on port %d. %s. " 837 + " Check SNMP counters.\n", 838 + proto, ntohs(tcp_hdr(skb)->dest), msg); 839 + } 840 + return want_cookie; 824 841 } 842 + EXPORT_SYMBOL(tcp_syn_flood_action); 825 843 826 844 /* 827 845 * Save and compile IPv4 options into the request_sock if needed. ··· 1253 1235 __be32 saddr = ip_hdr(skb)->saddr; 1254 1236 __be32 daddr = ip_hdr(skb)->daddr; 1255 1237 __u32 isn = TCP_SKB_CB(skb)->when; 1256 - #ifdef CONFIG_SYN_COOKIES 1257 1238 int want_cookie = 0; 1258 - #else 1259 - #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */ 1260 - #endif 1261 1239 1262 1240 /* Never answer to SYNs send to broadcast or multicast */ 1263 1241 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) ··· 1264 1250 * evidently real one. 1265 1251 */ 1266 1252 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1267 - if (net_ratelimit()) 1268 - syn_flood_warning(skb); 1269 - #ifdef CONFIG_SYN_COOKIES 1270 - if (sysctl_tcp_syncookies) { 1271 - want_cookie = 1; 1272 - } else 1273 - #endif 1274 - goto drop; 1253 + want_cookie = tcp_syn_flood_action(sk, skb, "TCP"); 1254 + if (!want_cookie) 1255 + goto drop; 1275 1256 } 1276 1257 1277 1258 /* Accept backlog is full. If we have already queued enough ··· 1312 1303 while (l-- > 0) 1313 1304 *c++ ^= *hash_location++; 1314 1305 1315 - #ifdef CONFIG_SYN_COOKIES 1316 1306 want_cookie = 0; /* not our kind of cookie */ 1317 - #endif 1318 1307 tmp_ext.cookie_out_never = 0; /* false */ 1319 1308 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1320 1309 } else if (!tp->rx_opt.cookie_in_always) {
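Note: tcp_syn_flood_action() consolidates the per-family syn_flood_warning() copies: it decides whether to answer with a syncookie, bumps the new TCPReqQFullDoCookies/TCPReqQFullDrop counters (see include/linux/snmp.h and net/ipv4/proc.c above), and warns once per listen socket via the synflood_warned byte that fills a hole in struct listen_sock, instead of relying on net_ratelimit(). The warn-once shape with the networking stripped away (a sketch, not the kernel function):

#include <stdio.h>

struct listener {
	unsigned short port;
	unsigned char  synflood_warned;	/* fills a padding hole */
};

static long cookies_sent, requests_dropped;	/* stand-in counters */

/* Returns 1 if a syncookie should be sent, 0 to drop the request. */
static int syn_flood_action(struct listener *l, int syncookies_enabled)
{
	const char *msg = "Dropping request";
	int want_cookie = 0;

	if (syncookies_enabled) {
		msg = "Sending cookies";
		want_cookie = 1;
		cookies_sent++;
	} else {
		requests_dropped++;
	}

	if (!l->synflood_warned) {	/* warn once per listener, not per SYN */
		l->synflood_warned = 1;
		printf("TCP: Possible SYN flooding on port %d. %s.\n",
		       l->port, msg);
	}
	return want_cookie;
}

int main(void)
{
	struct listener l = { .port = 80 };
	for (int i = 0; i < 3; i++)	/* only the first SYN warns */
		syn_flood_action(&l, 1);
	printf("cookies=%ld drops=%ld\n", cookies_sent, requests_dropped);
	return 0;
}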
+3 -2
net/ipv6/datagram.c
··· 599 599 return 0; 600 600 } 601 601 602 - int datagram_send_ctl(struct net *net, 602 + int datagram_send_ctl(struct net *net, struct sock *sk, 603 603 struct msghdr *msg, struct flowi6 *fl6, 604 604 struct ipv6_txoptions *opt, 605 605 int *hlimit, int *tclass, int *dontfrag) ··· 658 658 659 659 if (addr_type != IPV6_ADDR_ANY) { 660 660 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; 661 - if (!ipv6_chk_addr(net, &src_info->ipi6_addr, 661 + if (!inet_sk(sk)->transparent && 662 + !ipv6_chk_addr(net, &src_info->ipi6_addr, 662 663 strict ? dev : NULL, 0)) 663 664 err = -EINVAL; 664 665 else
+4 -4
net/ipv6/ip6_flowlabel.c
··· 322 322 } 323 323 324 324 static struct ip6_flowlabel * 325 - fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, 326 - int optlen, int *err_p) 325 + fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, 326 + char __user *optval, int optlen, int *err_p) 327 327 { 328 328 struct ip6_flowlabel *fl = NULL; 329 329 int olen; ··· 360 360 msg.msg_control = (void*)(fl->opt+1); 361 361 memset(&flowi6, 0, sizeof(flowi6)); 362 362 363 - err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk, 363 + err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, 364 364 &junk, &junk); 365 365 if (err) 366 366 goto done; ··· 528 528 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK) 529 529 return -EINVAL; 530 530 531 - fl = fl_create(net, &freq, optval, optlen, &err); 531 + fl = fl_create(net, sk, &freq, optval, optlen, &err); 532 532 if (fl == NULL) 533 533 return err; 534 534 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
+1 -1
net/ipv6/ipv6_sockglue.c
··· 475 475 msg.msg_controllen = optlen; 476 476 msg.msg_control = (void*)(opt+1); 477 477 478 - retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk, 478 + retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 479 479 &junk); 480 480 if (retv) 481 481 goto done;
+5 -7
net/ipv6/netfilter/ip6_queue.c
··· 218 218 return skb; 219 219 220 220 nlmsg_failure: 221 + kfree_skb(skb); 221 222 *errp = -EINVAL; 222 223 printk(KERN_ERR "ip6_queue: error creating packet message\n"); 223 224 return NULL; ··· 314 313 { 315 314 struct nf_queue_entry *entry; 316 315 317 - if (vmsg->value > NF_MAX_VERDICT) 316 + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) 318 317 return -EINVAL; 319 318 320 319 entry = ipq_find_dequeue_entry(vmsg->id); ··· 359 358 break; 360 359 361 360 case IPQM_VERDICT: 362 - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 363 - status = -EINVAL; 364 - else 365 - status = ipq_set_verdict(&pmsg->msg.verdict, 366 - len - sizeof(*pmsg)); 367 - break; 361 + status = ipq_set_verdict(&pmsg->msg.verdict, 362 + len - sizeof(*pmsg)); 363 + break; 368 364 default: 369 365 status = -EINVAL; 370 366 }
+2 -2
net/ipv6/raw.c
··· 817 817 memset(opt, 0, sizeof(struct ipv6_txoptions)); 818 818 opt->tot_len = sizeof(struct ipv6_txoptions); 819 819 820 - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 821 - &tclass, &dontfrag); 820 + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 821 + &hlimit, &tclass, &dontfrag); 822 822 if (err < 0) { 823 823 fl6_sock_release(flowlabel); 824 824 return err;
+22 -11
net/ipv6/route.c
··· 104 104 struct inet_peer *peer; 105 105 u32 *p = NULL; 106 106 107 + if (!(rt->dst.flags & DST_HOST)) 108 + return NULL; 109 + 107 110 if (!rt->rt6i_peer) 108 111 rt6_bind_peer(rt, 1); 109 112 ··· 254 251 struct rt6_info *rt = (struct rt6_info *)dst; 255 252 struct inet6_dev *idev = rt->rt6i_idev; 256 253 struct inet_peer *peer = rt->rt6i_peer; 254 + 255 + if (!(rt->dst.flags & DST_HOST)) 256 + dst_destroy_metrics_generic(dst); 257 257 258 258 if (idev != NULL) { 259 259 rt->rt6i_idev = NULL; ··· 729 723 ipv6_addr_copy(&rt->rt6i_gateway, daddr); 730 724 } 731 725 732 - rt->rt6i_dst.plen = 128; 733 726 rt->rt6i_flags |= RTF_CACHE; 734 - rt->dst.flags |= DST_HOST; 735 727 736 728 #ifdef CONFIG_IPV6_SUBTREES 737 729 if (rt->rt6i_src.plen && saddr) { ··· 779 775 struct rt6_info *rt = ip6_rt_copy(ort, daddr); 780 776 781 777 if (rt) { 782 - rt->rt6i_dst.plen = 128; 783 778 rt->rt6i_flags |= RTF_CACHE; 784 - rt->dst.flags |= DST_HOST; 785 779 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst))); 786 780 } 787 781 return rt; ··· 1080 1078 neigh = NULL; 1081 1079 } 1082 1080 1083 - rt->rt6i_idev = idev; 1081 + rt->dst.flags |= DST_HOST; 1082 + rt->dst.output = ip6_output; 1084 1083 dst_set_neighbour(&rt->dst, neigh); 1085 1084 atomic_set(&rt->dst.__refcnt, 1); 1086 - ipv6_addr_copy(&rt->rt6i_dst.addr, addr); 1087 1085 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); 1088 - rt->dst.output = ip6_output; 1086 + 1087 + ipv6_addr_copy(&rt->rt6i_dst.addr, addr); 1088 + rt->rt6i_dst.plen = 128; 1089 + rt->rt6i_idev = idev; 1089 1090 1090 1091 spin_lock_bh(&icmp6_dst_lock); 1091 1092 rt->dst.next = icmp6_dst_gc_list; ··· 1266 1261 if (rt->rt6i_dst.plen == 128) 1267 1262 rt->dst.flags |= DST_HOST; 1268 1263 1264 + if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) { 1265 + u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); 1266 + if (!metrics) { 1267 + err = -ENOMEM; 1268 + goto out; 1269 + } 1270 + dst_init_metrics(&rt->dst, metrics, 0); 1271 + } 1269 1272 #ifdef CONFIG_IPV6_SUBTREES 1270 1273 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); 1271 1274 rt->rt6i_src.plen = cfg->fc_src_len; ··· 1620 1607 if (on_link) 1621 1608 nrt->rt6i_flags &= ~RTF_GATEWAY; 1622 1609 1623 - nrt->rt6i_dst.plen = 128; 1624 - nrt->dst.flags |= DST_HOST; 1625 - 1626 1610 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); 1627 1611 dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); 1628 1612 ··· 1764 1754 if (rt) { 1765 1755 rt->dst.input = ort->dst.input; 1766 1756 rt->dst.output = ort->dst.output; 1757 + rt->dst.flags |= DST_HOST; 1767 1758 1768 1759 ipv6_addr_copy(&rt->rt6i_dst.addr, dest); 1769 - rt->rt6i_dst.plen = ort->rt6i_dst.plen; 1760 + rt->rt6i_dst.plen = 128; 1770 1761 dst_copy_metrics(&rt->dst, &ort->dst); 1771 1762 rt->dst.error = ort->dst.error; 1772 1763 rt->rt6i_idev = ort->rt6i_idev;
+3 -28
net/ipv6/tcp_ipv6.c
··· 531 531 return tcp_v6_send_synack(sk, req, rvp); 532 532 } 533 533 534 - static inline void syn_flood_warning(struct sk_buff *skb) 535 - { 536 - #ifdef CONFIG_SYN_COOKIES 537 - if (sysctl_tcp_syncookies) 538 - printk(KERN_INFO 539 - "TCPv6: Possible SYN flooding on port %d. " 540 - "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest)); 541 - else 542 - #endif 543 - printk(KERN_INFO 544 - "TCPv6: Possible SYN flooding on port %d. " 545 - "Dropping request.\n", ntohs(tcp_hdr(skb)->dest)); 546 - } 547 - 548 534 static void tcp_v6_reqsk_destructor(struct request_sock *req) 549 535 { 550 536 kfree_skb(inet6_rsk(req)->pktopts); ··· 1165 1179 struct tcp_sock *tp = tcp_sk(sk); 1166 1180 __u32 isn = TCP_SKB_CB(skb)->when; 1167 1181 struct dst_entry *dst = NULL; 1168 - #ifdef CONFIG_SYN_COOKIES 1169 1182 int want_cookie = 0; 1170 - #else 1171 - #define want_cookie 0 1172 - #endif 1173 1183 1174 1184 if (skb->protocol == htons(ETH_P_IP)) 1175 1185 return tcp_v4_conn_request(sk, skb); ··· 1174 1192 goto drop; 1175 1193 1176 1194 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1177 - if (net_ratelimit()) 1178 - syn_flood_warning(skb); 1179 - #ifdef CONFIG_SYN_COOKIES 1180 - if (sysctl_tcp_syncookies) 1181 - want_cookie = 1; 1182 - else 1183 - #endif 1184 - goto drop; 1195 + want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6"); 1196 + if (!want_cookie) 1197 + goto drop; 1185 1198 } 1186 1199 1187 1200 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) ··· 1226 1249 while (l-- > 0) 1227 1250 *c++ ^= *hash_location++; 1228 1251 1229 - #ifdef CONFIG_SYN_COOKIES 1230 1252 want_cookie = 0; /* not our kind of cookie */ 1231 - #endif 1232 1253 tmp_ext.cookie_out_never = 0; /* false */ 1233 1254 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1234 1255 } else if (!tp->rx_opt.cookie_in_always) {
+2 -2
net/ipv6/udp.c
··· 1090 1090 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1091 1091 opt->tot_len = sizeof(*opt); 1092 1092 1093 - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 1094 - &tclass, &dontfrag); 1093 + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1094 + &hlimit, &tclass, &dontfrag); 1095 1095 if (err < 0) { 1096 1096 fl6_sock_release(flowlabel); 1097 1097 return err;
+3 -3
net/irda/irsysctl.c
··· 40 40 extern int sysctl_fast_poll_increase; 41 41 extern char sysctl_devname[]; 42 42 extern int sysctl_max_baud_rate; 43 - extern int sysctl_min_tx_turn_time; 44 - extern int sysctl_max_tx_data_size; 45 - extern int sysctl_max_tx_window; 43 + extern unsigned int sysctl_min_tx_turn_time; 44 + extern unsigned int sysctl_max_tx_data_size; 45 + extern unsigned int sysctl_max_tx_window; 46 46 extern int sysctl_max_noreply_time; 47 47 extern int sysctl_warn_noreply_time; 48 48 extern int sysctl_lap_keepalive_time;
+3 -3
net/irda/qos.c
··· 60 60 * Default is 10us which means using the unmodified value given by the 61 61 * peer except if it's 0 (0 is likely a bug in the other stack). 62 62 */ 63 - unsigned sysctl_min_tx_turn_time = 10; 63 + unsigned int sysctl_min_tx_turn_time = 10; 64 64 /* 65 65 * Maximum data size to be used in transmission in payload of LAP frame. 66 66 * There is a bit of confusion in the IrDA spec : ··· 75 75 * bytes frames or all negotiated frame sizes, but you can use the sysctl 76 76 * to play with this value anyway. 77 77 * Jean II */ 78 - unsigned sysctl_max_tx_data_size = 2042; 78 + unsigned int sysctl_max_tx_data_size = 2042; 79 79 /* 80 80 * Maximum transmit window, i.e. number of LAP frames between turn-around. 81 81 * This allow to override what the peer told us. Some peers are buggy and 82 82 * don't always support what they tell us. 83 83 * Jean II */ 84 - unsigned sysctl_max_tx_window = 7; 84 + unsigned int sysctl_max_tx_window = 7; 85 85 86 86 static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get); 87 87 static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
+1 -1
net/mac80211/sta_info.c
··· 665 665 BUG_ON(!sdata->bss); 666 666 667 667 atomic_dec(&sdata->bss->num_sta_ps); 668 - __sta_info_clear_tim_bit(sdata->bss, sta); 668 + sta_info_clear_tim_bit(sta); 669 669 } 670 670 671 671 local->num_sta--;
+1
net/netfilter/nf_conntrack_pptp.c
··· 364 364 break; 365 365 366 366 case PPTP_WAN_ERROR_NOTIFY: 367 + case PPTP_SET_LINK_INFO: 367 368 case PPTP_ECHO_REQUEST: 368 369 case PPTP_ECHO_REPLY: 369 370 /* I don't have to explain these ;) */
+3 -3
net/netfilter/nf_conntrack_proto_tcp.c
··· 409 409 if (opsize < 2) /* "silly options" */ 410 410 return; 411 411 if (opsize > length) 412 - break; /* don't parse partial options */ 412 + return; /* don't parse partial options */ 413 413 414 414 if (opcode == TCPOPT_SACK_PERM 415 415 && opsize == TCPOLEN_SACK_PERM) ··· 447 447 BUG_ON(ptr == NULL); 448 448 449 449 /* Fast path for timestamp-only option */ 450 - if (length == TCPOLEN_TSTAMP_ALIGNED*4 450 + if (length == TCPOLEN_TSTAMP_ALIGNED 451 451 && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24) 452 452 | (TCPOPT_NOP << 16) 453 453 | (TCPOPT_TIMESTAMP << 8) ··· 469 469 if (opsize < 2) /* "silly options" */ 470 470 return; 471 471 if (opsize > length) 472 - break; /* don't parse partial options */ 472 + return; /* don't parse partial options */ 473 473 474 474 if (opcode == TCPOPT_SACK 475 475 && opsize >= (TCPOLEN_SACK_BASE
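Note: besides correcting the fast-path length test (TCPOLEN_TSTAMP_ALIGNED is already a byte count, so the *4 overshot it), the conntrack option parsers above now return instead of breaking when an option's declared size exceeds the bytes left: break only exited the switch, after which the loop advanced the pointer past the end of the header. A minimal bounds-checked walker in the same spirit (simplified, not the conntrack code):

#include <stdio.h>

/* Walk TCP-style options: 1-byte kind, 1-byte total length.
 * Kind 0 = end of list, kind 1 = one-byte NOP padding. */
static void parse_options(const unsigned char *ptr, int length)
{
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		if (opcode == 0)	/* EOL */
			return;
		if (opcode == 1) {	/* NOP */
			length--;
			continue;
		}
		opsize = *ptr++;
		if (opsize < 2)		/* "silly options" */
			return;
		if (opsize > length)	/* partial option: stop here rather */
			return;		/* than walk past the header end    */
		printf("option %d, %d bytes\n", opcode, opsize);
		ptr += opsize - 2;
		length -= opsize;
	}
}

int main(void)
{
	/* A sane MSS option followed by a truncated one (claims 8 bytes
	 * with only 2 remaining): parsing must stop at the truncation. */
	const unsigned char opts[] = { 2, 4, 0x05, 0xb4, 3, 8 };
	parse_options(opts, sizeof(opts));
	return 0;
}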
+4 -5
net/netfilter/xt_rateest.c
··· 78 78 { 79 79 struct xt_rateest_match_info *info = par->matchinfo; 80 80 struct xt_rateest *est1, *est2; 81 - int ret = false; 81 + int ret = -EINVAL; 82 82 83 83 if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS | 84 84 XT_RATEEST_MATCH_REL)) != 1) ··· 101 101 if (!est1) 102 102 goto err1; 103 103 104 + est2 = NULL; 104 105 if (info->flags & XT_RATEEST_MATCH_REL) { 105 106 est2 = xt_rateest_lookup(info->name2); 106 107 if (!est2) 107 108 goto err2; 108 - } else 109 - est2 = NULL; 110 - 109 + } 111 110 112 111 info->est1 = est1; 113 112 info->est2 = est2; ··· 115 116 err2: 116 117 xt_rateest_put(est1); 117 118 err1: 118 - return -EINVAL; 119 + return ret; 119 120 } 120 121 121 122 static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
+13 -14
net/sched/cls_rsvp.h
··· 425 425 struct rsvp_filter *f, **fp; 426 426 struct rsvp_session *s, **sp; 427 427 struct tc_rsvp_pinfo *pinfo = NULL; 428 - struct nlattr *opt = tca[TCA_OPTIONS-1]; 428 + struct nlattr *opt = tca[TCA_OPTIONS]; 429 429 struct nlattr *tb[TCA_RSVP_MAX + 1]; 430 430 struct tcf_exts e; 431 431 unsigned int h1, h2; ··· 439 439 if (err < 0) 440 440 return err; 441 441 442 - err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map); 442 + err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map); 443 443 if (err < 0) 444 444 return err; 445 445 ··· 449 449 450 450 if (f->handle != handle && handle) 451 451 goto errout2; 452 - if (tb[TCA_RSVP_CLASSID-1]) { 453 - f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); 452 + if (tb[TCA_RSVP_CLASSID]) { 453 + f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); 454 454 tcf_bind_filter(tp, &f->res, base); 455 455 } 456 456 ··· 462 462 err = -EINVAL; 463 463 if (handle) 464 464 goto errout2; 465 - if (tb[TCA_RSVP_DST-1] == NULL) 465 + if (tb[TCA_RSVP_DST] == NULL) 466 466 goto errout2; 467 467 468 468 err = -ENOBUFS; ··· 471 471 goto errout2; 472 472 473 473 h2 = 16; 474 - if (tb[TCA_RSVP_SRC-1]) { 475 - memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src)); 474 + if (tb[TCA_RSVP_SRC]) { 475 + memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src)); 476 476 h2 = hash_src(f->src); 477 477 } 478 - if (tb[TCA_RSVP_PINFO-1]) { 479 - pinfo = nla_data(tb[TCA_RSVP_PINFO-1]); 478 + if (tb[TCA_RSVP_PINFO]) { 479 + pinfo = nla_data(tb[TCA_RSVP_PINFO]); 480 480 f->spi = pinfo->spi; 481 481 f->tunnelhdr = pinfo->tunnelhdr; 482 482 } 483 - if (tb[TCA_RSVP_CLASSID-1]) 484 - f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); 483 + if (tb[TCA_RSVP_CLASSID]) 484 + f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); 485 485 486 - dst = nla_data(tb[TCA_RSVP_DST-1]); 486 + dst = nla_data(tb[TCA_RSVP_DST]); 487 487 h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0); 488 488 489 489 err = -ENOMEM; ··· 642 642 return -1; 643 643 } 644 644 645 - static struct tcf_proto_ops RSVP_OPS = { 646 - .next = NULL, 645 + static struct tcf_proto_ops RSVP_OPS __read_mostly = { 647 646 .kind = RSVP_ID, 648 647 .classify = rsvp_classify, 649 648 .init = rsvp_init,
+5
net/sctp/sm_sideeffect.c
··· 1689 1689 case SCTP_CMD_PURGE_ASCONF_QUEUE: 1690 1690 sctp_asconf_queue_teardown(asoc); 1691 1691 break; 1692 + 1693 + case SCTP_CMD_SET_ASOC: 1694 + asoc = cmd->obj.asoc; 1695 + break; 1696 + 1692 1697 default: 1693 1698 pr_warn("Impossible command: %u, %p\n", 1694 1699 cmd->verb, cmd->obj.ptr);
+6
net/sctp/sm_statefuns.c
··· 2047 2047 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); 2048 2048 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); 2049 2049 2050 + /* Restore association pointer to provide SCTP command interpreter 2051 + * with a valid context in case it needs to manipulate 2052 + * the queues */ 2053 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, 2054 + SCTP_ASOC((struct sctp_association *)asoc)); 2055 + 2050 2056 return retval; 2051 2057 2052 2058 nomem: