Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (67 commits)
cxgb4vf: recover from failure in cxgb4vf_open()
netfilter: ebtables: make broute table work again
netfilter: fix race in conntrack between dump_table and destroy
ah: reload pointers to skb data after calling skb_cow_data()
ah: update maximum truncated ICV length
xfrm: check trunc_len in XFRMA_ALG_AUTH_TRUNC
ehea: Increase the skb array usage
net/fec: remove config FEC2 as it's used nowhere
pcnet_cs: add new_id
tcp: disallow bind() to reuse addr/port
net/r8169: Update the function of parsing firmware
net: ppp: use {get,put}_unaligned_be{16,32}
CAIF: Fix IPv6 support in receive path for GPRS/3G
arp: allow to invalidate specific ARP entries
net_sched: factorize qdisc stats handling
mlx4: Call alloc_etherdev to allocate RX and TX queues
net: Add alloc_netdev_mqs function
caif: don't set connection request param size before copying data
cxgb4vf: fix mailbox data/control coherency domain race
qlcnic: change module parameter permissions
...

+2905 -1871
+1
Documentation/networking/dccp.txt
··· 167 seq_window = 100 168 The initial sequence window (sec. 7.5.2) of the sender. This influences 169 the local ackno validity and the remote seqno validity windows (7.5.1). 170 171 tx_qlen = 5 172 The size of the transmit buffer in packets. A value of 0 corresponds
··· 167 seq_window = 100 168 The initial sequence window (sec. 7.5.2) of the sender. This influences 169 the local ackno validity and the remote seqno validity windows (7.5.1). 170 + Values in the range Wmin = 32 (RFC 4340, 7.5.2) up to 2^32-1 can be set. 171 172 tx_qlen = 5 173 The size of the transmit buffer in packets. A value of 0 corresponds
+12 -7
drivers/atm/ambassador.c
··· 1926 const struct firmware *fw; 1927 unsigned long start_address; 1928 const struct ihex_binrec *rec; 1929 int res; 1930 - 1931 res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev); 1932 if (res) { 1933 PRINTK (KERN_ERR, "Cannot load microcode data"); ··· 1938 /* First record contains just the start address */ 1939 rec = (const struct ihex_binrec *)fw->data; 1940 if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) { 1941 - PRINTK (KERN_ERR, "Bad microcode data (no start record)"); 1942 - return -EINVAL; 1943 } 1944 start_address = be32_to_cpup((__be32 *)rec->data); 1945 ··· 1951 PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr), 1952 be16_to_cpu(rec->len)); 1953 if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) { 1954 - PRINTK (KERN_ERR, "Bad microcode data (record too long)"); 1955 - return -EINVAL; 1956 } 1957 if (be16_to_cpu(rec->len) & 3) { 1958 - PRINTK (KERN_ERR, "Bad microcode data (odd number of bytes)"); 1959 - return -EINVAL; 1960 } 1961 res = loader_write(lb, dev, rec); 1962 if (res) ··· 1971 res = loader_start(lb, dev, start_address); 1972 1973 return res; 1974 } 1975 1976 /********** give adapter parameters **********/
··· 1926 const struct firmware *fw; 1927 unsigned long start_address; 1928 const struct ihex_binrec *rec; 1929 + const char *errmsg = 0; 1930 int res; 1931 + 1932 res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev); 1933 if (res) { 1934 PRINTK (KERN_ERR, "Cannot load microcode data"); ··· 1937 /* First record contains just the start address */ 1938 rec = (const struct ihex_binrec *)fw->data; 1939 if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) { 1940 + errmsg = "no start record"; 1941 + goto fail; 1942 } 1943 start_address = be32_to_cpup((__be32 *)rec->data); 1944 ··· 1950 PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr), 1951 be16_to_cpu(rec->len)); 1952 if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) { 1953 + errmsg = "record too long"; 1954 + goto fail; 1955 } 1956 if (be16_to_cpu(rec->len) & 3) { 1957 + errmsg = "odd number of bytes"; 1958 + goto fail; 1959 } 1960 res = loader_write(lb, dev, rec); 1961 if (res) ··· 1970 res = loader_start(lb, dev, start_address); 1971 1972 return res; 1973 + fail: 1974 + release_firmware(fw); 1975 + PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg); 1976 + return -EINVAL; 1977 } 1978 1979 /********** give adapter parameters **********/
+1 -8
drivers/net/Kconfig
··· 1944 config FEC 1945 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 1946 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ 1947 - MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 1948 select PHYLIB 1949 help 1950 Say Y here if you want to use the built-in 10/100 Fast ethernet 1951 controller on some Motorola ColdFire and Freescale i.MX processors. 1952 - 1953 - config FEC2 1954 - bool "Second FEC ethernet controller (on some ColdFire CPUs)" 1955 - depends on FEC 1956 - help 1957 - Say Y here if you want to use the second built-in 10/100 Fast 1958 - ethernet controller on some Motorola ColdFire processors. 1959 1960 config FEC_MPC52xx 1961 tristate "MPC52xx FEC driver"
··· 1944 config FEC 1945 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 1946 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ 1947 + MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28 1948 select PHYLIB 1949 help 1950 Say Y here if you want to use the built-in 10/100 Fast ethernet 1951 controller on some Motorola ColdFire and Freescale i.MX processors. 1952 1953 config FEC_MPC52xx 1954 tristate "MPC52xx FEC driver"
+37 -37
drivers/net/bfin_mac.c
··· 8 * Licensed under the GPL-2 or later. 9 */ 10 11 #include <linux/init.h> 12 #include <linux/module.h> 13 #include <linux/kernel.h> ··· 46 47 #include "bfin_mac.h" 48 49 - #define DRV_NAME "bfin_mac" 50 - #define DRV_VERSION "1.1" 51 - #define DRV_AUTHOR "Bryan Wu, Luke Yang" 52 - #define DRV_DESC "Blackfin on-chip Ethernet MAC driver" 53 - 54 - MODULE_AUTHOR(DRV_AUTHOR); 55 MODULE_LICENSE("GPL"); 56 MODULE_DESCRIPTION(DRV_DESC); 57 MODULE_ALIAS("platform:bfin_mac"); ··· 189 /* allocate a new skb for next time receive */ 190 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 191 if (!new_skb) { 192 - printk(KERN_NOTICE DRV_NAME 193 - ": init: low on mem - packet dropped\n"); 194 goto init_error; 195 } 196 skb_reserve(new_skb, NET_IP_ALIGN); ··· 239 240 init_error: 241 desc_list_free(); 242 - printk(KERN_ERR DRV_NAME ": kmalloc failed\n"); 243 return -ENOMEM; 244 } 245 ··· 258 while ((bfin_read_EMAC_STAADD()) & STABUSY) { 259 udelay(1); 260 if (timeout_cnt-- < 0) { 261 - printk(KERN_ERR DRV_NAME 262 - ": wait MDC/MDIO transaction to complete timeout\n"); 263 return -ETIMEDOUT; 264 } 265 } ··· 348 opmode &= ~RMII_10; 349 break; 350 default: 351 - printk(KERN_WARNING 352 - "%s: Ack! Speed (%d) is not 10/100!\n", 353 - DRV_NAME, phydev->speed); 354 break; 355 } 356 bfin_write_EMAC_OPMODE(opmode); ··· 415 416 /* now we are supposed to have a proper phydev, to attach to... 
*/ 417 if (!phydev) { 418 - printk(KERN_INFO "%s: Don't found any phy device at all\n", 419 - dev->name); 420 return -ENODEV; 421 } 422 423 if (phy_mode != PHY_INTERFACE_MODE_RMII && 424 phy_mode != PHY_INTERFACE_MODE_MII) { 425 - printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name); 426 return -EINVAL; 427 } 428 ··· 429 0, phy_mode); 430 431 if (IS_ERR(phydev)) { 432 - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 433 return PTR_ERR(phydev); 434 } 435 ··· 450 lp->old_duplex = -1; 451 lp->phydev = phydev; 452 453 - printk(KERN_INFO "%s: attached PHY driver [%s] " 454 - "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" 455 - "@sclk=%dMHz)\n", 456 - DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq, 457 - MDC_CLK, mdc_div, sclk/1000000); 458 459 return 0; 460 } ··· 498 static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, 499 struct ethtool_drvinfo *info) 500 { 501 - strcpy(info->driver, DRV_NAME); 502 strcpy(info->version, DRV_VERSION); 503 strcpy(info->fw_version, "N/A"); 504 strcpy(info->bus_info, dev_name(&dev->dev)); ··· 558 }; 559 560 /**************************************************************************/ 561 - void setup_system_regs(struct net_device *dev) 562 { 563 struct bfin_mac_local *lp = netdev_priv(dev); 564 int i; ··· 587 bfin_write_EMAC_SYSCTL(sysctl); 588 589 bfin_write_EMAC_MMC_CTL(RSTC | CROLL); 590 591 /* Initialize the TX DMA channel registers */ 592 bfin_write_DMA2_X_COUNT(0); ··· 827 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) 828 udelay(1); 829 if (timeout_cnt == 0) 830 - printk(KERN_ERR DRV_NAME 831 - ": fails to timestamp the TX packet\n"); 832 else { 833 struct skb_shared_hwtstamps shhwtstamps; 834 u64 ns; ··· 1082 * we which case we simply drop the packet 1083 */ 1084 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { 1085 - printk(KERN_NOTICE DRV_NAME 1086 - ": rx: receive error - packet dropped\n"); 1087 dev->stats.rx_dropped++; 1088 goto out; 
1089 } ··· 1092 1093 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 1094 if (!new_skb) { 1095 - printk(KERN_NOTICE DRV_NAME 1096 - ": rx: low on mem - packet dropped\n"); 1097 dev->stats.rx_dropped++; 1098 goto out; 1099 } ··· 1210 int ret; 1211 u32 opmode; 1212 1213 - pr_debug("%s: %s\n", DRV_NAME, __func__); 1214 1215 /* Set RX DMA */ 1216 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); ··· 1320 u32 sysctl; 1321 1322 if (dev->flags & IFF_PROMISC) { 1323 - printk(KERN_INFO "%s: set to promisc mode\n", dev->name); 1324 sysctl = bfin_read_EMAC_OPMODE(); 1325 sysctl |= PR; 1326 bfin_write_EMAC_OPMODE(sysctl); ··· 1390 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1391 */ 1392 if (!is_valid_ether_addr(dev->dev_addr)) { 1393 - printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n"); 1394 return -EINVAL; 1395 } 1396 ··· 1524 goto out_err_mii_probe; 1525 } 1526 1527 /* Fill in the fields of the device structure with ethernet values. */ 1528 ether_setup(ndev); 1529 ··· 1558 bfin_mac_hwtstamp_init(ndev); 1559 1560 /* now, print out the card info, in a short format.. */ 1561 - dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); 1562 1563 return 0; 1564 ··· 1650 * so set the GPIO pins to Ethernet mode 1651 */ 1652 pin_req = mii_bus_pd->mac_peripherals; 1653 - rc = peripheral_request_list(pin_req, DRV_NAME); 1654 if (rc) { 1655 dev_err(&pdev->dev, "Requesting peripherals failed!\n"); 1656 return rc; ··· 1739 .resume = bfin_mac_resume, 1740 .suspend = bfin_mac_suspend, 1741 .driver = { 1742 - .name = DRV_NAME, 1743 .owner = THIS_MODULE, 1744 }, 1745 };
··· 8 * Licensed under the GPL-2 or later. 9 */ 10 11 + #define DRV_VERSION "1.1" 12 + #define DRV_DESC "Blackfin on-chip Ethernet MAC driver" 13 + 14 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 + 16 #include <linux/init.h> 17 #include <linux/module.h> 18 #include <linux/kernel.h> ··· 41 42 #include "bfin_mac.h" 43 44 + MODULE_AUTHOR("Bryan Wu, Luke Yang"); 45 MODULE_LICENSE("GPL"); 46 MODULE_DESCRIPTION(DRV_DESC); 47 MODULE_ALIAS("platform:bfin_mac"); ··· 189 /* allocate a new skb for next time receive */ 190 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 191 if (!new_skb) { 192 + pr_notice("init: low on mem - packet dropped\n"); 193 goto init_error; 194 } 195 skb_reserve(new_skb, NET_IP_ALIGN); ··· 240 241 init_error: 242 desc_list_free(); 243 + pr_err("kmalloc failed\n"); 244 return -ENOMEM; 245 } 246 ··· 259 while ((bfin_read_EMAC_STAADD()) & STABUSY) { 260 udelay(1); 261 if (timeout_cnt-- < 0) { 262 + pr_err("wait MDC/MDIO transaction to complete timeout\n"); 263 return -ETIMEDOUT; 264 } 265 } ··· 350 opmode &= ~RMII_10; 351 break; 352 default: 353 + netdev_warn(dev, 354 + "Ack! Speed (%d) is not 10/100!\n", 355 + phydev->speed); 356 break; 357 } 358 bfin_write_EMAC_OPMODE(opmode); ··· 417 418 /* now we are supposed to have a proper phydev, to attach to... 
*/ 419 if (!phydev) { 420 + netdev_err(dev, "no phy device found\n"); 421 return -ENODEV; 422 } 423 424 if (phy_mode != PHY_INTERFACE_MODE_RMII && 425 phy_mode != PHY_INTERFACE_MODE_MII) { 426 + netdev_err(dev, "invalid phy interface mode\n"); 427 return -EINVAL; 428 } 429 ··· 432 0, phy_mode); 433 434 if (IS_ERR(phydev)) { 435 + netdev_err(dev, "could not attach PHY\n"); 436 return PTR_ERR(phydev); 437 } 438 ··· 453 lp->old_duplex = -1; 454 lp->phydev = phydev; 455 456 + pr_info("attached PHY driver [%s] " 457 + "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", 458 + phydev->drv->name, dev_name(&phydev->dev), phydev->irq, 459 + MDC_CLK, mdc_div, sclk/1000000); 460 461 return 0; 462 } ··· 502 static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, 503 struct ethtool_drvinfo *info) 504 { 505 + strcpy(info->driver, KBUILD_MODNAME); 506 strcpy(info->version, DRV_VERSION); 507 strcpy(info->fw_version, "N/A"); 508 strcpy(info->bus_info, dev_name(&dev->dev)); ··· 562 }; 563 564 /**************************************************************************/ 565 + static void setup_system_regs(struct net_device *dev) 566 { 567 struct bfin_mac_local *lp = netdev_priv(dev); 568 int i; ··· 591 bfin_write_EMAC_SYSCTL(sysctl); 592 593 bfin_write_EMAC_MMC_CTL(RSTC | CROLL); 594 + 595 + /* Set vlan regs to let 1522 bytes long packets pass through */ 596 + bfin_write_EMAC_VLAN1(lp->vlan1_mask); 597 + bfin_write_EMAC_VLAN2(lp->vlan2_mask); 598 599 /* Initialize the TX DMA channel registers */ 600 bfin_write_DMA2_X_COUNT(0); ··· 827 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) 828 udelay(1); 829 if (timeout_cnt == 0) 830 + netdev_err(netdev, "timestamp the TX packet failed\n"); 831 else { 832 struct skb_shared_hwtstamps shhwtstamps; 833 u64 ns; ··· 1083 * we which case we simply drop the packet 1084 */ 1085 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { 1086 + netdev_notice(dev, "rx: receive error - packet dropped\n"); 1087 
dev->stats.rx_dropped++; 1088 goto out; 1089 } ··· 1094 1095 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 1096 if (!new_skb) { 1097 + netdev_notice(dev, "rx: low on mem - packet dropped\n"); 1098 dev->stats.rx_dropped++; 1099 goto out; 1100 } ··· 1213 int ret; 1214 u32 opmode; 1215 1216 + pr_debug("%s\n", __func__); 1217 1218 /* Set RX DMA */ 1219 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); ··· 1323 u32 sysctl; 1324 1325 if (dev->flags & IFF_PROMISC) { 1326 + netdev_info(dev, "set promisc mode\n"); 1327 sysctl = bfin_read_EMAC_OPMODE(); 1328 sysctl |= PR; 1329 bfin_write_EMAC_OPMODE(sysctl); ··· 1393 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1394 */ 1395 if (!is_valid_ether_addr(dev->dev_addr)) { 1396 + netdev_warn(dev, "no valid ethernet hw addr\n"); 1397 return -EINVAL; 1398 } 1399 ··· 1527 goto out_err_mii_probe; 1528 } 1529 1530 + lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask; 1531 + lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask; 1532 + 1533 /* Fill in the fields of the device structure with ethernet values. */ 1534 ether_setup(ndev); 1535 ··· 1558 bfin_mac_hwtstamp_init(ndev); 1559 1560 /* now, print out the card info, in a short format.. */ 1561 + netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); 1562 1563 return 0; 1564 ··· 1650 * so set the GPIO pins to Ethernet mode 1651 */ 1652 pin_req = mii_bus_pd->mac_peripherals; 1653 + rc = peripheral_request_list(pin_req, KBUILD_MODNAME); 1654 if (rc) { 1655 dev_err(&pdev->dev, "Requesting peripherals failed!\n"); 1656 return rc; ··· 1739 .resume = bfin_mac_resume, 1740 .suspend = bfin_mac_suspend, 1741 .driver = { 1742 + .name = KBUILD_MODNAME, 1743 .owner = THIS_MODULE, 1744 }, 1745 };
+10 -1
drivers/net/bfin_mac.h
··· 17 #include <linux/etherdevice.h> 18 #include <linux/bfin_mac.h> 19 20 #define BFIN_MAC_CSUM_OFFLOAD 21 22 #define TX_RECLAIM_JIFFIES (HZ / 5) 23 ··· 75 */ 76 struct net_device_stats stats; 77 78 - unsigned char Mac[6]; /* MAC address of the board */ 79 spinlock_t lock; 80 81 int wol; /* Wake On Lan */ 82 int irq_wake_requested; 83 struct timer_list tx_reclaim_timer; 84 struct net_device *ndev; 85 86 /* MII and PHY stuffs */ 87 int old_link; /* used by bf537_adjust_link */
··· 17 #include <linux/etherdevice.h> 18 #include <linux/bfin_mac.h> 19 20 + /* 21 + * Disable hardware checksum for bug #5600 if writeback cache is 22 + * enabled. Otherwize, corrupted RX packet will be sent up stack 23 + * without error mark. 24 + */ 25 + #ifndef CONFIG_BFIN_EXTMEM_WRITEBACK 26 #define BFIN_MAC_CSUM_OFFLOAD 27 + #endif 28 29 #define TX_RECLAIM_JIFFIES (HZ / 5) 30 ··· 68 */ 69 struct net_device_stats stats; 70 71 spinlock_t lock; 72 73 int wol; /* Wake On Lan */ 74 int irq_wake_requested; 75 struct timer_list tx_reclaim_timer; 76 struct net_device *ndev; 77 + 78 + /* Data for EMAC_VLAN1 regs */ 79 + u16 vlan1_mask, vlan2_mask; 80 81 /* MII and PHY stuffs */ 82 int old_link; /* used by bf537_adjust_link */
+1
drivers/net/bnx2x/bnx2x.h
··· 636 637 #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) 638 #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) 639 640 int flash_size; 641 #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
··· 636 637 #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) 638 #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) 639 + #define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) 640 641 int flash_size; 642 #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
+570 -420
drivers/net/bnx2x/bnx2x_dump.h
··· 1 /* bnx2x_dump.h: Broadcom Everest network driver. 2 * 3 - * Copyright (c) 2009 Broadcom Corporation 4 * 5 - * This program is free software; you can redistribute it and/or modify 6 - * it under the terms of the GNU General Public License as published by 7 - * the Free Software Foundation. 8 */ 9 10 ··· 23 #define BNX2X_DUMP_H 24 25 26 struct dump_sign { 27 u32 time_stamp; 28 u32 diag_ver; 29 u32 grc_dump_ver; 30 }; 31 32 - #define TSTORM_WAITP_ADDR 0x1b8a80 33 - #define CSTORM_WAITP_ADDR 0x238a80 34 - #define XSTORM_WAITP_ADDR 0x2b8a80 35 - #define USTORM_WAITP_ADDR 0x338a80 36 - #define TSTORM_CAM_MODE 0x1b1440 37 - 38 - #define RI_E1 0x1 39 - #define RI_E1H 0x2 40 - #define RI_E2 0x4 41 - #define RI_ONLINE 0x100 42 - #define RI_PATH0_DUMP 0x200 43 - #define RI_PATH1_DUMP 0x400 44 - #define RI_E1_OFFLINE (RI_E1) 45 - #define RI_E1_ONLINE (RI_E1 | RI_ONLINE) 46 - #define RI_E1H_OFFLINE (RI_E1H) 47 - #define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) 48 - #define RI_E2_OFFLINE (RI_E2) 49 - #define RI_E2_ONLINE (RI_E2 | RI_ONLINE) 50 - #define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) 51 - #define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) 52 - #define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) 53 - #define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) 54 - #define RI_E1E2_OFFLINE (RI_E2 | RI_E1) 55 - #define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) 56 - #define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) 57 - #define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) 58 - 59 - #define MAX_TIMER_PENDING 200 60 - #define TIMER_SCAN_DONT_CARE 0xFF 61 - 62 - 63 struct dump_hdr { 64 - u32 hdr_size; /* in dwords, excluding this field */ 65 - struct dump_sign dump_sign; 66 - u32 xstorm_waitp; 67 - u32 tstorm_waitp; 68 - u32 ustorm_waitp; 69 - u32 cstorm_waitp; 70 - u16 info; 71 - u8 idle_chk; 72 - u8 reserved; 73 }; 74 75 struct reg_addr { ··· 86 u16 info; 87 }; 88 89 - 90 - #define REGS_COUNT 558 91 static const struct reg_addr reg_addrs[REGS_COUNT] = { 92 { 0x2000, 341, RI_ALL_ONLINE }, { 
0x2800, 103, RI_ALL_ONLINE }, 93 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, 94 - { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE }, 95 - { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE }, 96 - { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE }, 97 - { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE }, 98 - { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE }, 99 - { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE }, 100 - { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE }, 101 - { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE }, 102 - { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE }, 103 - { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE }, 104 - { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE }, 105 - { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE }, 106 - { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE }, 107 - { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE }, 108 - { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE }, 109 - { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE }, 110 - { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, 111 - { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE }, 112 - { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE }, 113 - { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE }, 114 - { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE }, 115 - { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE }, 116 - { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE }, 117 - { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE }, 118 - { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE }, 119 - { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE }, 120 - { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE }, 121 - { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE }, 122 - { 0x16070, 18, 
RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE }, 123 - { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE }, 124 - { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE }, 125 - { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE }, 126 - { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE }, 127 - { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE }, 128 - { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE }, 129 - { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE }, 130 - { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE }, 131 - { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE }, 132 - { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE }, 133 - { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE }, 134 - { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE }, 135 - { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE }, 136 - { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE }, 137 - { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE }, 138 - { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, 139 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, 140 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, 141 - { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE }, 142 - { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE }, 143 - { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE }, 144 - { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE }, 145 - { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE }, 146 - { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE }, 147 - { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE }, 148 - { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE }, 149 - { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE }, 150 - { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE }, 151 { 0x52000, 1, 
RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, 152 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, 153 - { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE }, 154 - { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE }, 155 - { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, 156 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, 157 - { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE }, 158 - { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE }, 159 - { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE }, 160 - { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE }, 161 - { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE }, 162 - { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE }, 163 - { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE }, 164 - { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE }, 165 - { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE }, 166 - { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE }, 167 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, 168 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, 169 - { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE }, 170 - { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE }, 171 - { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE }, 172 - { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE }, 173 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, 174 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, 175 - { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE }, 176 - { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE }, 177 - { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE }, 178 - { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE }, 179 - { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE }, 180 - { 
0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE }, 181 - { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE }, 182 - { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE }, 183 - { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, 184 - { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE }, 185 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, 186 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, 187 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, 188 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, 189 - { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE }, 190 - { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE }, 191 - { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE }, 192 - { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE }, 193 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, 194 - { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE }, 195 - { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE }, 196 - { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE }, 197 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, 198 - { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE }, 199 - { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE }, 200 - { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 201 - { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE }, 202 - { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE }, 203 - { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE }, 204 - { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE }, 205 - { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE }, 206 - { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE }, 207 - { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE }, 208 - { 0x10507c, 1, RI_ALL_OFFLINE }, { 
0x105080, 3, RI_ALL_ONLINE }, 209 - { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE }, 210 - { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE }, 211 - { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, 212 - { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE }, 213 - { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, 214 - { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE }, 215 - { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, 216 - { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE }, 217 - { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE }, 218 - { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE }, 219 - { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE }, 220 - { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE }, 221 - { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE }, 222 - { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE }, 223 - { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE }, 224 - { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE }, 225 - { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE }, 226 - { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE }, 227 - { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE }, 228 - { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE }, 229 - { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE }, 230 - { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE }, 231 - { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE }, 232 - { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE }, 233 - { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE }, 234 - { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE }, 235 - { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE }, 236 - { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 
3, RI_ALL_ONLINE }, 237 - { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE }, 238 - { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE }, 239 - { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE }, 240 - { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE }, 241 - { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE }, 242 - { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE }, 243 - { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE }, 244 - { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE }, 245 - { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE }, 246 - { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE }, 247 - { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE }, 248 - { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE }, 249 - { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE }, 250 - { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE }, 251 - { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE }, 252 - { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE }, 253 - { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE }, 254 - { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE }, 255 - { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE }, 256 - { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE }, 257 - { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE }, 258 - { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE }, 259 - { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE }, 260 - { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE }, 261 - { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE }, 262 - { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE }, 263 - { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE }, 264 - { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, 
RI_ALL_ONLINE }, 265 - { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE }, 266 - { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE }, 267 - { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE }, 268 - { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE }, 269 - { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE }, 270 - { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE }, 271 - { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE }, 272 - { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE }, 273 - { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE }, 274 - { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE }, 275 - { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE }, 276 - { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, 277 - { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE }, 278 - { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE }, 279 - { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE }, 280 - { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, 281 - { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE }, 282 - { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE }, 283 - { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE }, 284 - { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, 285 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, 286 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, 287 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, ··· 273 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, 274 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, 275 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, 276 - { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE }, 277 - { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE }, 278 { 0x166400, 
49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, 279 - { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE }, 280 - { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE }, 281 - { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE }, 282 - { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE }, 283 - { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE }, 284 - { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE }, 285 - { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, 286 - { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE }, 287 - { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE }, 288 - { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE }, 289 - { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE }, 290 - { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE }, 291 - { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE }, 292 - { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE }, 293 - { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE }, 294 - { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE }, 295 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, 296 - { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE }, 297 - { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE }, 298 - { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE }, 299 - { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE }, 300 - { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE }, 301 - { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE }, 302 - { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, 303 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, 304 - { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, 305 - { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, 
306 - { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE }, 307 - { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE }, 308 - { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE }, 309 - { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE }, 310 - { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE }, 311 - { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, 312 - { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE }, 313 - { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE }, 314 - { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE }, 315 - { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE }, 316 - { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, 317 - { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE }, 318 - { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE }, 319 - { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE }, 320 - { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE }, 321 - { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE }, 322 - { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE }, 323 - { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE }, 324 - { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE }, 325 - { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE }, 326 - { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE }, 327 - { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE }, 328 - { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE }, 329 - { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE }, 330 - { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE }, 331 - { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE }, 332 - { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, 333 - { 0x2b1c00, 128, 
RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE }, 334 - { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE }, 335 - { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE }, 336 - { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE }, 337 - { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, 338 - { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE }, 339 - { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE }, 340 - { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE }, 341 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, 342 - { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE }, 343 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, 344 - { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE }, 345 - { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE }, 346 - { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE }, 347 - { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE }, 348 - { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE }, 349 - { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE }, 350 - { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE }, 351 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE }, 352 - { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, 353 - { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE } 354 }; 355 356 - 357 - #define IDLE_REGS_COUNT 277 358 static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { 359 - { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE }, 360 - { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, 361 - { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE }, 362 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, 363 - { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE }, 364 - { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE }, 
365 - { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE }, 366 - { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE }, 367 - { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE }, 368 - { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE }, 369 - { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE }, 370 - { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE }, 371 - { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE }, 372 - { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE }, 373 - { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE }, 374 - { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE }, 375 - { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE }, 376 - { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE }, 377 - { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE }, 378 - { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE }, 379 - { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE }, 380 - { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE }, 381 - { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE }, 382 - { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE }, 383 - { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE }, 384 - { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE }, 385 - { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE }, 386 - { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE }, 387 - { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE }, 388 - { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE }, 389 - { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE }, 390 - { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE }, 391 - { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE }, 392 - { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE }, 393 - { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE }, 394 - { 0x103068, 
1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE }, 395 - { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE }, 396 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, 397 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, 398 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, 399 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 400 - { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE }, 401 - { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE }, 402 - { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE }, 403 - { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE }, 404 - { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE }, 405 - { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, 406 - { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, 407 - { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, 408 - { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE }, 409 - { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE }, 410 - { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE }, 411 - { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE }, 412 - { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE }, 413 - { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE }, 414 - { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE }, 415 - { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE }, 416 - { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE }, 417 - { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE }, 418 - { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE }, 419 - { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE }, 420 - { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE }, 421 - { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE }, 422 - { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, 
RI_ALL_ONLINE }, 423 - { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE }, 424 - { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE }, 425 - { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE }, 426 - { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE }, 427 - { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE }, 428 - { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE }, 429 - { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE }, 430 - { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE }, 431 - { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE }, 432 - { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE }, 433 - { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE }, 434 - { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE }, 435 - { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE }, 436 - { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE }, 437 - { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE }, 438 - { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE }, 439 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, 440 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, 441 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, ··· 580 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, 581 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, 582 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, 583 - { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE }, 584 - { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE }, 585 - { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, 586 - { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE }, 587 - { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE }, 588 - { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE }, 589 - { 0x1662b0, 1, 
RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE }, 590 - { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE }, 591 - { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE }, 592 - { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE }, 593 - { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE }, 594 - { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE }, 595 - { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE }, 596 - { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, 597 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, 598 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, 599 - { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE }, 600 - { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE }, 601 - { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE }, 602 - { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE }, 603 - { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE }, 604 - { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE }, 605 - { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE }, 606 - { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, 607 - { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, 608 - { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE }, 609 - { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE }, 610 - { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE }, 611 - { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE }, 612 - { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE }, 613 - { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE }, 614 - { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE }, 615 - { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE }, 616 - { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE }, 617 - { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, 
RI_ALL_ONLINE }, 618 - { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE }, 619 - { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE }, 620 - { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE }, 621 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE }, 622 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE }, 623 - { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE }, 624 - { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE }, 625 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, 626 { 0x3380c0, 1, RI_ALL_ONLINE } 627 }; ··· 634 static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = { 635 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } 636 }; 637 - 638 639 #define WREGS_COUNT_E1H 1 640 static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; ··· 649 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } 650 }; 651 652 - static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 }; 653 - 654 655 #define TIMER_REGS_COUNT_E1 2 656 - static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = 657 - { 0x164014, 0x164018 }; 658 - static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = 659 - { 0x1640d0, 0x1640d4 }; 660 661 662 #define TIMER_REGS_COUNT_E1H 2 663 - static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = 664 - { 0x164014, 0x164018 }; 665 - static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = 666 - { 0x1640d0, 0x1640d4 }; 667 668 669 #define PAGE_MODE_VALUES_E2 2 670
··· 1 /* bnx2x_dump.h: Broadcom Everest network driver. 2 * 3 + * Copyright (c) 2011 Broadcom Corporation 4 * 5 + * Unless you and Broadcom execute a separate written software license 6 + * agreement governing use of this software, this software is licensed to you 7 + * under the terms of the GNU General Public License version 2, available 8 + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 9 + * 10 + * Notwithstanding the above, under no circumstances may you combine this 11 + * software in any way with any other Broadcom software provided under a 12 + * license other than the GPL, without Broadcom's express prior written 13 + * consent. 14 */ 15 16 ··· 17 #define BNX2X_DUMP_H 18 19 20 + 21 + /*definitions */ 22 + #define XSTORM_WAITP_ADDR 0x2b8a80 23 + #define TSTORM_WAITP_ADDR 0x1b8a80 24 + #define USTORM_WAITP_ADDR 0x338a80 25 + #define CSTORM_WAITP_ADDR 0x238a80 26 + #define TSTORM_CAM_MODE 0x1B1440 27 + 28 + #define MAX_TIMER_PENDING 200 29 + #define TIMER_SCAN_DONT_CARE 0xFF 30 + #define RI_E1 0x1 31 + #define RI_E1H 0x2 32 + #define RI_E2 0x4 33 + #define RI_ONLINE 0x100 34 + #define RI_PATH0_DUMP 0x200 35 + #define RI_PATH1_DUMP 0x400 36 + #define RI_E1_OFFLINE (RI_E1) 37 + #define RI_E1_ONLINE (RI_E1 | RI_ONLINE) 38 + #define RI_E1H_OFFLINE (RI_E1H) 39 + #define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) 40 + #define RI_E2_OFFLINE (RI_E2) 41 + #define RI_E2_ONLINE (RI_E2 | RI_ONLINE) 42 + #define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) 43 + #define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) 44 + #define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) 45 + #define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) 46 + #define RI_E1E2_OFFLINE (RI_E2 | RI_E1) 47 + #define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) 48 + #define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) 49 + #define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) 50 + 51 struct dump_sign { 52 u32 time_stamp; 53 u32 diag_ver; 54 u32 grc_dump_ver; 55 }; 56 57 struct dump_hdr { 58 + u32 hdr_size; /* in 
dwords, excluding this field */ 59 + struct dump_sign dump_sign; 60 + u32 xstorm_waitp; 61 + u32 tstorm_waitp; 62 + u32 ustorm_waitp; 63 + u32 cstorm_waitp; 64 + u16 info; 65 + u8 idle_chk; 66 + u8 reserved; 67 }; 68 69 struct reg_addr { ··· 80 u16 info; 81 }; 82 83 + #define REGS_COUNT 834 84 static const struct reg_addr reg_addrs[REGS_COUNT] = { 85 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, 86 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, 87 + { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE }, 88 + { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE }, 89 + { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE }, 90 + { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE }, 91 + { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE }, 92 + { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE }, 93 + { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE }, 94 + { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE }, 95 + { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE }, 96 + { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE }, 97 + { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE }, 98 + { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE }, 99 + { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE }, 100 + { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE }, 101 + { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE }, 102 + { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE }, 103 + { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE }, 104 + { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE }, 105 + { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE }, 106 + { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE }, 107 + { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE }, 108 + { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE }, 109 + { 0xa5e0, 1, 
RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE }, 110 + { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE }, 111 + { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE }, 112 + { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE }, 113 + { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, 114 + { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE }, 115 + { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE }, 116 + { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE }, 117 + { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE }, 118 + { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE }, 119 + { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE }, 120 + { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE }, 121 + { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE }, 122 + { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE }, 123 + { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE }, 124 + { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE }, 125 + { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE }, 126 + { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE }, 127 + { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE }, 128 + { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE }, 129 + { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE }, 130 + { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE }, 131 + { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE }, 132 + { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE }, 133 + { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE }, 134 + { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE }, 135 + { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE }, 136 + { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE }, 137 + { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE }, 138 + { 
0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE }, 139 + { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE }, 140 + { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE }, 141 + { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE }, 142 + { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE }, 143 + { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE }, 144 + { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE }, 145 + { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE }, 146 + { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, 147 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, 148 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, 149 + { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE }, 150 + { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE }, 151 + { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE }, 152 + { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE }, 153 + { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE }, 154 + { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE }, 155 + { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE }, 156 + { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE }, 157 + { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE }, 158 + { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE }, 159 + { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE }, 160 + { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE }, 161 + { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE }, 162 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, 163 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, 164 + { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE }, 165 + { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE }, 166 + { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE }, 167 + { 
0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE }, 168 + { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE }, 169 + { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE }, 170 + { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, 171 + { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE }, 172 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, 173 + { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE }, 174 + { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE }, 175 + { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE }, 176 + { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE }, 177 + { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE }, 178 + { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE }, 179 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, 180 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, 181 + { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE }, 182 + { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE }, 183 + { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE }, 184 + { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE }, 185 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, 186 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, 187 + { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE }, 188 + { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE }, 189 + { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE }, 190 + { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE }, 191 + { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE }, 192 + { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE }, 193 + { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE }, 194 + { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE }, 195 + { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, 196 + { 
0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE }, 197 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, 198 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, 199 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, 200 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, 201 + { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE }, 202 + { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE }, 203 + { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE }, 204 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, 205 + { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE }, 206 + { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE }, 207 + { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE }, 208 + { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE }, 209 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, 210 + { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE }, 211 + { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE }, 212 + { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE }, 213 + { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE }, 214 + { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE }, 215 + { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE }, 216 + { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE }, 217 + { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE }, 218 + { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE }, 219 + { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE }, 220 + { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE }, 221 + { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE }, 222 + { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE }, 223 + { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE }, 224 + { 0x120088, 3, 
RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE }, 225 + { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE }, 226 + { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE }, 227 + { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE }, 228 + { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE }, 229 + { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE }, 230 + { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE }, 231 + { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE }, 232 + { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE }, 233 + { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE }, 234 + { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE }, 235 + { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE }, 236 + { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE }, 237 + { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE }, 238 + { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE }, 239 + { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE }, 240 + { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE }, 241 + { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE }, 242 + { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE }, 243 + { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE }, 244 + { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE }, 245 + { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE }, 246 + { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE }, 247 + { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE }, 248 + { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE }, 249 + { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE }, 250 + { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE }, 251 + { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE }, 252 + { 0x1402c0, 4, RI_E2_ONLINE }, { 
0x1402e0, 13, RI_E2_ONLINE }, 253 + { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE }, 254 + { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE }, 255 + { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE }, 256 + { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE }, 257 + { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE }, 258 + { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, 259 + { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE }, 260 + { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE }, 261 + { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, 262 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, 263 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, 264 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, ··· 284 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, 285 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, 286 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, 287 + { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE }, 288 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, 289 + { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE }, 290 + { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE }, 291 + { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE }, 292 + { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE }, 293 + { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE }, 294 + { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE }, 295 + { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE }, 296 + { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE }, 297 + { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE }, 298 + { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE }, 299 + { 0x16e040, 8, RI_E2_ONLINE }, { 
0x16e100, 1, RI_E1H_ONLINE }, 300 + { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE }, 301 + { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE }, 302 + { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE }, 303 + { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE }, 304 + { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE }, 305 + { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE }, 306 + { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE }, 307 + { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE }, 308 + { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE }, 309 + { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE }, 310 + { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE }, 311 + { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE }, 312 + { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE }, 313 + { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE }, 314 + { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE }, 315 + { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE }, 316 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, 317 + { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE }, 318 + { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE }, 319 + { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE }, 320 + { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE }, 321 + { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE }, 322 + { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE }, 323 + { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE }, 324 + { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE }, 325 + { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE }, 326 + { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE }, 327 + { 0x1b1800, 128, 
RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE }, 328 + { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE }, 329 + { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, 330 + { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, 331 + { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE }, 332 + { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE }, 333 + { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, 334 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, 335 + { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE }, 336 + { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE }, 337 + { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE }, 338 + { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE }, 339 + { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE }, 340 + { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE }, 341 + { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE }, 342 + { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE }, 343 + { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE }, 344 + { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE }, 345 + { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, 346 + { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE }, 347 + { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE }, 348 + { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE }, 349 + { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE }, 350 + { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE }, 351 + { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE }, 352 + { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE }, 353 + { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE }, 354 + { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE }, 355 + { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, 
RI_E2_ONLINE }, 356 + { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE }, 357 + { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE }, 358 + { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE }, 359 + { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE }, 360 + { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE }, 361 + { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE }, 362 + { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE }, 363 + { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE }, 364 + { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE }, 365 + { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE }, 366 + { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE }, 367 + { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE }, 368 + { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE }, 369 + { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE }, 370 + { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE}, 371 + { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE }, 372 + { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE }, 373 + { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE }, 374 + { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE }, 375 + { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE }, 376 + { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE }, 377 + { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE }, 378 + { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE }, 379 + { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, 380 + { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE }, 381 + { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE }, 382 + { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, 383 + { 0x238080, 1, RI_ALL_ONLINE 
}, { 0x2380c0, 1, RI_ALL_ONLINE }, 384 + { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE }, 385 + { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE }, 386 + { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE }, 387 + { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE }, 388 + { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE }, 389 + { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE }, 390 + { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE }, 391 + { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE }, 392 + { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE }, 393 + { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE }, 394 + { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE }, 395 + { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE }, 396 + { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE }, 397 + { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE }, 398 + { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE }, 399 + { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE }, 400 + { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE }, 401 + { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE }, 402 + { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE }, 403 + { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE }, 404 + { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE }, 405 + { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE }, 406 + { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE }, 407 + { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE }, 408 + { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE }, 409 + { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE }, 410 + { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE }, 411 + { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE 
}, 412 + { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE }, 413 + { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE }, 414 + { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE }, 415 + { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE }, 416 + { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE }, 417 + { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE }, 418 + { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE }, 419 + { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE }, 420 + { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE }, 421 + { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE}, 422 + { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE }, 423 + { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE }, 424 + { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE }, 425 + { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE }, 426 + { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE }, 427 + { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE }, 428 + { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE }, 429 + { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE }, 430 + { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, 431 + { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE }, 432 + { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE }, 433 + { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE }, 434 + { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE }, 435 + { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE }, 436 + { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE }, 437 + { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE }, 438 + { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE }, 439 + { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, 
RI_ALL_ONLINE }, 440 + { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE }, 441 + { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE }, 442 + { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE }, 443 + { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE }, 444 + { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE }, 445 + { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE }, 446 + { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE }, 447 + { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE }, 448 + { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE }, 449 + { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE }, 450 + { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE }, 451 + { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE }, 452 + { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE }, 453 + { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE }, 454 + { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE }, 455 + { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE }, 456 + { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE }, 457 + { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE }, 458 + { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE }, 459 + { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE }, 460 + { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE }, 461 + { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE }, 462 + { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE }, 463 + { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE }, 464 + { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE }, 465 + { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, 466 + { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE }, 467 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, 468 + { 
0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE }, 469 + { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE }, 470 + { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE }, 471 + { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE }, 472 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, 473 + { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE }, 474 + { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE }, 475 + { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE }, 476 + { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE }, 477 + { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE }, 478 + { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE }, 479 + { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE }, 480 + { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE }, 481 + { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE }, 482 + { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE }, 483 + { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE }, 484 + { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE }, 485 + { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE }, 486 + { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, 487 + { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE }, 488 + { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE }, 489 + { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE }, 490 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE }, 491 + { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE }, 492 + { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE }, 493 + { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE }, 494 + { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE }, 495 + { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE }, 
496 + { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE }, 497 + { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE }, 498 + { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE }, 499 + { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE }, 500 + { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE }, 501 + { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, 502 + { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE }, 503 + { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE }, 504 + { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE }, 505 + { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE }, 506 + { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE }, 507 + { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE }, 508 + { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE }, 509 + { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE }, 510 + { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE }, 511 + { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE }, 512 + { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE }, 513 + { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE }, 514 + { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE }, 515 + { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE }, 516 + { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }, 517 }; 518 519 + #define IDLE_REGS_COUNT 237 520 static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { 521 + { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE }, 522 + { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, 523 + { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE }, 524 + { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE }, 525 + { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE }, 526 + { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE }, 527 + { 0x9298, 1, 
RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE }, 528 + { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE }, 529 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, 530 + { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE }, 531 + { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE }, 532 + { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE }, 533 + { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE }, 534 + { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE }, 535 + { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE }, 536 + { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE }, 537 + { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE }, 538 + { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE }, 539 + { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE }, 540 + { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE }, 541 + { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE }, 542 + { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE }, 543 + { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE }, 544 + { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE }, 545 + { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE }, 546 + { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE }, 547 + { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE }, 548 + { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE }, 549 + { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE }, 550 + { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE }, 551 + { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE }, 552 + { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE }, 553 + { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE }, 554 + { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE }, 555 + { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE }, 556 + { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, 
RI_ALL_ONLINE }, 557 + { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE }, 558 + { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE }, 559 + { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE }, 560 + { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE }, 561 + { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE }, 562 + { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE }, 563 + { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE }, 564 + { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE }, 565 + { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE }, 566 + { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE }, 567 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, 568 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, 569 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, 570 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 571 + { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE }, 572 + { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE }, 573 + { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE }, 574 + { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE }, 575 + { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE }, 576 + { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE }, 577 + { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE }, 578 + { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE }, 579 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, 580 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, 581 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, ··· 462 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, 463 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, 464 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, 465 + { 0x120908, 1, RI_ALL_ONLINE }, 
{ 0x120940, 5, RI_E2_ONLINE }, 466 + { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE }, 467 + { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE }, 468 + { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE }, 469 + { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE }, 470 + { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE }, 471 + { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE }, 472 + { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE }, 473 + { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE }, 474 + { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE }, 475 + { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE }, 476 + { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE }, 477 + { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE }, 478 + { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE }, 479 + { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE }, 480 + { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE }, 481 + { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE }, 482 + { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE }, 483 + { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, 484 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, 485 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, 486 + { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE }, 487 + { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE }, 488 + { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE }, 489 + { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE }, 490 + { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE }, 491 + { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE }, 492 + { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE }, 493 + { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE }, 494 + { 
0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE }, 495 + { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE }, 496 + { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE }, 497 + { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE }, 498 + { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE }, 499 + { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, 500 + { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, 501 + { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE }, 502 + { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE }, 503 + { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE }, 504 + { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE }, 505 + { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE }, 506 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE }, 507 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE }, 508 + { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE }, 509 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, 510 { 0x3380c0, 1, RI_ALL_ONLINE } 511 }; ··· 514 static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = { 515 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } 516 }; 517 518 #define WREGS_COUNT_E1H 1 519 static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; ··· 530 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } 531 }; 532 533 + static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a }; 534 535 #define TIMER_REGS_COUNT_E1 2 536 537 + static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = { 538 + 0x164014, 0x164018 }; 539 + static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = { 540 + 0x1640d0, 0x1640d4 }; 541 542 #define TIMER_REGS_COUNT_E1H 2 543 544 + static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = { 545 + 0x164014, 0x164018 }; 546 + static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = { 547 + 0x1640d0, 
0x1640d4 }; 548 + 549 + #define TIMER_REGS_COUNT_E2 2 550 + 551 + static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = { 552 + 0x164014, 0x164018 }; 553 + static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = { 554 + 0x1640d0, 0x1640d4 }; 555 + 556 + #define PAGE_MODE_VALUES_E1 0 557 + 558 + #define PAGE_READ_REGS_E1 0 559 + 560 + #define PAGE_WRITE_REGS_E1 0 561 + 562 + static const u32 page_vals_e1[] = { 0 }; 563 + 564 + static const u32 page_write_regs_e1[] = { 0 }; 565 + 566 + static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } }; 567 + 568 + #define PAGE_MODE_VALUES_E1H 0 569 + 570 + #define PAGE_READ_REGS_E1H 0 571 + 572 + #define PAGE_WRITE_REGS_E1H 0 573 + 574 + static const u32 page_vals_e1h[] = { 0 }; 575 + 576 + static const u32 page_write_regs_e1h[] = { 0 }; 577 + 578 + static const struct reg_addr page_read_regs_e1h[] = { 579 + { 0x0, 0, RI_E1H_ONLINE } }; 580 581 #define PAGE_MODE_VALUES_E2 2 582
+21 -1
drivers/net/bnx2x/bnx2x_ethtool.c
··· 24 #include "bnx2x.h" 25 #include "bnx2x_cmn.h" 26 #include "bnx2x_dump.h" 27 28 /* Note: in the format strings below %s is replaced by the queue-name which is 29 * either its index or 'fcoe' for the fcoe queue. Make sure the format string ··· 473 { 474 struct bnx2x *bp = netdev_priv(dev); 475 int regdump_len = 0; 476 - int i; 477 478 if (CHIP_IS_E1(bp)) { 479 for (i = 0; i < REGS_COUNT; i++) ··· 503 if (IS_E2_ONLINE(wreg_addrs_e2[i].info)) 504 regdump_len += wreg_addrs_e2[i].size * 505 (1 + wreg_addrs_e2[i].read_regs_count); 506 } 507 regdump_len *= 4; 508 regdump_len += sizeof(struct dump_hdr); ··· 548 549 if (!netif_running(bp->dev)) 550 return; 551 552 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; 553 dump_hdr.dump_sign = dump_sign_all; ··· 596 597 bnx2x_read_pages_regs_e2(bp, p); 598 } 599 } 600 601 #define PHY_FW_VER_LEN 20
··· 24 #include "bnx2x.h" 25 #include "bnx2x_cmn.h" 26 #include "bnx2x_dump.h" 27 + #include "bnx2x_init.h" 28 29 /* Note: in the format strings below %s is replaced by the queue-name which is 30 * either its index or 'fcoe' for the fcoe queue. Make sure the format string ··· 472 { 473 struct bnx2x *bp = netdev_priv(dev); 474 int regdump_len = 0; 475 + int i, j, k; 476 477 if (CHIP_IS_E1(bp)) { 478 for (i = 0; i < REGS_COUNT; i++) ··· 502 if (IS_E2_ONLINE(wreg_addrs_e2[i].info)) 503 regdump_len += wreg_addrs_e2[i].size * 504 (1 + wreg_addrs_e2[i].read_regs_count); 505 + 506 + for (i = 0; i < PAGE_MODE_VALUES_E2; i++) 507 + for (j = 0; j < PAGE_WRITE_REGS_E2; j++) { 508 + for (k = 0; k < PAGE_READ_REGS_E2; k++) 509 + if (IS_E2_ONLINE(page_read_regs_e2[k]. 510 + info)) 511 + regdump_len += 512 + page_read_regs_e2[k].size; 513 + } 514 } 515 regdump_len *= 4; 516 regdump_len += sizeof(struct dump_hdr); ··· 538 539 if (!netif_running(bp->dev)) 540 return; 541 + 542 + /* Disable parity attentions as long as following dump may 543 + * cause false alarms by reading never written registers. We 544 + * will re-enable parity attentions right after the dump. 545 + */ 546 + bnx2x_disable_blocks_parity(bp); 547 548 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; 549 dump_hdr.dump_sign = dump_sign_all; ··· 580 581 bnx2x_read_pages_regs_e2(bp, p); 582 } 583 + /* Re-enable parity attentions */ 584 + bnx2x_clear_blocks_parity(bp); 585 + if (CHIP_PARITY_ENABLED(bp)) 586 + bnx2x_enable_blocks_parity(bp); 587 } 588 589 #define PHY_FW_VER_LEN 20
+220
drivers/net/bnx2x/bnx2x_init.h
··· 192 u64 next; 193 }; 194 195 #endif /* BNX2X_INIT_H */ 196
··· 192 u64 next; 193 }; 194 195 + /**************************************************************************** 196 + * Parity configuration 197 + ****************************************************************************/ 198 + #define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \ 199 + { \ 200 + block##_REG_##block##_PRTY_MASK, \ 201 + block##_REG_##block##_PRTY_STS_CLR, \ 202 + en_mask, {m1, m1h, m2}, #block \ 203 + } 204 + 205 + #define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \ 206 + { \ 207 + block##_REG_##block##_PRTY_MASK_0, \ 208 + block##_REG_##block##_PRTY_STS_CLR_0, \ 209 + en_mask, {m1, m1h, m2}, #block"_0" \ 210 + } 211 + 212 + #define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \ 213 + { \ 214 + block##_REG_##block##_PRTY_MASK_1, \ 215 + block##_REG_##block##_PRTY_STS_CLR_1, \ 216 + en_mask, {m1, m1h, m2}, #block"_1" \ 217 + } 218 + 219 + static const struct { 220 + u32 mask_addr; 221 + u32 sts_clr_addr; 222 + u32 en_mask; /* Mask to enable parity attentions */ 223 + struct { 224 + u32 e1; /* 57710 */ 225 + u32 e1h; /* 57711 */ 226 + u32 e2; /* 57712 */ 227 + } reg_mask; /* Register mask (all valid bits) */ 228 + char name[7]; /* Block's longest name is 6 characters long 229 + * (name + suffix) 230 + */ 231 + } bnx2x_blocks_parity_data[] = { 232 + /* bit 19 masked */ 233 + /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */ 234 + /* bit 5,18,20-31 */ 235 + /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */ 236 + /* bit 5 */ 237 + /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */ 238 + /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */ 239 + /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */ 240 + 241 + /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't 242 + * want to handle "system kill" flow at the moment. 
243 + */ 244 + BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), 245 + BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), 246 + BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), 247 + BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), 248 + BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff), 249 + BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1), 250 + BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff), 251 + BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3), 252 + {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 253 + GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0, 254 + {0xf, 0xf, 0xf}, "UPB"}, 255 + {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 256 + GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, 257 + {0xf, 0xf, 0xf}, "XPB"}, 258 + BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7), 259 + BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f), 260 + BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf), 261 + BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1), 262 + BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf), 263 + BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf), 264 + BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff), 265 + BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff), 266 + BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), 267 + BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff), 268 + BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), 269 + BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), 270 + BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f), 271 + BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), 272 + BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f), 273 + BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), 274 + BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f), 275 + BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), 276 + BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f), 277 + }; 278 + 279 + 280 + /* [28] MCP Latched rom_parity 281 + * [29] MCP Latched ump_rx_parity 282 + * [30] MCP Latched ump_tx_parity 283 + * [31] MCP Latched scpad_parity 284 + */ 285 + #define MISC_AEU_ENABLE_MCP_PRTY_BITS \ 286 + 
(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ 287 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ 288 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ 289 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) 290 + 291 + /* Below registers control the MCP parity attention output. When 292 + * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are 293 + * enabled, when cleared - disabled. 294 + */ 295 + static const u32 mcp_attn_ctl_regs[] = { 296 + MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, 297 + MISC_REG_AEU_ENABLE4_NIG_0, 298 + MISC_REG_AEU_ENABLE4_PXP_0, 299 + MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, 300 + MISC_REG_AEU_ENABLE4_NIG_1, 301 + MISC_REG_AEU_ENABLE4_PXP_1 302 + }; 303 + 304 + static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) 305 + { 306 + int i; 307 + u32 reg_val; 308 + 309 + for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { 310 + reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); 311 + 312 + if (enable) 313 + reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; 314 + else 315 + reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; 316 + 317 + REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); 318 + } 319 + } 320 + 321 + static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx) 322 + { 323 + if (CHIP_IS_E1(bp)) 324 + return bnx2x_blocks_parity_data[idx].reg_mask.e1; 325 + else if (CHIP_IS_E1H(bp)) 326 + return bnx2x_blocks_parity_data[idx].reg_mask.e1h; 327 + else 328 + return bnx2x_blocks_parity_data[idx].reg_mask.e2; 329 + } 330 + 331 + static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) 332 + { 333 + int i; 334 + 335 + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { 336 + u32 dis_mask = bnx2x_parity_reg_mask(bp, i); 337 + 338 + if (dis_mask) { 339 + REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, 340 + dis_mask); 341 + DP(NETIF_MSG_HW, "Setting parity mask " 342 + "for %s to\t\t0x%x\n", 343 + bnx2x_blocks_parity_data[i].name, dis_mask); 344 + } 345 + } 346 + 347 + /* Disable MCP parity attentions */ 348 + bnx2x_set_mcp_parity(bp, 
false); 349 + } 350 + 351 + /** 352 + * Clear the parity error status registers. 353 + */ 354 + static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) 355 + { 356 + int i; 357 + u32 reg_val, mcp_aeu_bits = 358 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | 359 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY | 360 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | 361 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY; 362 + 363 + /* Clear SEM_FAST parities */ 364 + REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); 365 + REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); 366 + REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); 367 + REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); 368 + 369 + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { 370 + u32 reg_mask = bnx2x_parity_reg_mask(bp, i); 371 + 372 + if (reg_mask) { 373 + reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i]. 374 + sts_clr_addr); 375 + if (reg_val & reg_mask) 376 + DP(NETIF_MSG_HW, 377 + "Parity errors in %s: 0x%x\n", 378 + bnx2x_blocks_parity_data[i].name, 379 + reg_val & reg_mask); 380 + } 381 + } 382 + 383 + /* Check if there were parity attentions in MCP */ 384 + reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP); 385 + if (reg_val & mcp_aeu_bits) 386 + DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n", 387 + reg_val & mcp_aeu_bits); 388 + 389 + /* Clear parity attentions in MCP: 390 + * [7] clears Latched rom_parity 391 + * [8] clears Latched ump_rx_parity 392 + * [9] clears Latched ump_tx_parity 393 + * [10] clears Latched scpad_parity (both ports) 394 + */ 395 + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780); 396 + } 397 + 398 + static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp) 399 + { 400 + int i; 401 + 402 + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { 403 + u32 reg_mask = bnx2x_parity_reg_mask(bp, i); 404 + 405 + if (reg_mask) 406 + REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, 
407 + bnx2x_blocks_parity_data[i].en_mask & reg_mask); 408 + } 409 + 410 + /* Enable MCP parity attentions */ 411 + bnx2x_set_mcp_parity(bp, true); 412 + } 413 + 414 + 415 #endif /* BNX2X_INIT_H */ 416
+12 -58
drivers/net/bnx2x/bnx2x_main.c
··· 3152 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) 3153 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) 3154 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS 3155 - #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) 3156 3157 /* 3158 * should be run under rtnl lock ··· 3526 try to handle this event */ 3527 bnx2x_acquire_alr(bp); 3528 3529 - if (bnx2x_chk_parity_attn(bp)) { 3530 bp->recovery_state = BNX2X_RECOVERY_INIT; 3531 bnx2x_set_reset_in_progress(bp); 3532 schedule_delayed_work(&bp->reset_task, 0); ··· 4753 return 0; /* OK */ 4754 } 4755 4756 - static void enable_blocks_attention(struct bnx2x *bp) 4757 { 4758 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 4759 if (CHIP_IS_E2(bp)) ··· 4807 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 4808 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 4809 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 4810 - REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ 4811 } 4812 - 4813 - static const struct { 4814 - u32 addr; 4815 - u32 mask; 4816 - } bnx2x_parity_mask[] = { 4817 - {PXP_REG_PXP_PRTY_MASK, 0x3ffffff}, 4818 - {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff}, 4819 - {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f}, 4820 - {HC_REG_HC_PRTY_MASK, 0x7}, 4821 - {MISC_REG_MISC_PRTY_MASK, 0x1}, 4822 - {QM_REG_QM_PRTY_MASK, 0x0}, 4823 - {DORQ_REG_DORQ_PRTY_MASK, 0x0}, 4824 - {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0}, 4825 - {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0}, 4826 - {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */ 4827 - {CDU_REG_CDU_PRTY_MASK, 0x0}, 4828 - {CFC_REG_CFC_PRTY_MASK, 0x0}, 4829 - {DBG_REG_DBG_PRTY_MASK, 0x0}, 4830 - {DMAE_REG_DMAE_PRTY_MASK, 0x0}, 4831 - {BRB1_REG_BRB1_PRTY_MASK, 0x0}, 4832 - {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */ 4833 - {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */ 4834 - {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */ 4835 - {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */ 4836 - {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */ 4837 - {TSEM_REG_TSEM_PRTY_MASK_0, 0x0}, 4838 - 
{TSEM_REG_TSEM_PRTY_MASK_1, 0x0}, 4839 - {USEM_REG_USEM_PRTY_MASK_0, 0x0}, 4840 - {USEM_REG_USEM_PRTY_MASK_1, 0x0}, 4841 - {CSEM_REG_CSEM_PRTY_MASK_0, 0x0}, 4842 - {CSEM_REG_CSEM_PRTY_MASK_1, 0x0}, 4843 - {XSEM_REG_XSEM_PRTY_MASK_0, 0x0}, 4844 - {XSEM_REG_XSEM_PRTY_MASK_1, 0x0} 4845 - }; 4846 - 4847 - static void enable_blocks_parity(struct bnx2x *bp) 4848 - { 4849 - int i; 4850 - 4851 - for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++) 4852 - REG_WR(bp, bnx2x_parity_mask[i].addr, 4853 - bnx2x_parity_mask[i].mask); 4854 - } 4855 - 4856 4857 static void bnx2x_reset_common(struct bnx2x *bp) 4858 { ··· 5305 /* clear PXP2 attentions */ 5306 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 5307 5308 - enable_blocks_attention(bp); 5309 - if (CHIP_PARITY_SUPPORTED(bp)) 5310 - enable_blocks_parity(bp); 5311 5312 if (!BP_NOMCP(bp)) { 5313 /* In E2 2-PORT mode, same ext phy is used for the two paths */ ··· 8706 dev_err(&bp->pdev->dev, "MCP disabled, " 8707 "must load devices in order!\n"); 8708 8709 - /* Set multi queue mode */ 8710 - if ((multi_mode != ETH_RSS_MODE_DISABLED) && 8711 - ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { 8712 - dev_err(&bp->pdev->dev, "Multi disabled since int_mode " 8713 - "requested is not MSI-X\n"); 8714 - multi_mode = ETH_RSS_MODE_DISABLED; 8715 - } 8716 bp->multi_mode = multi_mode; 8717 bp->int_mode = int_mode; 8718 ··· 9508 /* Delete all NAPI objects */ 9509 bnx2x_del_all_napi(bp); 9510 9511 /* Disable MSI/MSI-X */ 9512 bnx2x_disable_msi(bp); 9513 9514 /* Make sure RESET task is not scheduled before continuing */ 9515 cancel_delayed_work_sync(&bp->reset_task);
··· 3152 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) 3153 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) 3154 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS 3155 3156 /* 3157 * should be run under rtnl lock ··· 3527 try to handle this event */ 3528 bnx2x_acquire_alr(bp); 3529 3530 + if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) { 3531 bp->recovery_state = BNX2X_RECOVERY_INIT; 3532 bnx2x_set_reset_in_progress(bp); 3533 schedule_delayed_work(&bp->reset_task, 0); ··· 4754 return 0; /* OK */ 4755 } 4756 4757 + static void bnx2x_enable_blocks_attention(struct bnx2x *bp) 4758 { 4759 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 4760 if (CHIP_IS_E2(bp)) ··· 4808 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 4809 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 4810 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 4811 + REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 4812 } 4813 4814 static void bnx2x_reset_common(struct bnx2x *bp) 4815 { ··· 5350 /* clear PXP2 attentions */ 5351 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 5352 5353 + bnx2x_enable_blocks_attention(bp); 5354 + if (CHIP_PARITY_ENABLED(bp)) 5355 + bnx2x_enable_blocks_parity(bp); 5356 5357 if (!BP_NOMCP(bp)) { 5358 /* In E2 2-PORT mode, same ext phy is used for the two paths */ ··· 8751 dev_err(&bp->pdev->dev, "MCP disabled, " 8752 "must load devices in order!\n"); 8753 8754 bp->multi_mode = multi_mode; 8755 bp->int_mode = int_mode; 8756 ··· 9560 /* Delete all NAPI objects */ 9561 bnx2x_del_all_napi(bp); 9562 9563 + /* Power on: we can't let PCI layer write to us while we are in D3 */ 9564 + bnx2x_set_power_state(bp, PCI_D0); 9565 + 9566 /* Disable MSI/MSI-X */ 9567 bnx2x_disable_msi(bp); 9568 + 9569 + /* Power off */ 9570 + bnx2x_set_power_state(bp, PCI_D3hot); 9571 9572 /* Make sure RESET task is not scheduled before continuing */ 9573 cancel_delayed_work_sync(&bp->reset_task);
+74
drivers/net/bnx2x/bnx2x_reg.h
··· 18 * WR - Write Clear (write 1 to clear the bit) 19 * 20 */ 21 22 #define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) 23 #define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) ··· 41 #define BRB1_REG_BRB1_PRTY_MASK 0x60138 42 /* [R 4] Parity register #0 read */ 43 #define BRB1_REG_BRB1_PRTY_STS 0x6012c 44 /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At 45 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address 46 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - ··· 136 #define CCM_REG_CCM_INT_MASK 0xd01e4 137 /* [R 11] Interrupt register #0 read */ 138 #define CCM_REG_CCM_INT_STS 0xd01d8 139 /* [R 27] Parity register #0 read */ 140 #define CCM_REG_CCM_PRTY_STS 0xd01e8 141 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 142 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 143 Is used to determine the number of the AG context REG-pairs written back; ··· 358 #define CDU_REG_CDU_PRTY_MASK 0x10104c 359 /* [R 5] Parity register #0 read */ 360 #define CDU_REG_CDU_PRTY_STS 0x101040 361 /* [RC 32] logging of error data in case of a CDU load error: 362 {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; 363 ype_error; ctual_active; ctual_compressed_context}; */ ··· 391 #define CFC_REG_CFC_PRTY_MASK 0x104118 392 /* [R 4] Parity register #0 read */ 393 #define CFC_REG_CFC_PRTY_STS 0x10410c 394 /* [RW 21] CID cam access (21:1 - Data; alid - 0) */ 395 #define CFC_REG_CID_CAM 0x104800 396 #define CFC_REG_CONTROL0 0x104028 ··· 478 #define CSDM_REG_CSDM_PRTY_MASK 0xc22bc 479 /* [R 11] Parity register #0 read */ 480 #define CSDM_REG_CSDM_PRTY_STS 0xc22b0 481 #define CSDM_REG_ENABLE_IN1 0xc2238 482 #define CSDM_REG_ENABLE_IN2 0xc223c 483 #define CSDM_REG_ENABLE_OUT1 0xc2240 ··· 570 /* [R 32] Parity register #0 read */ 571 #define CSEM_REG_CSEM_PRTY_STS_0 0x200124 572 #define CSEM_REG_CSEM_PRTY_STS_1 0x200134 573 #define 
CSEM_REG_ENABLE_IN 0x2000a4 574 #define CSEM_REG_ENABLE_OUT 0x2000a8 575 /* [RW 32] This address space contains all registers and memories that are ··· 665 #define DBG_REG_DBG_PRTY_MASK 0xc0a8 666 /* [R 1] Parity register #0 read */ 667 #define DBG_REG_DBG_PRTY_STS 0xc09c 668 /* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The 669 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; 670 * 4.Completion function=0; 5.Error handling=0 */ ··· 687 #define DMAE_REG_DMAE_PRTY_MASK 0x102064 688 /* [R 4] Parity register #0 read */ 689 #define DMAE_REG_DMAE_PRTY_STS 0x102058 690 /* [RW 1] Command 0 go. */ 691 #define DMAE_REG_GO_C0 0x102080 692 /* [RW 1] Command 1 go. */ ··· 755 #define DORQ_REG_DORQ_PRTY_MASK 0x170190 756 /* [R 2] Parity register #0 read */ 757 #define DORQ_REG_DORQ_PRTY_STS 0x170184 758 /* [RW 8] The address to write the DPM CID to STORM. */ 759 #define DORQ_REG_DPM_CID_ADDR 0x170044 760 /* [RW 5] The DPM mode CID extraction offset. */ ··· 865 /* [R 1] data availble for error memory. If this bit is clear do not red 866 * from error_handling_memory. 
*/ 867 #define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 868 /* [R 11] Parity register #0 read */ 869 #define IGU_REG_IGU_PRTY_STS 0x13009c 870 /* [R 4] Debug: int_handle_fsm */ 871 #define IGU_REG_INT_HANDLE_FSM 0x130050 872 #define IGU_REG_LEADING_EDGE_LATCH 0x130134 ··· 1528 #define MISC_REG_MISC_PRTY_MASK 0xa398 1529 /* [R 1] Parity register #0 read */ 1530 #define MISC_REG_MISC_PRTY_STS 0xa38c 1531 #define MISC_REG_NIG_WOL_P0 0xa270 1532 #define MISC_REG_NIG_WOL_P1 0xa274 1533 /* [R 1] If set indicate that the pcie_rst_b was asserted without perst ··· 2111 #define PBF_REG_PBF_INT_MASK 0x1401d4 2112 /* [R 5] Interrupt register #0 read */ 2113 #define PBF_REG_PBF_INT_STS 0x1401c8 2114 #define PB_REG_CONTROL 0 2115 /* [RW 2] Interrupt mask register #0 read/write */ 2116 #define PB_REG_PB_INT_MASK 0x28 ··· 2124 #define PB_REG_PB_PRTY_MASK 0x38 2125 /* [R 4] Parity register #0 read */ 2126 #define PB_REG_PB_PRTY_STS 0x2c 2127 #define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) 2128 #define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) 2129 #define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) ··· 2481 #define PRS_REG_PRS_PRTY_MASK 0x401a4 2482 /* [R 8] Parity register #0 read */ 2483 #define PRS_REG_PRS_PRTY_STS 0x40198 2484 /* [RW 8] Context region for pure acknowledge packets. 
Used in CFC load 2485 request message */ 2486 #define PRS_REG_PURE_REGIONS 0x40024 ··· 2636 /* [R 32] Parity register #0 read */ 2637 #define PXP2_REG_PXP2_PRTY_STS_0 0x12057c 2638 #define PXP2_REG_PXP2_PRTY_STS_1 0x12058c 2639 /* [R 1] Debug only: The 'almost full' indication from each fifo (gives 2640 indication about backpressure) */ 2641 #define PXP2_REG_RD_ALMOST_FULL_0 0x120424 ··· 3041 #define PXP_REG_PXP_PRTY_MASK 0x103094 3042 /* [R 26] Parity register #0 read */ 3043 #define PXP_REG_PXP_PRTY_STS 0x103088 3044 /* [RW 4] The activity counter initial increment value sent in the load 3045 request */ 3046 #define QM_REG_ACTCTRINITVAL_0 0x168040 ··· 3199 #define QM_REG_QM_PRTY_MASK 0x168454 3200 /* [R 12] Parity register #0 read */ 3201 #define QM_REG_QM_PRTY_STS 0x168448 3202 /* [R 32] Current queues in pipeline: Queues from 32 to 63 */ 3203 #define QM_REG_QSTATUS_HIGH 0x16802c 3204 /* [R 32] Current queues in pipeline: Queues from 96 to 127 */ ··· 3486 #define QM_REG_WRRWEIGHTS_9 0x168848 3487 /* [R 6] Keep the fill level of the fifo from write client 1 */ 3488 #define QM_REG_XQM_WRC_FIFOLVL 0x168000 3489 #define SRC_REG_COUNTFREE0 0x40500 3490 /* [RW 1] If clr the searcher is compatible to E1 A0 - support only two 3491 ports. If set the searcher support 8 functions. */ ··· 3516 #define SRC_REG_SRC_PRTY_MASK 0x404c8 3517 /* [R 3] Parity register #0 read */ 3518 #define SRC_REG_SRC_PRTY_STS 0x404bc 3519 /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ 3520 #define TCM_REG_CAM_OCCUP 0x5017c 3521 /* [RW 1] CDU AG read Interface enable. If 0 - the request input is ··· 3644 #define TCM_REG_TCM_INT_MASK 0x501dc 3645 /* [R 11] Interrupt register #0 read */ 3646 #define TCM_REG_TCM_INT_STS 0x501d0 3647 /* [R 27] Parity register #0 read */ 3648 #define TCM_REG_TCM_PRTY_STS 0x501e0 3649 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 3650 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 
3651 Is used to determine the number of the AG context REG-pairs written back; ··· 3807 #define TM_REG_TM_INT_MASK 0x1640fc 3808 /* [R 1] Interrupt register #0 read */ 3809 #define TM_REG_TM_INT_STS 0x1640f0 3810 /* [RW 8] The event id for aggregated interrupt 0 */ 3811 #define TSDM_REG_AGG_INT_EVENT_0 0x42038 3812 #define TSDM_REG_AGG_INT_EVENT_1 0x4203c ··· 3891 #define TSDM_REG_TSDM_PRTY_MASK 0x422bc 3892 /* [R 11] Parity register #0 read */ 3893 #define TSDM_REG_TSDM_PRTY_STS 0x422b0 3894 /* [RW 5] The number of time_slots in the arbitration cycle */ 3895 #define TSEM_REG_ARB_CYCLE_SIZE 0x180034 3896 /* [RW 3] The source that is associated with arbitration element 0. Source ··· 3972 #define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 3973 /* [RW 8] List of free threads . There is a bit per thread. */ 3974 #define TSEM_REG_THREADS_LIST 0x1802e4 3975 /* [RW 3] The arbitration scheme of time_slot 0 */ 3976 #define TSEM_REG_TS_0_AS 0x180038 3977 /* [RW 3] The arbitration scheme of time_slot 10 */ ··· 4177 #define UCM_REG_UCM_INT_STS 0xe01c8 4178 /* [R 27] Parity register #0 read */ 4179 #define UCM_REG_UCM_PRTY_STS 0xe01d8 4180 /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS 4181 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 4182 Is used to determine the number of the AG context REG-pairs written back; ··· 4355 #define USDM_REG_USDM_PRTY_MASK 0xc42c0 4356 /* [R 11] Parity register #0 read */ 4357 #define USDM_REG_USDM_PRTY_STS 0xc42b4 4358 /* [RW 5] The number of time_slots in the arbitration cycle */ 4359 #define USEM_REG_ARB_CYCLE_SIZE 0x300034 4360 /* [RW 3] The source that is associated with arbitration element 0. Source ··· 4486 /* [R 32] Parity register #0 read */ 4487 #define USEM_REG_USEM_PRTY_STS_0 0x300124 4488 #define USEM_REG_USEM_PRTY_STS_1 0x300134 4489 /* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 4490 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. 
*/ 4491 #define USEM_REG_VFPF_ERR_NUM 0x300380 ··· 4865 #define XSDM_REG_XSDM_PRTY_MASK 0x1662bc 4866 /* [R 11] Parity register #0 read */ 4867 #define XSDM_REG_XSDM_PRTY_STS 0x1662b0 4868 /* [RW 5] The number of time_slots in the arbitration cycle */ 4869 #define XSEM_REG_ARB_CYCLE_SIZE 0x280034 4870 /* [RW 3] The source that is associated with arbitration element 0. Source ··· 4999 /* [R 32] Parity register #0 read */ 5000 #define XSEM_REG_XSEM_PRTY_STS_0 0x280124 5001 #define XSEM_REG_XSEM_PRTY_STS_1 0x280134 5002 #define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) 5003 #define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) 5004 #define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) ··· 6389 } 6390 6391
··· 18 * WR - Write Clear (write 1 to clear the bit) 19 * 20 */ 21 + #ifndef BNX2X_REG_H 22 + #define BNX2X_REG_H 23 24 #define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) 25 #define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) ··· 39 #define BRB1_REG_BRB1_PRTY_MASK 0x60138 40 /* [R 4] Parity register #0 read */ 41 #define BRB1_REG_BRB1_PRTY_STS 0x6012c 42 + /* [RC 4] Parity register #0 read clear */ 43 + #define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130 44 /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At 45 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address 46 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - ··· 132 #define CCM_REG_CCM_INT_MASK 0xd01e4 133 /* [R 11] Interrupt register #0 read */ 134 #define CCM_REG_CCM_INT_STS 0xd01d8 135 + /* [RW 27] Parity mask register #0 read/write */ 136 + #define CCM_REG_CCM_PRTY_MASK 0xd01f4 137 /* [R 27] Parity register #0 read */ 138 #define CCM_REG_CCM_PRTY_STS 0xd01e8 139 + /* [RC 27] Parity register #0 read clear */ 140 + #define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec 141 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 142 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 
143 Is used to determine the number of the AG context REG-pairs written back; ··· 350 #define CDU_REG_CDU_PRTY_MASK 0x10104c 351 /* [R 5] Parity register #0 read */ 352 #define CDU_REG_CDU_PRTY_STS 0x101040 353 + /* [RC 5] Parity register #0 read clear */ 354 + #define CDU_REG_CDU_PRTY_STS_CLR 0x101044 355 /* [RC 32] logging of error data in case of a CDU load error: 356 {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; 357 ype_error; ctual_active; ctual_compressed_context}; */ ··· 381 #define CFC_REG_CFC_PRTY_MASK 0x104118 382 /* [R 4] Parity register #0 read */ 383 #define CFC_REG_CFC_PRTY_STS 0x10410c 384 + /* [RC 4] Parity register #0 read clear */ 385 + #define CFC_REG_CFC_PRTY_STS_CLR 0x104110 386 /* [RW 21] CID cam access (21:1 - Data; alid - 0) */ 387 #define CFC_REG_CID_CAM 0x104800 388 #define CFC_REG_CONTROL0 0x104028 ··· 466 #define CSDM_REG_CSDM_PRTY_MASK 0xc22bc 467 /* [R 11] Parity register #0 read */ 468 #define CSDM_REG_CSDM_PRTY_STS 0xc22b0 469 + /* [RC 11] Parity register #0 read clear */ 470 + #define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4 471 #define CSDM_REG_ENABLE_IN1 0xc2238 472 #define CSDM_REG_ENABLE_IN2 0xc223c 473 #define CSDM_REG_ENABLE_OUT1 0xc2240 ··· 556 /* [R 32] Parity register #0 read */ 557 #define CSEM_REG_CSEM_PRTY_STS_0 0x200124 558 #define CSEM_REG_CSEM_PRTY_STS_1 0x200134 559 + /* [RC 32] Parity register #0 read clear */ 560 + #define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128 561 + #define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138 562 #define CSEM_REG_ENABLE_IN 0x2000a4 563 #define CSEM_REG_ENABLE_OUT 0x2000a8 564 /* [RW 32] This address space contains all registers and memories that are ··· 648 #define DBG_REG_DBG_PRTY_MASK 0xc0a8 649 /* [R 1] Parity register #0 read */ 650 #define DBG_REG_DBG_PRTY_STS 0xc09c 651 + /* [RC 1] Parity register #0 read clear */ 652 + #define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0 653 /* [RW 1] When set the DMAE will process the commands as in E1.5. 
1.The 654 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; 655 * 4.Completion function=0; 5.Error handling=0 */ ··· 668 #define DMAE_REG_DMAE_PRTY_MASK 0x102064 669 /* [R 4] Parity register #0 read */ 670 #define DMAE_REG_DMAE_PRTY_STS 0x102058 671 + /* [RC 4] Parity register #0 read clear */ 672 + #define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c 673 /* [RW 1] Command 0 go. */ 674 #define DMAE_REG_GO_C0 0x102080 675 /* [RW 1] Command 1 go. */ ··· 734 #define DORQ_REG_DORQ_PRTY_MASK 0x170190 735 /* [R 2] Parity register #0 read */ 736 #define DORQ_REG_DORQ_PRTY_STS 0x170184 737 + /* [RC 2] Parity register #0 read clear */ 738 + #define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188 739 /* [RW 8] The address to write the DPM CID to STORM. */ 740 #define DORQ_REG_DPM_CID_ADDR 0x170044 741 /* [RW 5] The DPM mode CID extraction offset. */ ··· 842 /* [R 1] data availble for error memory. If this bit is clear do not red 843 * from error_handling_memory. */ 844 #define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 845 + /* [RW 11] Parity mask register #0 read/write */ 846 + #define IGU_REG_IGU_PRTY_MASK 0x1300a8 847 /* [R 11] Parity register #0 read */ 848 #define IGU_REG_IGU_PRTY_STS 0x13009c 849 + /* [RC 11] Parity register #0 read clear */ 850 + #define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0 851 /* [R 4] Debug: int_handle_fsm */ 852 #define IGU_REG_INT_HANDLE_FSM 0x130050 853 #define IGU_REG_LEADING_EDGE_LATCH 0x130134 ··· 1501 #define MISC_REG_MISC_PRTY_MASK 0xa398 1502 /* [R 1] Parity register #0 read */ 1503 #define MISC_REG_MISC_PRTY_STS 0xa38c 1504 + /* [RC 1] Parity register #0 read clear */ 1505 + #define MISC_REG_MISC_PRTY_STS_CLR 0xa390 1506 #define MISC_REG_NIG_WOL_P0 0xa270 1507 #define MISC_REG_NIG_WOL_P1 0xa274 1508 /* [R 1] If set indicate that the pcie_rst_b was asserted without perst ··· 2082 #define PBF_REG_PBF_INT_MASK 0x1401d4 2083 /* [R 5] Interrupt register #0 read */ 2084 #define PBF_REG_PBF_INT_STS 0x1401c8 2085 + /* [RW 20] Parity mask register #0 
read/write */ 2086 + #define PBF_REG_PBF_PRTY_MASK 0x1401e4 2087 + /* [RC 20] Parity register #0 read clear */ 2088 + #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc 2089 #define PB_REG_CONTROL 0 2090 /* [RW 2] Interrupt mask register #0 read/write */ 2091 #define PB_REG_PB_INT_MASK 0x28 ··· 2091 #define PB_REG_PB_PRTY_MASK 0x38 2092 /* [R 4] Parity register #0 read */ 2093 #define PB_REG_PB_PRTY_STS 0x2c 2094 + /* [RC 4] Parity register #0 read clear */ 2095 + #define PB_REG_PB_PRTY_STS_CLR 0x30 2096 #define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) 2097 #define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) 2098 #define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) ··· 2446 #define PRS_REG_PRS_PRTY_MASK 0x401a4 2447 /* [R 8] Parity register #0 read */ 2448 #define PRS_REG_PRS_PRTY_STS 0x40198 2449 + /* [RC 8] Parity register #0 read clear */ 2450 + #define PRS_REG_PRS_PRTY_STS_CLR 0x4019c 2451 /* [RW 8] Context region for pure acknowledge packets. Used in CFC load 2452 request message */ 2453 #define PRS_REG_PURE_REGIONS 0x40024 ··· 2599 /* [R 32] Parity register #0 read */ 2600 #define PXP2_REG_PXP2_PRTY_STS_0 0x12057c 2601 #define PXP2_REG_PXP2_PRTY_STS_1 0x12058c 2602 + /* [RC 32] Parity register #0 read clear */ 2603 + #define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580 2604 + #define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590 2605 /* [R 1] Debug only: The 'almost full' indication from each fifo (gives 2606 indication about backpressure) */ 2607 #define PXP2_REG_RD_ALMOST_FULL_0 0x120424 ··· 3001 #define PXP_REG_PXP_PRTY_MASK 0x103094 3002 /* [R 26] Parity register #0 read */ 3003 #define PXP_REG_PXP_PRTY_STS 0x103088 3004 + /* [RC 27] Parity register #0 read clear */ 3005 + #define PXP_REG_PXP_PRTY_STS_CLR 0x10308c 3006 /* [RW 4] The activity counter initial increment value sent in the load 3007 request */ 3008 #define QM_REG_ACTCTRINITVAL_0 0x168040 ··· 3157 #define QM_REG_QM_PRTY_MASK 0x168454 3158 /* [R 12] Parity register #0 read */ 
3159 #define QM_REG_QM_PRTY_STS 0x168448 3160 + /* [RC 12] Parity register #0 read clear */ 3161 + #define QM_REG_QM_PRTY_STS_CLR 0x16844c 3162 /* [R 32] Current queues in pipeline: Queues from 32 to 63 */ 3163 #define QM_REG_QSTATUS_HIGH 0x16802c 3164 /* [R 32] Current queues in pipeline: Queues from 96 to 127 */ ··· 3442 #define QM_REG_WRRWEIGHTS_9 0x168848 3443 /* [R 6] Keep the fill level of the fifo from write client 1 */ 3444 #define QM_REG_XQM_WRC_FIFOLVL 0x168000 3445 + /* [W 1] reset to parity interrupt */ 3446 + #define SEM_FAST_REG_PARITY_RST 0x18840 3447 #define SRC_REG_COUNTFREE0 0x40500 3448 /* [RW 1] If clr the searcher is compatible to E1 A0 - support only two 3449 ports. If set the searcher support 8 functions. */ ··· 3470 #define SRC_REG_SRC_PRTY_MASK 0x404c8 3471 /* [R 3] Parity register #0 read */ 3472 #define SRC_REG_SRC_PRTY_STS 0x404bc 3473 + /* [RC 3] Parity register #0 read clear */ 3474 + #define SRC_REG_SRC_PRTY_STS_CLR 0x404c0 3475 /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ 3476 #define TCM_REG_CAM_OCCUP 0x5017c 3477 /* [RW 1] CDU AG read Interface enable. If 0 - the request input is ··· 3596 #define TCM_REG_TCM_INT_MASK 0x501dc 3597 /* [R 11] Interrupt register #0 read */ 3598 #define TCM_REG_TCM_INT_STS 0x501d0 3599 + /* [RW 27] Parity mask register #0 read/write */ 3600 + #define TCM_REG_TCM_PRTY_MASK 0x501ec 3601 /* [R 27] Parity register #0 read */ 3602 #define TCM_REG_TCM_PRTY_STS 0x501e0 3603 + /* [RC 27] Parity register #0 read clear */ 3604 + #define TCM_REG_TCM_PRTY_STS_CLR 0x501e4 3605 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 3606 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 
3607 Is used to determine the number of the AG context REG-pairs written back; ··· 3755 #define TM_REG_TM_INT_MASK 0x1640fc 3756 /* [R 1] Interrupt register #0 read */ 3757 #define TM_REG_TM_INT_STS 0x1640f0 3758 + /* [RW 7] Parity mask register #0 read/write */ 3759 + #define TM_REG_TM_PRTY_MASK 0x16410c 3760 + /* [RC 7] Parity register #0 read clear */ 3761 + #define TM_REG_TM_PRTY_STS_CLR 0x164104 3762 /* [RW 8] The event id for aggregated interrupt 0 */ 3763 #define TSDM_REG_AGG_INT_EVENT_0 0x42038 3764 #define TSDM_REG_AGG_INT_EVENT_1 0x4203c ··· 3835 #define TSDM_REG_TSDM_PRTY_MASK 0x422bc 3836 /* [R 11] Parity register #0 read */ 3837 #define TSDM_REG_TSDM_PRTY_STS 0x422b0 3838 + /* [RC 11] Parity register #0 read clear */ 3839 + #define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4 3840 /* [RW 5] The number of time_slots in the arbitration cycle */ 3841 #define TSEM_REG_ARB_CYCLE_SIZE 0x180034 3842 /* [RW 3] The source that is associated with arbitration element 0. Source ··· 3914 #define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 3915 /* [RW 8] List of free threads . There is a bit per thread. */ 3916 #define TSEM_REG_THREADS_LIST 0x1802e4 3917 + /* [RC 32] Parity register #0 read clear */ 3918 + #define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118 3919 + #define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128 3920 /* [RW 3] The arbitration scheme of time_slot 0 */ 3921 #define TSEM_REG_TS_0_AS 0x180038 3922 /* [RW 3] The arbitration scheme of time_slot 10 */ ··· 4116 #define UCM_REG_UCM_INT_STS 0xe01c8 4117 /* [R 27] Parity register #0 read */ 4118 #define UCM_REG_UCM_PRTY_STS 0xe01d8 4119 + /* [RC 27] Parity register #0 read clear */ 4120 + #define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc 4121 /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS 4122 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 
4123 Is used to determine the number of the AG context REG-pairs written back; ··· 4292 #define USDM_REG_USDM_PRTY_MASK 0xc42c0 4293 /* [R 11] Parity register #0 read */ 4294 #define USDM_REG_USDM_PRTY_STS 0xc42b4 4295 + /* [RC 11] Parity register #0 read clear */ 4296 + #define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8 4297 /* [RW 5] The number of time_slots in the arbitration cycle */ 4298 #define USEM_REG_ARB_CYCLE_SIZE 0x300034 4299 /* [RW 3] The source that is associated with arbitration element 0. Source ··· 4421 /* [R 32] Parity register #0 read */ 4422 #define USEM_REG_USEM_PRTY_STS_0 0x300124 4423 #define USEM_REG_USEM_PRTY_STS_1 0x300134 4424 + /* [RC 32] Parity register #0 read clear */ 4425 + #define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128 4426 + #define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138 4427 /* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 4428 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ 4429 #define USEM_REG_VFPF_ERR_NUM 0x300380 ··· 4797 #define XSDM_REG_XSDM_PRTY_MASK 0x1662bc 4798 /* [R 11] Parity register #0 read */ 4799 #define XSDM_REG_XSDM_PRTY_STS 0x1662b0 4800 + /* [RC 11] Parity register #0 read clear */ 4801 + #define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4 4802 /* [RW 5] The number of time_slots in the arbitration cycle */ 4803 #define XSEM_REG_ARB_CYCLE_SIZE 0x280034 4804 /* [RW 3] The source that is associated with arbitration element 0. Source ··· 4929 /* [R 32] Parity register #0 read */ 4930 #define XSEM_REG_XSEM_PRTY_STS_0 0x280124 4931 #define XSEM_REG_XSEM_PRTY_STS_1 0x280134 4932 + /* [RC 32] Parity register #0 read clear */ 4933 + #define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128 4934 + #define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138 4935 #define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) 4936 #define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) 4937 #define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) ··· 6316 } 6317 6318 6319 + #endif /* BNX2X_REG_H */
+5
drivers/net/bnx2x/bnx2x_stats.c
··· 158 159 spin_lock_bh(&bp->stats_lock); 160 161 ramrod_data.drv_counter = bp->stats_counter++; 162 ramrod_data.collect_port = bp->port.pmf ? 1 : 0; 163 for_each_eth_queue(bp, i)
··· 158 159 spin_lock_bh(&bp->stats_lock); 160 161 + if (bp->stats_pending) { 162 + spin_unlock_bh(&bp->stats_lock); 163 + return; 164 + } 165 + 166 ramrod_data.drv_counter = bp->stats_counter++; 167 ramrod_data.collect_port = bp->port.pmf ? 1 : 0; 168 for_each_eth_queue(bp, i)
+10 -5
drivers/net/cxgb4vf/cxgb4vf_main.c
··· 749 netif_set_real_num_tx_queues(dev, pi->nqsets); 750 err = netif_set_real_num_rx_queues(dev, pi->nqsets); 751 if (err) 752 - return err; 753 - set_bit(pi->port_id, &adapter->open_device_map); 754 err = link_start(dev); 755 if (err) 756 - return err; 757 netif_tx_start_all_queues(dev); 758 return 0; 759 } 760 761 /* ··· 770 */ 771 static int cxgb4vf_stop(struct net_device *dev) 772 { 773 - int ret; 774 struct port_info *pi = netdev_priv(dev); 775 struct adapter *adapter = pi->adapter; 776 777 netif_tx_stop_all_queues(dev); 778 netif_carrier_off(dev); 779 - ret = t4vf_enable_vi(adapter, pi->viid, false, false); 780 pi->link_cfg.link_ok = 0; 781 782 clear_bit(pi->port_id, &adapter->open_device_map);
··· 749 netif_set_real_num_tx_queues(dev, pi->nqsets); 750 err = netif_set_real_num_rx_queues(dev, pi->nqsets); 751 if (err) 752 + goto err_unwind; 753 err = link_start(dev); 754 if (err) 755 + goto err_unwind; 756 + 757 netif_tx_start_all_queues(dev); 758 + set_bit(pi->port_id, &adapter->open_device_map); 759 return 0; 760 + 761 + err_unwind: 762 + if (adapter->open_device_map == 0) 763 + adapter_down(adapter); 764 + return err; 765 } 766 767 /* ··· 764 */ 765 static int cxgb4vf_stop(struct net_device *dev) 766 { 767 struct port_info *pi = netdev_priv(dev); 768 struct adapter *adapter = pi->adapter; 769 770 netif_tx_stop_all_queues(dev); 771 netif_carrier_off(dev); 772 + t4vf_enable_vi(adapter, pi->viid, false, false); 773 pi->link_cfg.link_ok = 0; 774 775 clear_bit(pi->port_id, &adapter->open_device_map);
+11
drivers/net/cxgb4vf/t4vf_hw.c
··· 147 /* 148 * Write the command array into the Mailbox Data register array and 149 * transfer ownership of the mailbox to the firmware. 150 */ 151 for (i = 0, p = cmd; i < size; i += 8) 152 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); 153 t4_write_reg(adapter, mbox_ctl, 154 MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); 155 t4_read_reg(adapter, mbox_ctl); /* flush write */
··· 147 /* 148 * Write the command array into the Mailbox Data register array and 149 * transfer ownership of the mailbox to the firmware. 150 + * 151 + * For the VFs, the Mailbox Data "registers" are actually backed by 152 + * T4's "MA" interface rather than PL Registers (as is the case for 153 + * the PFs). Because these are in different coherency domains, the 154 + * write to the VF's PL-register-backed Mailbox Control can race in 155 + * front of the writes to the MA-backed VF Mailbox Data "registers". 156 + * So we need to do a read-back on at least one byte of the VF Mailbox 157 + * Data registers before doing the write to the VF Mailbox Control 158 + * register. 159 */ 160 for (i = 0, p = cmd; i < size; i += 8) 161 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); 162 + t4_read_reg(adapter, mbox_data); /* flush write */ 163 + 164 t4_write_reg(adapter, mbox_ctl, 165 MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); 166 t4_read_reg(adapter, mbox_ctl); /* flush write */
+260 -68
drivers/net/e1000/e1000_hw.c
··· 130 if (hw->mac_type == e1000_82541 || 131 hw->mac_type == e1000_82541_rev_2 || 132 hw->mac_type == e1000_82547 || 133 - hw->mac_type == e1000_82547_rev_2) { 134 hw->phy_type = e1000_phy_igp; 135 - break; 136 - } 137 default: 138 /* Should never have loaded on this device */ 139 hw->phy_type = e1000_phy_undefined; ··· 323 case E1000_DEV_ID_82547GI: 324 hw->mac_type = e1000_82547_rev_2; 325 break; 326 default: 327 /* Should never have loaded on this device */ 328 return -E1000_ERR_MAC_TYPE; ··· 379 case e1000_82542_rev2_0: 380 case e1000_82542_rev2_1: 381 hw->media_type = e1000_media_type_fiber; 382 break; 383 default: 384 status = er32(STATUS); ··· 471 /* Reset is performed on a shadow of the control register */ 472 ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); 473 break; 474 default: 475 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 476 break; ··· 964 } 965 966 /** 967 * e1000_copper_link_preconfig - early configuration for copper 968 * @hw: Struct containing variables accessed by shared code 969 * ··· 1359 if (hw->autoneg_advertised == 0) 1360 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1361 1362 e_dbg("Reconfiguring auto-neg advertisement params\n"); 1363 ret_val = e1000_phy_setup_autoneg(hw); 1364 if (ret_val) { ··· 1418 s32 ret_val; 1419 e_dbg("e1000_copper_link_postconfig"); 1420 1421 - if (hw->mac_type >= e1000_82544) { 1422 e1000_config_collision_dist(hw); 1423 } else { 1424 ret_val = e1000_config_mac_to_phy(hw); ··· 1472 ret_val = e1000_copper_link_mgp_setup(hw); 1473 if (ret_val) 1474 return ret_val; 1475 } 1476 1477 if (hw->autoneg) { ··· 1544 return ret_val; 1545 1546 /* Read the MII 1000Base-T Control Register (Address 9). */ 1547 - ret_val = 1548 - e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 1549 if (ret_val) 1550 return ret_val; 1551 1552 /* Need to parse both autoneg_advertised and fc and set up 1553 * the appropriate PHY registers. 
First we will parse for ··· 1661 1662 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1663 1664 - ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1665 - if (ret_val) 1666 - return ret_val; 1667 1668 return E1000_SUCCESS; 1669 } ··· 1949 1950 /* 82544 or newer MAC, Auto Speed Detection takes care of 1951 * MAC speed/duplex configuration.*/ 1952 - if (hw->mac_type >= e1000_82544) 1953 return E1000_SUCCESS; 1954 1955 /* Read the Device Control Register and set the bits to Force Speed ··· 1959 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1960 ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); 1961 1962 - /* Set up duplex in the Device Control and Transmit Control 1963 - * registers depending on negotiated values. 1964 - */ 1965 - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 1966 - if (ret_val) 1967 - return ret_val; 1968 1969 - if (phy_data & M88E1000_PSSR_DPLX) 1970 - ctrl |= E1000_CTRL_FD; 1971 - else 1972 - ctrl &= ~E1000_CTRL_FD; 1973 1974 - e1000_config_collision_dist(hw); 1975 1976 - /* Set up speed in the Device Control register depending on 1977 - * negotiated values. 1978 - */ 1979 - if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) 1980 - ctrl |= E1000_CTRL_SPD_1000; 1981 - else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) 1982 - ctrl |= E1000_CTRL_SPD_100; 1983 1984 /* Write the configured values back to the Device Control Reg. */ 1985 ew32(CTRL, ctrl); ··· 2512 * speed/duplex on the MAC to the current PHY speed/duplex 2513 * settings. 2514 */ 2515 - if (hw->mac_type >= e1000_82544) 2516 e1000_config_collision_dist(hw); 2517 else { 2518 ret_val = e1000_config_mac_to_phy(hw); ··· 2850 { 2851 u32 i; 2852 u32 mdic = 0; 2853 - const u32 phy_addr = 1; 2854 2855 e_dbg("e1000_read_phy_reg_ex"); 2856 ··· 2864 * Control register. The MAC will take care of interfacing with the 2865 * PHY to retrieve the desired data. 
2866 */ 2867 - mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | 2868 - (phy_addr << E1000_MDIC_PHY_SHIFT) | 2869 - (E1000_MDIC_OP_READ)); 2870 2871 - ew32(MDIC, mdic); 2872 2873 - /* Poll the ready bit to see if the MDI read completed */ 2874 - for (i = 0; i < 64; i++) { 2875 - udelay(50); 2876 - mdic = er32(MDIC); 2877 - if (mdic & E1000_MDIC_READY) 2878 - break; 2879 } 2880 - if (!(mdic & E1000_MDIC_READY)) { 2881 - e_dbg("MDI Read did not complete\n"); 2882 - return -E1000_ERR_PHY; 2883 - } 2884 - if (mdic & E1000_MDIC_ERROR) { 2885 - e_dbg("MDI Error\n"); 2886 - return -E1000_ERR_PHY; 2887 - } 2888 - *phy_data = (u16) mdic; 2889 } else { 2890 /* We must first send a preamble through the MDIO pin to signal the 2891 * beginning of an MII instruction. This is done by sending 32 ··· 2985 { 2986 u32 i; 2987 u32 mdic = 0; 2988 - const u32 phy_addr = 1; 2989 2990 e_dbg("e1000_write_phy_reg_ex"); 2991 ··· 2995 } 2996 2997 if (hw->mac_type > e1000_82543) { 2998 - /* Set up Op-code, Phy Address, register address, and data intended 2999 - * for the PHY register in the MDI Control register. The MAC will take 3000 - * care of interfacing with the PHY to send the desired data. 
3001 */ 3002 - mdic = (((u32) phy_data) | 3003 - (reg_addr << E1000_MDIC_REG_SHIFT) | 3004 - (phy_addr << E1000_MDIC_PHY_SHIFT) | 3005 - (E1000_MDIC_OP_WRITE)); 3006 3007 - ew32(MDIC, mdic); 3008 3009 - /* Poll the ready bit to see if the MDI read completed */ 3010 - for (i = 0; i < 641; i++) { 3011 - udelay(5); 3012 - mdic = er32(MDIC); 3013 - if (mdic & E1000_MDIC_READY) 3014 - break; 3015 - } 3016 - if (!(mdic & E1000_MDIC_READY)) { 3017 - e_dbg("MDI Write did not complete\n"); 3018 - return -E1000_ERR_PHY; 3019 } 3020 } else { 3021 /* We'll need to use the SW defined pins to shift the write command ··· 3218 case e1000_82546: 3219 case e1000_82546_rev_3: 3220 if (hw->phy_id == M88E1011_I_PHY_ID) 3221 match = true; 3222 break; 3223 case e1000_82541: ··· 3468 3469 if (hw->phy_type == e1000_phy_igp) 3470 return e1000_phy_igp_get_info(hw, phy_info); 3471 else 3472 return e1000_phy_m88_get_info(hw, phy_info); 3473 } ··· 3922 3923 e_dbg("e1000_read_eeprom"); 3924 3925 /* If eeprom is not yet detected, do so now */ 3926 if (eeprom->word_size == 0) 3927 e1000_init_eeprom_params(hw); ··· 4089 s32 status = 0; 4090 4091 e_dbg("e1000_write_eeprom"); 4092 4093 /* If eeprom is not yet detected, do so now */ 4094 if (eeprom->word_size == 0)
··· 130 if (hw->mac_type == e1000_82541 || 131 hw->mac_type == e1000_82541_rev_2 || 132 hw->mac_type == e1000_82547 || 133 + hw->mac_type == e1000_82547_rev_2) 134 hw->phy_type = e1000_phy_igp; 135 + break; 136 + case RTL8211B_PHY_ID: 137 + hw->phy_type = e1000_phy_8211; 138 + break; 139 + case RTL8201N_PHY_ID: 140 + hw->phy_type = e1000_phy_8201; 141 + break; 142 default: 143 /* Should never have loaded on this device */ 144 hw->phy_type = e1000_phy_undefined; ··· 318 case E1000_DEV_ID_82547GI: 319 hw->mac_type = e1000_82547_rev_2; 320 break; 321 + case E1000_DEV_ID_INTEL_CE4100_GBE: 322 + hw->mac_type = e1000_ce4100; 323 + break; 324 default: 325 /* Should never have loaded on this device */ 326 return -E1000_ERR_MAC_TYPE; ··· 371 case e1000_82542_rev2_0: 372 case e1000_82542_rev2_1: 373 hw->media_type = e1000_media_type_fiber; 374 + break; 375 + case e1000_ce4100: 376 + hw->media_type = e1000_media_type_copper; 377 break; 378 default: 379 status = er32(STATUS); ··· 460 /* Reset is performed on a shadow of the control register */ 461 ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); 462 break; 463 + case e1000_ce4100: 464 default: 465 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 466 break; ··· 952 } 953 954 /** 955 + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. 956 + * @hw: Struct containing variables accessed by shared code 957 + * 958 + * Commits changes to PHY configuration by calling e1000_phy_reset(). 
959 + */ 960 + static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) 961 + { 962 + s32 ret_val; 963 + 964 + /* SW reset the PHY so all changes take effect */ 965 + ret_val = e1000_phy_reset(hw); 966 + if (ret_val) { 967 + e_dbg("Error Resetting the PHY\n"); 968 + return ret_val; 969 + } 970 + 971 + return E1000_SUCCESS; 972 + } 973 + 974 + static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) 975 + { 976 + s32 ret_val; 977 + u32 ctrl_aux; 978 + 979 + switch (hw->phy_type) { 980 + case e1000_phy_8211: 981 + ret_val = e1000_copper_link_rtl_setup(hw); 982 + if (ret_val) { 983 + e_dbg("e1000_copper_link_rtl_setup failed!\n"); 984 + return ret_val; 985 + } 986 + break; 987 + case e1000_phy_8201: 988 + /* Set RMII mode */ 989 + ctrl_aux = er32(CTL_AUX); 990 + ctrl_aux |= E1000_CTL_AUX_RMII; 991 + ew32(CTL_AUX, ctrl_aux); 992 + E1000_WRITE_FLUSH(); 993 + 994 + /* Disable the J/K bits required for receive */ 995 + ctrl_aux = er32(CTL_AUX); 996 + ctrl_aux |= 0x4; 997 + ctrl_aux &= ~0x2; 998 + ew32(CTL_AUX, ctrl_aux); 999 + E1000_WRITE_FLUSH(); 1000 + ret_val = e1000_copper_link_rtl_setup(hw); 1001 + 1002 + if (ret_val) { 1003 + e_dbg("e1000_copper_link_rtl_setup failed!\n"); 1004 + return ret_val; 1005 + } 1006 + break; 1007 + default: 1008 + e_dbg("Error Resetting the PHY\n"); 1009 + return E1000_ERR_PHY_TYPE; 1010 + } 1011 + 1012 + return E1000_SUCCESS; 1013 + } 1014 + 1015 + /** 1016 * e1000_copper_link_preconfig - early configuration for copper 1017 * @hw: Struct containing variables accessed by shared code 1018 * ··· 1286 if (hw->autoneg_advertised == 0) 1287 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1288 1289 + /* IFE/RTL8201N PHY only supports 10/100 */ 1290 + if (hw->phy_type == e1000_phy_8201) 1291 + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; 1292 + 1293 e_dbg("Reconfiguring auto-neg advertisement params\n"); 1294 ret_val = e1000_phy_setup_autoneg(hw); 1295 if (ret_val) { ··· 1341 s32 ret_val; 1342 
e_dbg("e1000_copper_link_postconfig"); 1343 1344 + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { 1345 e1000_config_collision_dist(hw); 1346 } else { 1347 ret_val = e1000_config_mac_to_phy(hw); ··· 1395 ret_val = e1000_copper_link_mgp_setup(hw); 1396 if (ret_val) 1397 return ret_val; 1398 + } else { 1399 + ret_val = gbe_dhg_phy_setup(hw); 1400 + if (ret_val) { 1401 + e_dbg("gbe_dhg_phy_setup failed!\n"); 1402 + return ret_val; 1403 + } 1404 } 1405 1406 if (hw->autoneg) { ··· 1461 return ret_val; 1462 1463 /* Read the MII 1000Base-T Control Register (Address 9). */ 1464 + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 1465 if (ret_val) 1466 return ret_val; 1467 + else if (hw->phy_type == e1000_phy_8201) 1468 + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; 1469 1470 /* Need to parse both autoneg_advertised and fc and set up 1471 * the appropriate PHY registers. First we will parse for ··· 1577 1578 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1579 1580 + if (hw->phy_type == e1000_phy_8201) { 1581 + mii_1000t_ctrl_reg = 0; 1582 + } else { 1583 + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, 1584 + mii_1000t_ctrl_reg); 1585 + if (ret_val) 1586 + return ret_val; 1587 + } 1588 1589 return E1000_SUCCESS; 1590 } ··· 1860 1861 /* 82544 or newer MAC, Auto Speed Detection takes care of 1862 * MAC speed/duplex configuration.*/ 1863 + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) 1864 return E1000_SUCCESS; 1865 1866 /* Read the Device Control Register and set the bits to Force Speed ··· 1870 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1871 ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); 1872 1873 + switch (hw->phy_type) { 1874 + case e1000_phy_8201: 1875 + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 1876 + if (ret_val) 1877 + return ret_val; 1878 1879 + if (phy_data & RTL_PHY_CTRL_FD) 1880 + ctrl |= E1000_CTRL_FD; 1881 + else 1882 + ctrl &= ~E1000_CTRL_FD; 1883 1884 + if (phy_data 
& RTL_PHY_CTRL_SPD_100) 1885 + ctrl |= E1000_CTRL_SPD_100; 1886 + else 1887 + ctrl |= E1000_CTRL_SPD_10; 1888 1889 + e1000_config_collision_dist(hw); 1890 + break; 1891 + default: 1892 + /* Set up duplex in the Device Control and Transmit Control 1893 + * registers depending on negotiated values. 1894 + */ 1895 + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 1896 + &phy_data); 1897 + if (ret_val) 1898 + return ret_val; 1899 + 1900 + if (phy_data & M88E1000_PSSR_DPLX) 1901 + ctrl |= E1000_CTRL_FD; 1902 + else 1903 + ctrl &= ~E1000_CTRL_FD; 1904 + 1905 + e1000_config_collision_dist(hw); 1906 + 1907 + /* Set up speed in the Device Control register depending on 1908 + * negotiated values. 1909 + */ 1910 + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) 1911 + ctrl |= E1000_CTRL_SPD_1000; 1912 + else if ((phy_data & M88E1000_PSSR_SPEED) == 1913 + M88E1000_PSSR_100MBS) 1914 + ctrl |= E1000_CTRL_SPD_100; 1915 + } 1916 1917 /* Write the configured values back to the Device Control Reg. */ 1918 ew32(CTRL, ctrl); ··· 2401 * speed/duplex on the MAC to the current PHY speed/duplex 2402 * settings. 2403 */ 2404 + if ((hw->mac_type >= e1000_82544) && 2405 + (hw->mac_type != e1000_ce4100)) 2406 e1000_config_collision_dist(hw); 2407 else { 2408 ret_val = e1000_config_mac_to_phy(hw); ··· 2738 { 2739 u32 i; 2740 u32 mdic = 0; 2741 + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; 2742 2743 e_dbg("e1000_read_phy_reg_ex"); 2744 ··· 2752 * Control register. The MAC will take care of interfacing with the 2753 * PHY to retrieve the desired data. 
2754 */ 2755 + if (hw->mac_type == e1000_ce4100) { 2756 + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | 2757 + (phy_addr << E1000_MDIC_PHY_SHIFT) | 2758 + (INTEL_CE_GBE_MDIC_OP_READ) | 2759 + (INTEL_CE_GBE_MDIC_GO)); 2760 2761 + writel(mdic, E1000_MDIO_CMD); 2762 2763 + /* Poll the ready bit to see if the MDI read 2764 + * completed 2765 + */ 2766 + for (i = 0; i < 64; i++) { 2767 + udelay(50); 2768 + mdic = readl(E1000_MDIO_CMD); 2769 + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) 2770 + break; 2771 + } 2772 + 2773 + if (mdic & INTEL_CE_GBE_MDIC_GO) { 2774 + e_dbg("MDI Read did not complete\n"); 2775 + return -E1000_ERR_PHY; 2776 + } 2777 + 2778 + mdic = readl(E1000_MDIO_STS); 2779 + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { 2780 + e_dbg("MDI Read Error\n"); 2781 + return -E1000_ERR_PHY; 2782 + } 2783 + *phy_data = (u16) mdic; 2784 + } else { 2785 + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | 2786 + (phy_addr << E1000_MDIC_PHY_SHIFT) | 2787 + (E1000_MDIC_OP_READ)); 2788 + 2789 + ew32(MDIC, mdic); 2790 + 2791 + /* Poll the ready bit to see if the MDI read 2792 + * completed 2793 + */ 2794 + for (i = 0; i < 64; i++) { 2795 + udelay(50); 2796 + mdic = er32(MDIC); 2797 + if (mdic & E1000_MDIC_READY) 2798 + break; 2799 + } 2800 + if (!(mdic & E1000_MDIC_READY)) { 2801 + e_dbg("MDI Read did not complete\n"); 2802 + return -E1000_ERR_PHY; 2803 + } 2804 + if (mdic & E1000_MDIC_ERROR) { 2805 + e_dbg("MDI Error\n"); 2806 + return -E1000_ERR_PHY; 2807 + } 2808 + *phy_data = (u16) mdic; 2809 } 2810 } else { 2811 /* We must first send a preamble through the MDIO pin to signal the 2812 * beginning of an MII instruction. This is done by sending 32 ··· 2840 { 2841 u32 i; 2842 u32 mdic = 0; 2843 + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? 
hw->phy_addr : 1; 2844 2845 e_dbg("e1000_write_phy_reg_ex"); 2846 ··· 2850 } 2851 2852 if (hw->mac_type > e1000_82543) { 2853 + /* Set up Op-code, Phy Address, register address, and data 2854 + * intended for the PHY register in the MDI Control register. 2855 + * The MAC will take care of interfacing with the PHY to send 2856 + * the desired data. 2857 */ 2858 + if (hw->mac_type == e1000_ce4100) { 2859 + mdic = (((u32) phy_data) | 2860 + (reg_addr << E1000_MDIC_REG_SHIFT) | 2861 + (phy_addr << E1000_MDIC_PHY_SHIFT) | 2862 + (INTEL_CE_GBE_MDIC_OP_WRITE) | 2863 + (INTEL_CE_GBE_MDIC_GO)); 2864 2865 + writel(mdic, E1000_MDIO_CMD); 2866 2867 + /* Poll the ready bit to see if the MDI read 2868 + * completed 2869 + */ 2870 + for (i = 0; i < 640; i++) { 2871 + udelay(5); 2872 + mdic = readl(E1000_MDIO_CMD); 2873 + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) 2874 + break; 2875 + } 2876 + if (mdic & INTEL_CE_GBE_MDIC_GO) { 2877 + e_dbg("MDI Write did not complete\n"); 2878 + return -E1000_ERR_PHY; 2879 + } 2880 + } else { 2881 + mdic = (((u32) phy_data) | 2882 + (reg_addr << E1000_MDIC_REG_SHIFT) | 2883 + (phy_addr << E1000_MDIC_PHY_SHIFT) | 2884 + (E1000_MDIC_OP_WRITE)); 2885 + 2886 + ew32(MDIC, mdic); 2887 + 2888 + /* Poll the ready bit to see if the MDI read 2889 + * completed 2890 + */ 2891 + for (i = 0; i < 641; i++) { 2892 + udelay(5); 2893 + mdic = er32(MDIC); 2894 + if (mdic & E1000_MDIC_READY) 2895 + break; 2896 + } 2897 + if (!(mdic & E1000_MDIC_READY)) { 2898 + e_dbg("MDI Write did not complete\n"); 2899 + return -E1000_ERR_PHY; 2900 + } 2901 } 2902 } else { 2903 /* We'll need to use the SW defined pins to shift the write command ··· 3046 case e1000_82546: 3047 case e1000_82546_rev_3: 3048 if (hw->phy_id == M88E1011_I_PHY_ID) 3049 + match = true; 3050 + break; 3051 + case e1000_ce4100: 3052 + if ((hw->phy_id == RTL8211B_PHY_ID) || 3053 + (hw->phy_id == RTL8201N_PHY_ID)) 3054 match = true; 3055 break; 3056 case e1000_82541: ··· 3291 3292 if (hw->phy_type == e1000_phy_igp) 
3293 return e1000_phy_igp_get_info(hw, phy_info); 3294 + else if ((hw->phy_type == e1000_phy_8211) || 3295 + (hw->phy_type == e1000_phy_8201)) 3296 + return E1000_SUCCESS; 3297 else 3298 return e1000_phy_m88_get_info(hw, phy_info); 3299 } ··· 3742 3743 e_dbg("e1000_read_eeprom"); 3744 3745 + if (hw->mac_type == e1000_ce4100) { 3746 + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, 3747 + data); 3748 + return E1000_SUCCESS; 3749 + } 3750 + 3751 /* If eeprom is not yet detected, do so now */ 3752 if (eeprom->word_size == 0) 3753 e1000_init_eeprom_params(hw); ··· 3903 s32 status = 0; 3904 3905 e_dbg("e1000_write_eeprom"); 3906 + 3907 + if (hw->mac_type == e1000_ce4100) { 3908 + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, 3909 + data); 3910 + return E1000_SUCCESS; 3911 + } 3912 3913 /* If eeprom is not yet detected, do so now */ 3914 if (eeprom->word_size == 0)
+56 -3
drivers/net/e1000/e1000_hw.h
··· 52 e1000_82545, 53 e1000_82545_rev_3, 54 e1000_82546, 55 e1000_82546_rev_3, 56 e1000_82541, 57 e1000_82541_rev_2, ··· 210 } e1000_1000t_rx_status; 211 212 typedef enum { 213 - e1000_phy_m88 = 0, 214 - e1000_phy_igp, 215 - e1000_phy_undefined = 0xFF 216 } e1000_phy_type; 217 218 typedef enum { ··· 445 #define E1000_DEV_ID_82547EI 0x1019 446 #define E1000_DEV_ID_82547EI_MOBILE 0x101A 447 #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 448 449 #define NODE_ADDRESS_SIZE 6 450 #define ETH_LENGTH_OF_ADDRESS 6 ··· 812 #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ 813 #define E1000_FLA 0x0001C /* Flash Access - RW */ 814 #define E1000_MDIC 0x00020 /* MDI Control - RW */ 815 #define E1000_SCTL 0x00024 /* SerDes Control - RW */ 816 #define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ 817 #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ ··· 834 #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 835 #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 836 #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ 837 #define E1000_RCTL 0x00100 /* RX Control - RW */ 838 #define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ 839 #define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ ··· 1053 * in more current versions of the 8254x. Despite the difference in location, 1054 * the registers function in the same manner. 1055 */ 1056 #define E1000_82542_CTRL E1000_CTRL 1057 #define E1000_82542_CTRL_DUP E1000_CTRL_DUP 1058 #define E1000_82542_STATUS E1000_STATUS ··· 1613 #define E1000_MDIC_READY 0x10000000 1614 #define E1000_MDIC_INT_EN 0x20000000 1615 #define E1000_MDIC_ERROR 0x40000000 1616 1617 #define E1000_KUMCTRLSTA_MASK 0x0000FFFF 1618 #define E1000_KUMCTRLSTA_OFFSET 0x001F0000 ··· 2918 #define M88E1011_I_REV_4 0x04 2919 #define M88E1111_I_PHY_ID 0x01410CC0 2920 #define L1LXT971A_PHY_ID 0x001378E0 2921 2922 /* Bits... 2923 * 15-5: page
··· 52 e1000_82545, 53 e1000_82545_rev_3, 54 e1000_82546, 55 + e1000_ce4100, 56 e1000_82546_rev_3, 57 e1000_82541, 58 e1000_82541_rev_2, ··· 209 } e1000_1000t_rx_status; 210 211 typedef enum { 212 + e1000_phy_m88 = 0, 213 + e1000_phy_igp, 214 + e1000_phy_8211, 215 + e1000_phy_8201, 216 + e1000_phy_undefined = 0xFF 217 } e1000_phy_type; 218 219 typedef enum { ··· 442 #define E1000_DEV_ID_82547EI 0x1019 443 #define E1000_DEV_ID_82547EI_MOBILE 0x101A 444 #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 445 + #define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E 446 447 #define NODE_ADDRESS_SIZE 6 448 #define ETH_LENGTH_OF_ADDRESS 6 ··· 808 #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ 809 #define E1000_FLA 0x0001C /* Flash Access - RW */ 810 #define E1000_MDIC 0x00020 /* MDI Control - RW */ 811 + 812 + extern void __iomem *ce4100_gbe_mdio_base_virt; 813 + #define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt) 814 + #define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) 815 + #define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) 816 + #define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) 817 + #define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) 818 + #define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) 819 + #define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) 820 + 821 #define E1000_SCTL 0x00024 /* SerDes Control - RW */ 822 #define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ 823 #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ ··· 820 #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 821 #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 822 #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ 823 + 824 + /* Auxiliary Control Register. 
This register is CE4100 specific, 825 + * RMII/RGMII function is switched by this register - RW 826 + * Following are bits definitions of the Auxiliary Control Register 827 + */ 828 + #define E1000_CTL_AUX 0x000E0 829 + #define E1000_CTL_AUX_END_SEL_SHIFT 10 830 + #define E1000_CTL_AUX_ENDIANESS_SHIFT 8 831 + #define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 832 + 833 + /* descriptor and packet transfer use CTL_AUX.ENDIANESS */ 834 + #define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) 835 + /* descriptor use CTL_AUX.ENDIANESS, packet use default */ 836 + #define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) 837 + /* descriptor use default, packet use CTL_AUX.ENDIANESS */ 838 + #define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) 839 + /* all use CTL_AUX.ENDIANESS */ 840 + #define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) 841 + 842 + #define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) 843 + #define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) 844 + 845 + /* LW little endian, Byte big endian */ 846 + #define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) 847 + #define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) 848 + #define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) 849 + #define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) 850 + 851 #define E1000_RCTL 0x00100 /* RX Control - RW */ 852 #define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ 853 #define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ ··· 1011 * in more current versions of the 8254x. Despite the difference in location, 1012 * the registers function in the same manner. 
1013 */ 1014 + #define E1000_82542_CTL_AUX E1000_CTL_AUX 1015 #define E1000_82542_CTRL E1000_CTRL 1016 #define E1000_82542_CTRL_DUP E1000_CTRL_DUP 1017 #define E1000_82542_STATUS E1000_STATUS ··· 1570 #define E1000_MDIC_READY 0x10000000 1571 #define E1000_MDIC_INT_EN 0x20000000 1572 #define E1000_MDIC_ERROR 0x40000000 1573 + 1574 + #define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 1575 + #define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 1576 + #define INTEL_CE_GBE_MDIC_GO 0x80000000 1577 + #define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 1578 1579 #define E1000_KUMCTRLSTA_MASK 0x0000FFFF 1580 #define E1000_KUMCTRLSTA_OFFSET 0x001F0000 ··· 2870 #define M88E1011_I_REV_4 0x04 2871 #define M88E1111_I_PHY_ID 0x01410CC0 2872 #define L1LXT971A_PHY_ID 0x001378E0 2873 + 2874 + #define RTL8211B_PHY_ID 0x001CC910 2875 + #define RTL8201N_PHY_ID 0x8200 2876 + #define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ 2877 + #define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ 2878 2879 /* Bits... 2880 * 15-5: page
+35
drivers/net/e1000/e1000_main.c
··· 28 29 #include "e1000.h" 30 #include <net/ip6_checksum.h> 31 32 char e1000_driver_name[] = "e1000"; 33 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; ··· 85 INTEL_E1000_ETHERNET_DEVICE(0x108A), 86 INTEL_E1000_ETHERNET_DEVICE(0x1099), 87 INTEL_E1000_ETHERNET_DEVICE(0x10B5), 88 /* required last entry */ 89 {0,} 90 }; ··· 466 case e1000_82545: 467 case e1000_82545_rev_3: 468 case e1000_82546: 469 case e1000_82546_rev_3: 470 case e1000_82541: 471 case e1000_82541_rev_2: ··· 581 case e1000_82545: 582 case e1000_82545_rev_3: 583 case e1000_82546: 584 case e1000_82546_rev_3: 585 pba = E1000_PBA_48K; 586 break; ··· 903 static int global_quad_port_a = 0; /* global ksp3 port a indication */ 904 int i, err, pci_using_dac; 905 u16 eeprom_data = 0; 906 u16 eeprom_apme_mask = E1000_EEPROM_APME; 907 int bars, need_ioport; 908 ··· 1006 goto err_sw_init; 1007 1008 err = -EIO; 1009 1010 if (hw->mac_type >= e1000_82543) { 1011 netdev->features = NETIF_F_SG | ··· 1153 adapter->wol = adapter->eeprom_wol; 1154 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1155 1156 /* reset the hardware with the new settings */ 1157 e1000_reset(adapter); 1158 ··· 1203 kfree(adapter->rx_ring); 1204 err_dma: 1205 err_sw_init: 1206 iounmap(hw->hw_addr); 1207 err_ioremap: 1208 free_netdev(netdev); ··· 1443 /* First rev 82545 and 82546 need to not allow any memory 1444 * write location to cross 64k boundary due to errata 23 */ 1445 if (hw->mac_type == e1000_82545 || 1446 hw->mac_type == e1000_82546) { 1447 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; 1448 }
··· 28 29 #include "e1000.h" 30 #include <net/ip6_checksum.h> 31 + #include <linux/io.h> 32 + 33 + /* Intel Media SOC GbE MDIO physical base address */ 34 + static unsigned long ce4100_gbe_mdio_base_phy; 35 + /* Intel Media SOC GbE MDIO virtual base address */ 36 + void __iomem *ce4100_gbe_mdio_base_virt; 37 38 char e1000_driver_name[] = "e1000"; 39 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; ··· 79 INTEL_E1000_ETHERNET_DEVICE(0x108A), 80 INTEL_E1000_ETHERNET_DEVICE(0x1099), 81 INTEL_E1000_ETHERNET_DEVICE(0x10B5), 82 + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), 83 /* required last entry */ 84 {0,} 85 }; ··· 459 case e1000_82545: 460 case e1000_82545_rev_3: 461 case e1000_82546: 462 + case e1000_ce4100: 463 case e1000_82546_rev_3: 464 case e1000_82541: 465 case e1000_82541_rev_2: ··· 573 case e1000_82545: 574 case e1000_82545_rev_3: 575 case e1000_82546: 576 + case e1000_ce4100: 577 case e1000_82546_rev_3: 578 pba = E1000_PBA_48K; 579 break; ··· 894 static int global_quad_port_a = 0; /* global ksp3 port a indication */ 895 int i, err, pci_using_dac; 896 u16 eeprom_data = 0; 897 + u16 tmp = 0; 898 u16 eeprom_apme_mask = E1000_EEPROM_APME; 899 int bars, need_ioport; 900 ··· 996 goto err_sw_init; 997 998 err = -EIO; 999 + if (hw->mac_type == e1000_ce4100) { 1000 + ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1); 1001 + ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy, 1002 + pci_resource_len(pdev, BAR_1)); 1003 + 1004 + if (!ce4100_gbe_mdio_base_virt) 1005 + goto err_mdio_ioremap; 1006 + } 1007 1008 if (hw->mac_type >= e1000_82543) { 1009 netdev->features = NETIF_F_SG | ··· 1135 adapter->wol = adapter->eeprom_wol; 1136 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1137 1138 + /* Auto detect PHY address */ 1139 + if (hw->mac_type == e1000_ce4100) { 1140 + for (i = 0; i < 32; i++) { 1141 + hw->phy_addr = i; 1142 + e1000_read_phy_reg(hw, PHY_ID2, &tmp); 1143 + if (tmp == 0 || tmp == 0xFF) { 1144 + if (i == 31) 
1145 + goto err_eeprom; 1146 + continue; 1147 + } else 1148 + break; 1149 + } 1150 + } 1151 + 1152 /* reset the hardware with the new settings */ 1153 e1000_reset(adapter); 1154 ··· 1171 kfree(adapter->rx_ring); 1172 err_dma: 1173 err_sw_init: 1174 + err_mdio_ioremap: 1175 + iounmap(ce4100_gbe_mdio_base_virt); 1176 iounmap(hw->hw_addr); 1177 err_ioremap: 1178 free_netdev(netdev); ··· 1409 /* First rev 82545 and 82546 need to not allow any memory 1410 * write location to cross 64k boundary due to errata 23 */ 1411 if (hw->mac_type == e1000_82545 || 1412 + hw->mac_type == e1000_ce4100 || 1413 hw->mac_type == e1000_82546) { 1414 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; 1415 }
+14 -5
drivers/net/e1000/e1000_osdep.h
··· 34 #ifndef _E1000_OSDEP_H_ 35 #define _E1000_OSDEP_H_ 36 37 - #include <linux/types.h> 38 - #include <linux/pci.h> 39 - #include <linux/delay.h> 40 #include <asm/io.h> 41 - #include <linux/interrupt.h> 42 - #include <linux/sched.h> 43 44 #define er32(reg) \ 45 (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
··· 34 #ifndef _E1000_OSDEP_H_ 35 #define _E1000_OSDEP_H_ 36 37 #include <asm/io.h> 38 + 39 + #define CONFIG_RAM_BASE 0x60000 40 + #define GBE_CONFIG_OFFSET 0x0 41 + 42 + #define GBE_CONFIG_RAM_BASE \ 43 + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) 44 + 45 + #define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) 46 + 47 + #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ 48 + (iowrite16_rep(base + offset, data, count)) 49 + 50 + #define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ 51 + (ioread16_rep(base + (offset << 1), data, count)) 52 53 #define er32(reg) \ 54 (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
+70 -7
drivers/net/e1000e/82571.c
··· 78 static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); 79 static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); 80 static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); 81 82 /** 83 * e1000_init_phy_params_82571 - Init PHY func ptrs. ··· 115 phy->type = e1000_phy_bm; 116 phy->ops.acquire = e1000_get_hw_semaphore_82574; 117 phy->ops.release = e1000_put_hw_semaphore_82574; 118 break; 119 default: 120 return -E1000_ERR_PHY; ··· 125 126 /* This can only be done after all function pointers are setup. */ 127 ret_val = e1000_get_phy_id_82571(hw); 128 129 /* Verify phy id */ 130 switch (hw->mac.type) { 131 case e1000_82571: 132 case e1000_82572: 133 if (phy->id != IGP01E1000_I_PHY_ID) 134 - return -E1000_ERR_PHY; 135 break; 136 case e1000_82573: 137 if (phy->id != M88E1111_I_PHY_ID) 138 - return -E1000_ERR_PHY; 139 break; 140 case e1000_82574: 141 case e1000_82583: 142 if (phy->id != BME1000_E_PHY_ID_R2) 143 - return -E1000_ERR_PHY; 144 break; 145 default: 146 - return -E1000_ERR_PHY; 147 break; 148 } 149 150 - return 0; 151 } 152 153 /** ··· 660 } 661 662 /** 663 * e1000_acquire_nvm_82571 - Request for access to the EEPROM 664 * @hw: pointer to the HW structure 665 * ··· 1019 **/ 1020 static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 1021 { 1022 - u32 ctrl, ctrl_ext, icr; 1023 s32 ret_val; 1024 1025 /* ··· 1103 1104 /* Clear any pending interrupt events. */ 1105 ew32(IMC, 0xffffffff); 1106 - icr = er32(ICR); 1107 1108 if (hw->mac.type == e1000_82571) { 1109 /* Install any alternate MAC address into RAR0 */
··· 78 static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); 79 static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); 80 static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); 81 + static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); 82 + static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); 83 84 /** 85 * e1000_init_phy_params_82571 - Init PHY func ptrs. ··· 113 phy->type = e1000_phy_bm; 114 phy->ops.acquire = e1000_get_hw_semaphore_82574; 115 phy->ops.release = e1000_put_hw_semaphore_82574; 116 + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; 117 + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; 118 break; 119 default: 120 return -E1000_ERR_PHY; ··· 121 122 /* This can only be done after all function pointers are setup. */ 123 ret_val = e1000_get_phy_id_82571(hw); 124 + if (ret_val) { 125 + e_dbg("Error getting PHY ID\n"); 126 + return ret_val; 127 + } 128 129 /* Verify phy id */ 130 switch (hw->mac.type) { 131 case e1000_82571: 132 case e1000_82572: 133 if (phy->id != IGP01E1000_I_PHY_ID) 134 + ret_val = -E1000_ERR_PHY; 135 break; 136 case e1000_82573: 137 if (phy->id != M88E1111_I_PHY_ID) 138 + ret_val = -E1000_ERR_PHY; 139 break; 140 case e1000_82574: 141 case e1000_82583: 142 if (phy->id != BME1000_E_PHY_ID_R2) 143 + ret_val = -E1000_ERR_PHY; 144 break; 145 default: 146 + ret_val = -E1000_ERR_PHY; 147 break; 148 } 149 150 + if (ret_val) 151 + e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); 152 + 153 + return ret_val; 154 } 155 156 /** ··· 649 } 650 651 /** 652 + * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state 653 + * @hw: pointer to the HW structure 654 + * @active: true to enable LPLU, false to disable 655 + * 656 + * Sets the LPLU D0 state according to the active flag. 657 + * LPLU will not be activated unless the 658 + * device autonegotiation advertisement meets standards of 659 + * either 10 or 10/100 or 10/100/1000 at all duplexes. 
660 + * This is a function pointer entry point only called by 661 + * PHY setup routines. 662 + **/ 663 + static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) 664 + { 665 + u16 data = er32(POEMB); 666 + 667 + if (active) 668 + data |= E1000_PHY_CTRL_D0A_LPLU; 669 + else 670 + data &= ~E1000_PHY_CTRL_D0A_LPLU; 671 + 672 + ew32(POEMB, data); 673 + return 0; 674 + } 675 + 676 + /** 677 + * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 678 + * @hw: pointer to the HW structure 679 + * @active: boolean used to enable/disable lplu 680 + * 681 + * The low power link up (lplu) state is set to the power management level D3 682 + * when active is true, else clear lplu for D3. LPLU 683 + * is used during Dx states where the power conservation is most important. 684 + * During driver activity, SmartSpeed should be enabled so performance is 685 + * maintained. 686 + **/ 687 + static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) 688 + { 689 + u16 data = er32(POEMB); 690 + 691 + if (!active) { 692 + data &= ~E1000_PHY_CTRL_NOND0A_LPLU; 693 + } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || 694 + (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || 695 + (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { 696 + data |= E1000_PHY_CTRL_NOND0A_LPLU; 697 + } 698 + 699 + ew32(POEMB, data); 700 + return 0; 701 + } 702 + 703 + /** 704 * e1000_acquire_nvm_82571 - Request for access to the EEPROM 705 * @hw: pointer to the HW structure 706 * ··· 956 **/ 957 static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 958 { 959 + u32 ctrl, ctrl_ext; 960 s32 ret_val; 961 962 /* ··· 1040 1041 /* Clear any pending interrupt events. */ 1042 ew32(IMC, 0xffffffff); 1043 + er32(ICR); 1044 1045 if (hw->mac.type == e1000_82571) { 1046 /* Install any alternate MAC address into RAR0 */
+3
drivers/net/e1000e/e1000.h
··· 38 #include <linux/netdevice.h> 39 #include <linux/pci.h> 40 #include <linux/pci-aspm.h> 41 42 #include "hw.h" 43 ··· 497 extern void e1000e_update_stats(struct e1000_adapter *adapter); 498 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 499 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 500 extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); 501 502 extern unsigned int copybreak;
··· 38 #include <linux/netdevice.h> 39 #include <linux/pci.h> 40 #include <linux/pci-aspm.h> 41 + #include <linux/crc32.h> 42 43 #include "hw.h" 44 ··· 496 extern void e1000e_update_stats(struct e1000_adapter *adapter); 497 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 498 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 499 + extern void e1000e_get_hw_control(struct e1000_adapter *adapter); 500 + extern void e1000e_release_hw_control(struct e1000_adapter *adapter); 501 extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); 502 503 extern unsigned int copybreak;
+2 -2
drivers/net/e1000e/es2lan.c
··· 784 **/ 785 static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) 786 { 787 - u32 ctrl, icr; 788 s32 ret_val; 789 790 /* ··· 818 819 /* Clear any pending interrupt events. */ 820 ew32(IMC, 0xffffffff); 821 - icr = er32(ICR); 822 823 ret_val = e1000_check_alt_mac_addr_generic(hw); 824
··· 784 **/ 785 static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) 786 { 787 + u32 ctrl; 788 s32 ret_val; 789 790 /* ··· 818 819 /* Clear any pending interrupt events. */ 820 ew32(IMC, 0xffffffff); 821 + er32(ICR); 822 823 ret_val = e1000_check_alt_mac_addr_generic(hw); 824
+34 -20
drivers/net/e1000e/ethtool.c
··· 624 struct e1000_adapter *adapter = netdev_priv(netdev); 625 char firmware_version[32]; 626 627 - strncpy(drvinfo->driver, e1000e_driver_name, 32); 628 - strncpy(drvinfo->version, e1000e_driver_version, 32); 629 630 /* 631 * EEPROM image version # is reported as firmware version # for 632 * PCI-E controllers 633 */ 634 - sprintf(firmware_version, "%d.%d-%d", 635 (adapter->eeprom_vers & 0xF000) >> 12, 636 (adapter->eeprom_vers & 0x0FF0) >> 4, 637 (adapter->eeprom_vers & 0x000F)); 638 639 - strncpy(drvinfo->fw_version, firmware_version, 32); 640 - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 641 drvinfo->regdump_len = e1000_get_regs_len(netdev); 642 drvinfo->eedump_len = e1000_get_eeprom_len(netdev); 643 } ··· 1708 bool if_running = netif_running(netdev); 1709 1710 set_bit(__E1000_TESTING, &adapter->state); 1711 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1712 /* Offline tests */ 1713 ··· 1734 if (if_running) 1735 /* indicate we're in test mode */ 1736 dev_close(netdev); 1737 - else 1738 - e1000e_reset(adapter); 1739 1740 if (e1000_reg_test(adapter, &data[0])) 1741 eth_test->flags |= ETH_TEST_FL_FAILED; ··· 1747 eth_test->flags |= ETH_TEST_FL_FAILED; 1748 1749 e1000e_reset(adapter); 1750 - /* make sure the phy is powered up */ 1751 - e1000e_power_up_phy(adapter); 1752 if (e1000_loopback_test(adapter, &data[3])) 1753 eth_test->flags |= ETH_TEST_FL_FAILED; 1754 ··· 1768 if (if_running) 1769 dev_open(netdev); 1770 } else { 1771 - if (!if_running && (adapter->flags & FLAG_HAS_AMT)) { 1772 - clear_bit(__E1000_TESTING, &adapter->state); 1773 - dev_open(netdev); 1774 - set_bit(__E1000_TESTING, &adapter->state); 1775 - } 1776 1777 e_info("online testing starting\n"); 1778 - /* Online tests */ 1779 - if (e1000_link_test(adapter, &data[4])) 1780 - eth_test->flags |= ETH_TEST_FL_FAILED; 1781 1782 - /* Online tests aren't run; pass by default */ 1783 data[0] = 0; 1784 data[1] = 0; 1785 data[2] = 0; 1786 data[3] = 0; 1787 1788 - if (!if_running && 
(adapter->flags & FLAG_HAS_AMT)) 1789 - dev_close(netdev); 1790 1791 clear_bit(__E1000_TESTING, &adapter->state); 1792 } 1793 msleep_interruptible(4 * 1000); 1794 } 1795
··· 624 struct e1000_adapter *adapter = netdev_priv(netdev); 625 char firmware_version[32]; 626 627 + strncpy(drvinfo->driver, e1000e_driver_name, 628 + sizeof(drvinfo->driver) - 1); 629 + strncpy(drvinfo->version, e1000e_driver_version, 630 + sizeof(drvinfo->version) - 1); 631 632 /* 633 * EEPROM image version # is reported as firmware version # for 634 * PCI-E controllers 635 */ 636 + snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", 637 (adapter->eeprom_vers & 0xF000) >> 12, 638 (adapter->eeprom_vers & 0x0FF0) >> 4, 639 (adapter->eeprom_vers & 0x000F)); 640 641 + strncpy(drvinfo->fw_version, firmware_version, 642 + sizeof(drvinfo->fw_version) - 1); 643 + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 644 + sizeof(drvinfo->bus_info) - 1); 645 drvinfo->regdump_len = e1000_get_regs_len(netdev); 646 drvinfo->eedump_len = e1000_get_eeprom_len(netdev); 647 } ··· 1704 bool if_running = netif_running(netdev); 1705 1706 set_bit(__E1000_TESTING, &adapter->state); 1707 + 1708 + if (!if_running) { 1709 + /* Get control of and reset hardware */ 1710 + if (adapter->flags & FLAG_HAS_AMT) 1711 + e1000e_get_hw_control(adapter); 1712 + 1713 + e1000e_power_up_phy(adapter); 1714 + 1715 + adapter->hw.phy.autoneg_wait_to_complete = 1; 1716 + e1000e_reset(adapter); 1717 + adapter->hw.phy.autoneg_wait_to_complete = 0; 1718 + } 1719 + 1720 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1721 /* Offline tests */ 1722 ··· 1717 if (if_running) 1718 /* indicate we're in test mode */ 1719 dev_close(netdev); 1720 1721 if (e1000_reg_test(adapter, &data[0])) 1722 eth_test->flags |= ETH_TEST_FL_FAILED; ··· 1732 eth_test->flags |= ETH_TEST_FL_FAILED; 1733 1734 e1000e_reset(adapter); 1735 if (e1000_loopback_test(adapter, &data[3])) 1736 eth_test->flags |= ETH_TEST_FL_FAILED; 1737 ··· 1755 if (if_running) 1756 dev_open(netdev); 1757 } else { 1758 + /* Online tests */ 1759 1760 e_info("online testing starting\n"); 1761 1762 + /* register, eeprom, intr and loopback tests not run 
online */ 1763 data[0] = 0; 1764 data[1] = 0; 1765 data[2] = 0; 1766 data[3] = 0; 1767 1768 + if (e1000_link_test(adapter, &data[4])) 1769 + eth_test->flags |= ETH_TEST_FL_FAILED; 1770 1771 clear_bit(__E1000_TESTING, &adapter->state); 1772 } 1773 + 1774 + if (!if_running) { 1775 + e1000e_reset(adapter); 1776 + 1777 + if (adapter->flags & FLAG_HAS_AMT) 1778 + e1000e_release_hw_control(adapter); 1779 + } 1780 + 1781 msleep_interruptible(4 * 1000); 1782 } 1783
+1
drivers/net/e1000e/hw.h
··· 83 E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ 84 E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ 85 E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ 86 E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ 87 E1000_PBS = 0x01008, /* Packet Buffer Size */ 88 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
··· 83 E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ 84 E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ 85 E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ 86 + #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ 87 E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ 88 E1000_PBS = 0x01008, /* Packet Buffer Size */ 89 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
+28 -49
drivers/net/e1000e/ich8lan.c
··· 1395 } 1396 } 1397 1398 - static u32 e1000_calc_rx_da_crc(u8 mac[]) 1399 - { 1400 - u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */ 1401 - u32 i, j, mask, crc; 1402 - 1403 - crc = 0xffffffff; 1404 - for (i = 0; i < 6; i++) { 1405 - crc = crc ^ mac[i]; 1406 - for (j = 8; j > 0; j--) { 1407 - mask = (crc & 1) * (-1); 1408 - crc = (crc >> 1) ^ (poly & mask); 1409 - } 1410 - } 1411 - return ~crc; 1412 - } 1413 - 1414 /** 1415 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation 1416 * with 82579 PHY ··· 1437 mac_addr[4] = (addr_high & 0xFF); 1438 mac_addr[5] = ((addr_high >> 8) & 0xFF); 1439 1440 - ew32(PCH_RAICC(i), 1441 - e1000_calc_rx_da_crc(mac_addr)); 1442 } 1443 1444 /* Write Rx addresses to the PHY */ ··· 2960 { 2961 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 2962 u16 reg; 2963 - u32 ctrl, icr, kab; 2964 s32 ret_val; 2965 2966 /* ··· 3050 ew32(CRC_OFFSET, 0x65656565); 3051 3052 ew32(IMC, 0xffffffff); 3053 - icr = er32(ICR); 3054 3055 kab = er32(KABGTXD); 3056 kab |= E1000_KABGTXD_BGSQLBIAS; ··· 3101 * Reset the phy after disabling host wakeup to reset the Rx buffer. 
3102 */ 3103 if (hw->phy.type == e1000_phy_82578) { 3104 - hw->phy.ops.read_reg(hw, BM_WUC, &i); 3105 ret_val = e1000_phy_hw_reset_ich8lan(hw); 3106 if (ret_val) 3107 return ret_val; ··· 3259 (hw->phy.type == e1000_phy_82577)) { 3260 ew32(FCRTV_PCH, hw->fc.refresh_time); 3261 3262 - ret_val = hw->phy.ops.write_reg(hw, 3263 - PHY_REG(BM_PORT_CTRL_PAGE, 27), 3264 - hw->fc.pause_time); 3265 if (ret_val) 3266 return ret_val; 3267 } ··· 3324 return ret_val; 3325 break; 3326 case e1000_phy_ife: 3327 - ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, 3328 - &reg_data); 3329 if (ret_val) 3330 return ret_val; 3331 ··· 3342 reg_data |= IFE_PMC_AUTO_MDIX; 3343 break; 3344 } 3345 - ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, 3346 - reg_data); 3347 if (ret_val) 3348 return ret_val; 3349 break; ··· 3626 { 3627 if (hw->phy.type == e1000_phy_ife) 3628 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 3629 - (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); 3630 3631 ew32(LEDCTL, hw->mac.ledctl_mode1); 3632 return 0; ··· 3641 **/ 3642 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) 3643 { 3644 - return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, 3645 - (u16)hw->mac.ledctl_mode1); 3646 } 3647 3648 /** ··· 3652 **/ 3653 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) 3654 { 3655 - return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, 3656 - (u16)hw->mac.ledctl_default); 3657 } 3658 3659 /** ··· 3683 } 3684 } 3685 3686 - return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); 3687 } 3688 3689 /** ··· 3714 } 3715 } 3716 3717 - return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); 3718 } 3719 3720 /** ··· 3823 if ((hw->phy.type == e1000_phy_82578) || 3824 (hw->phy.type == e1000_phy_82579) || 3825 (hw->phy.type == e1000_phy_82577)) { 3826 - hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); 3827 - hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); 3828 - hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data); 3829 - hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, 
&phy_data); 3830 - hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data); 3831 - hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data); 3832 - hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data); 3833 - hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data); 3834 - hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data); 3835 - hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data); 3836 - hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data); 3837 - hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data); 3838 - hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data); 3839 - hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data); 3840 } 3841 } 3842
··· 1395 } 1396 } 1397 1398 /** 1399 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation 1400 * with 82579 PHY ··· 1453 mac_addr[4] = (addr_high & 0xFF); 1454 mac_addr[5] = ((addr_high >> 8) & 0xFF); 1455 1456 + ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); 1457 } 1458 1459 /* Write Rx addresses to the PHY */ ··· 2977 { 2978 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 2979 u16 reg; 2980 + u32 ctrl, kab; 2981 s32 ret_val; 2982 2983 /* ··· 3067 ew32(CRC_OFFSET, 0x65656565); 3068 3069 ew32(IMC, 0xffffffff); 3070 + er32(ICR); 3071 3072 kab = er32(KABGTXD); 3073 kab |= E1000_KABGTXD_BGSQLBIAS; ··· 3118 * Reset the phy after disabling host wakeup to reset the Rx buffer. 3119 */ 3120 if (hw->phy.type == e1000_phy_82578) { 3121 + e1e_rphy(hw, BM_WUC, &i); 3122 ret_val = e1000_phy_hw_reset_ich8lan(hw); 3123 if (ret_val) 3124 return ret_val; ··· 3276 (hw->phy.type == e1000_phy_82577)) { 3277 ew32(FCRTV_PCH, hw->fc.refresh_time); 3278 3279 + ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), 3280 + hw->fc.pause_time); 3281 if (ret_val) 3282 return ret_val; 3283 } ··· 3342 return ret_val; 3343 break; 3344 case e1000_phy_ife: 3345 + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data); 3346 if (ret_val) 3347 return ret_val; 3348 ··· 3361 reg_data |= IFE_PMC_AUTO_MDIX; 3362 break; 3363 } 3364 + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); 3365 if (ret_val) 3366 return ret_val; 3367 break; ··· 3646 { 3647 if (hw->phy.type == e1000_phy_ife) 3648 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 3649 + (IFE_PSCL_PROBE_MODE | 3650 + IFE_PSCL_PROBE_LEDS_OFF)); 3651 3652 ew32(LEDCTL, hw->mac.ledctl_mode1); 3653 return 0; ··· 3660 **/ 3661 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) 3662 { 3663 + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); 3664 } 3665 3666 /** ··· 3672 **/ 3673 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) 3674 { 3675 + return e1e_wphy(hw, HV_LED_CONFIG, 
(u16)hw->mac.ledctl_default); 3676 } 3677 3678 /** ··· 3704 } 3705 } 3706 3707 + return e1e_wphy(hw, HV_LED_CONFIG, data); 3708 } 3709 3710 /** ··· 3735 } 3736 } 3737 3738 + return e1e_wphy(hw, HV_LED_CONFIG, data); 3739 } 3740 3741 /** ··· 3844 if ((hw->phy.type == e1000_phy_82578) || 3845 (hw->phy.type == e1000_phy_82579) || 3846 (hw->phy.type == e1000_phy_82577)) { 3847 + e1e_rphy(hw, HV_SCC_UPPER, &phy_data); 3848 + e1e_rphy(hw, HV_SCC_LOWER, &phy_data); 3849 + e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); 3850 + e1e_rphy(hw, HV_ECOL_LOWER, &phy_data); 3851 + e1e_rphy(hw, HV_MCC_UPPER, &phy_data); 3852 + e1e_rphy(hw, HV_MCC_LOWER, &phy_data); 3853 + e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); 3854 + e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data); 3855 + e1e_rphy(hw, HV_COLC_UPPER, &phy_data); 3856 + e1e_rphy(hw, HV_COLC_LOWER, &phy_data); 3857 + e1e_rphy(hw, HV_DC_UPPER, &phy_data); 3858 + e1e_rphy(hw, HV_DC_LOWER, &phy_data); 3859 + e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); 3860 + e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data); 3861 } 3862 } 3863
+2 -1
drivers/net/e1000e/lib.c
··· 1135 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); 1136 if (ret_val) 1137 return ret_val; 1138 - ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); 1139 if (ret_val) 1140 return ret_val; 1141
··· 1135 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); 1136 if (ret_val) 1137 return ret_val; 1138 + ret_val = 1139 + e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); 1140 if (ret_val) 1141 return ret_val; 1142
+31 -22
drivers/net/e1000e/netdev.c
··· 1980 } 1981 1982 /** 1983 - * e1000_get_hw_control - get control of the h/w from f/w 1984 * @adapter: address of board private structure 1985 * 1986 - * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. 1987 * For ASF and Pass Through versions of f/w this means that 1988 * the driver is loaded. For AMT version (only with 82573) 1989 * of the f/w this means that the network i/f is open. 1990 **/ 1991 - static void e1000_get_hw_control(struct e1000_adapter *adapter) 1992 { 1993 struct e1000_hw *hw = &adapter->hw; 1994 u32 ctrl_ext; ··· 2005 } 2006 2007 /** 2008 - * e1000_release_hw_control - release control of the h/w to f/w 2009 * @adapter: address of board private structure 2010 * 2011 - * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2012 * For ASF and Pass Through versions of f/w this means that the 2013 * driver is no longer loaded. For AMT version (only with 82573) i 2014 * of the f/w this means that the network i/f is closed. 2015 * 2016 **/ 2017 - static void e1000_release_hw_control(struct e1000_adapter *adapter) 2018 { 2019 struct e1000_hw *hw = &adapter->hw; 2020 u32 ctrl_ext; ··· 2445 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2446 (vid == adapter->mng_vlan_id)) { 2447 /* release control to f/w */ 2448 - e1000_release_hw_control(adapter); 2449 return; 2450 } 2451 ··· 2734 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2735 else 2736 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2737 } 2738 2739 /* Program MC offset vector base */ ··· 3187 ew32(PBA, pba); 3188 } 3189 3190 - 3191 /* 3192 * flow control settings 3193 * ··· 3274 * that the network interface is in control 3275 */ 3276 if (adapter->flags & FLAG_HAS_AMT) 3277 - e1000_get_hw_control(adapter); 3278 3279 ew32(WUC, 0); 3280 ··· 3287 ew32(VET, ETH_P_8021Q); 3288 3289 e1000e_reset_adaptive(hw); 3290 e1000_get_phy_info(hw); 3291 3292 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && ··· 3579 * interface is now open and reset the part to a known state. 
3580 */ 3581 if (adapter->flags & FLAG_HAS_AMT) { 3582 - e1000_get_hw_control(adapter); 3583 e1000e_reset(adapter); 3584 } 3585 ··· 3643 return 0; 3644 3645 err_req_irq: 3646 - e1000_release_hw_control(adapter); 3647 e1000_power_down_phy(adapter); 3648 e1000e_free_rx_resources(adapter); 3649 err_setup_rx: ··· 3698 * If AMT is enabled, let the firmware know that the network 3699 * interface is now closed 3700 */ 3701 - if (adapter->flags & FLAG_HAS_AMT) 3702 - e1000_release_hw_control(adapter); 3703 3704 if ((adapter->flags & FLAG_HAS_ERT) || 3705 (adapter->hw.mac.type == e1000_pch2lan)) ··· 5219 * Release control of h/w to f/w. If f/w is AMT enabled, this 5220 * would have already happened in close and is redundant. 5221 */ 5222 - e1000_release_hw_control(adapter); 5223 5224 pci_disable_device(pdev); 5225 ··· 5376 * under the control of the driver. 5377 */ 5378 if (!(adapter->flags & FLAG_HAS_AMT)) 5379 - e1000_get_hw_control(adapter); 5380 5381 return 0; 5382 } ··· 5623 * under the control of the driver. 5624 */ 5625 if (!(adapter->flags & FLAG_HAS_AMT)) 5626 - e1000_get_hw_control(adapter); 5627 5628 } 5629 ··· 5646 ret_val = e1000_read_pba_string_generic(hw, pba_str, 5647 E1000_PBANUM_LENGTH); 5648 if (ret_val) 5649 - strcpy(pba_str, "Unknown"); 5650 e_info("MAC: %d, PHY: %d, PBA No: %s\n", 5651 hw->mac.type, hw->phy.type, pba_str); 5652 } ··· 5973 * under the control of the driver. 5974 */ 5975 if (!(adapter->flags & FLAG_HAS_AMT)) 5976 - e1000_get_hw_control(adapter); 5977 5978 - strcpy(netdev->name, "eth%d"); 5979 err = register_netdev(netdev); 5980 if (err) 5981 goto err_register; ··· 5992 5993 err_register: 5994 if (!(adapter->flags & FLAG_HAS_AMT)) 5995 - e1000_release_hw_control(adapter); 5996 err_eeprom: 5997 if (!e1000_check_reset_block(&adapter->hw)) 5998 e1000_phy_hw_reset(&adapter->hw); 5999 err_hw_init: 6000 - 6001 kfree(adapter->tx_ring); 6002 kfree(adapter->rx_ring); 6003 err_sw_init: ··· 6062 * Release control of h/w to f/w. 
If f/w is AMT enabled, this 6063 * would have already happened in close and is redundant. 6064 */ 6065 - e1000_release_hw_control(adapter); 6066 6067 e1000e_reset_interrupt_capability(adapter); 6068 kfree(adapter->tx_ring);
··· 1980 } 1981 1982 /** 1983 + * e1000e_get_hw_control - get control of the h/w from f/w 1984 * @adapter: address of board private structure 1985 * 1986 + * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. 1987 * For ASF and Pass Through versions of f/w this means that 1988 * the driver is loaded. For AMT version (only with 82573) 1989 * of the f/w this means that the network i/f is open. 1990 **/ 1991 + void e1000e_get_hw_control(struct e1000_adapter *adapter) 1992 { 1993 struct e1000_hw *hw = &adapter->hw; 1994 u32 ctrl_ext; ··· 2005 } 2006 2007 /** 2008 + * e1000e_release_hw_control - release control of the h/w to f/w 2009 * @adapter: address of board private structure 2010 * 2011 + * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2012 * For ASF and Pass Through versions of f/w this means that the 2013 * driver is no longer loaded. For AMT version (only with 82573) i 2014 * of the f/w this means that the network i/f is closed. 2015 * 2016 **/ 2017 + void e1000e_release_hw_control(struct e1000_adapter *adapter) 2018 { 2019 struct e1000_hw *hw = &adapter->hw; 2020 u32 ctrl_ext; ··· 2445 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2446 (vid == adapter->mng_vlan_id)) { 2447 /* release control to f/w */ 2448 + e1000e_release_hw_control(adapter); 2449 return; 2450 } 2451 ··· 2734 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2735 else 2736 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2737 + 2738 + if (ret_val) 2739 + e_dbg("failed to enable jumbo frame workaround mode\n"); 2740 } 2741 2742 /* Program MC offset vector base */ ··· 3184 ew32(PBA, pba); 3185 } 3186 3187 /* 3188 * flow control settings 3189 * ··· 3272 * that the network interface is in control 3273 */ 3274 if (adapter->flags & FLAG_HAS_AMT) 3275 + e1000e_get_hw_control(adapter); 3276 3277 ew32(WUC, 0); 3278 ··· 3285 ew32(VET, ETH_P_8021Q); 3286 3287 e1000e_reset_adaptive(hw); 3288 + 3289 + if (!netif_running(adapter->netdev) && 3290 + !test_bit(__E1000_TESTING, 
&adapter->state)) { 3291 + e1000_power_down_phy(adapter); 3292 + return; 3293 + } 3294 + 3295 e1000_get_phy_info(hw); 3296 3297 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && ··· 3570 * interface is now open and reset the part to a known state. 3571 */ 3572 if (adapter->flags & FLAG_HAS_AMT) { 3573 + e1000e_get_hw_control(adapter); 3574 e1000e_reset(adapter); 3575 } 3576 ··· 3634 return 0; 3635 3636 err_req_irq: 3637 + e1000e_release_hw_control(adapter); 3638 e1000_power_down_phy(adapter); 3639 e1000e_free_rx_resources(adapter); 3640 err_setup_rx: ··· 3689 * If AMT is enabled, let the firmware know that the network 3690 * interface is now closed 3691 */ 3692 + if ((adapter->flags & FLAG_HAS_AMT) && 3693 + !test_bit(__E1000_TESTING, &adapter->state)) 3694 + e1000e_release_hw_control(adapter); 3695 3696 if ((adapter->flags & FLAG_HAS_ERT) || 3697 (adapter->hw.mac.type == e1000_pch2lan)) ··· 5209 * Release control of h/w to f/w. If f/w is AMT enabled, this 5210 * would have already happened in close and is redundant. 5211 */ 5212 + e1000e_release_hw_control(adapter); 5213 5214 pci_disable_device(pdev); 5215 ··· 5366 * under the control of the driver. 5367 */ 5368 if (!(adapter->flags & FLAG_HAS_AMT)) 5369 + e1000e_get_hw_control(adapter); 5370 5371 return 0; 5372 } ··· 5613 * under the control of the driver. 5614 */ 5615 if (!(adapter->flags & FLAG_HAS_AMT)) 5616 + e1000e_get_hw_control(adapter); 5617 5618 } 5619 ··· 5636 ret_val = e1000_read_pba_string_generic(hw, pba_str, 5637 E1000_PBANUM_LENGTH); 5638 if (ret_val) 5639 + strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); 5640 e_info("MAC: %d, PHY: %d, PBA No: %s\n", 5641 hw->mac.type, hw->phy.type, pba_str); 5642 } ··· 5963 * under the control of the driver. 
5964 */ 5965 if (!(adapter->flags & FLAG_HAS_AMT)) 5966 + e1000e_get_hw_control(adapter); 5967 5968 + strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1); 5969 err = register_netdev(netdev); 5970 if (err) 5971 goto err_register; ··· 5982 5983 err_register: 5984 if (!(adapter->flags & FLAG_HAS_AMT)) 5985 + e1000e_release_hw_control(adapter); 5986 err_eeprom: 5987 if (!e1000_check_reset_block(&adapter->hw)) 5988 e1000_phy_hw_reset(&adapter->hw); 5989 err_hw_init: 5990 kfree(adapter->tx_ring); 5991 kfree(adapter->rx_ring); 5992 err_sw_init: ··· 6053 * Release control of h/w to f/w. If f/w is AMT enabled, this 6054 * would have already happened in close and is redundant. 6055 */ 6056 + e1000e_release_hw_control(adapter); 6057 6058 e1000e_reset_interrupt_capability(adapter); 6059 kfree(adapter->tx_ring);
+17 -23
drivers/net/e1000e/phy.c
··· 637 **/ 638 s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) 639 { 640 - struct e1000_phy_info *phy = &hw->phy; 641 s32 ret_val; 642 u16 phy_data; 643 644 /* Enable CRS on TX. This must be set for half-duplex operation. */ 645 - ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data); 646 if (ret_val) 647 goto out; 648 ··· 650 /* Enable downshift */ 651 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; 652 653 - ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data); 654 655 out: 656 return ret_val; ··· 773 } 774 775 if (phy->type == e1000_phy_82578) { 776 - ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 777 - &phy_data); 778 if (ret_val) 779 return ret_val; 780 781 /* 82578 PHY - set the downshift count to 1x. */ 782 phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; 783 phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; 784 - ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 785 - phy_data); 786 if (ret_val) 787 return ret_val; 788 } ··· 1316 * We didn't get link. 1317 * Reset the DSP and cross our fingers. 
1318 */ 1319 - ret_val = e1e_wphy(hw, 1320 - M88E1000_PHY_PAGE_SELECT, 1321 - 0x001d); 1322 if (ret_val) 1323 return ret_val; 1324 ret_val = e1000e_phy_reset_dsp(hw); ··· 3067 goto out; 3068 3069 /* Do not apply workaround if in PHY loopback bit 14 set */ 3070 - hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); 3071 if (data & PHY_CONTROL_LB) 3072 goto out; 3073 3074 /* check if link is up and at 1Gbps */ 3075 - ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); 3076 if (ret_val) 3077 goto out; 3078 ··· 3088 mdelay(200); 3089 3090 /* flush the packets in the fifo buffer */ 3091 - ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, 3092 - HV_MUX_DATA_CTRL_GEN_TO_MAC | 3093 - HV_MUX_DATA_CTRL_FORCE_SPEED); 3094 if (ret_val) 3095 goto out; 3096 3097 - ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, 3098 - HV_MUX_DATA_CTRL_GEN_TO_MAC); 3099 3100 out: 3101 return ret_val; ··· 3113 s32 ret_val; 3114 u16 data; 3115 3116 - ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); 3117 3118 if (!ret_val) 3119 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) ··· 3136 u16 phy_data; 3137 bool link; 3138 3139 - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); 3140 if (ret_val) 3141 goto out; 3142 3143 e1000e_phy_force_speed_duplex_setup(hw, &phy_data); 3144 3145 - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); 3146 if (ret_val) 3147 goto out; 3148 ··· 3206 if (ret_val) 3207 goto out; 3208 3209 - ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); 3210 if (ret_val) 3211 goto out; 3212 ··· 3218 if (ret_val) 3219 goto out; 3220 3221 - ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); 3222 if (ret_val) 3223 goto out; 3224 ··· 3252 s32 ret_val; 3253 u16 phy_data, length; 3254 3255 - ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); 3256 if (ret_val) 3257 goto out; 3258
··· 637 **/ 638 s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) 639 { 640 s32 ret_val; 641 u16 phy_data; 642 643 /* Enable CRS on TX. This must be set for half-duplex operation. */ 644 + ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); 645 if (ret_val) 646 goto out; 647 ··· 651 /* Enable downshift */ 652 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; 653 654 + ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); 655 656 out: 657 return ret_val; ··· 774 } 775 776 if (phy->type == e1000_phy_82578) { 777 + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 778 if (ret_val) 779 return ret_val; 780 781 /* 82578 PHY - set the downshift count to 1x. */ 782 phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; 783 phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; 784 + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); 785 if (ret_val) 786 return ret_val; 787 } ··· 1319 * We didn't get link. 1320 * Reset the DSP and cross our fingers. 1321 */ 1322 + ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 1323 + 0x001d); 1324 if (ret_val) 1325 return ret_val; 1326 ret_val = e1000e_phy_reset_dsp(hw); ··· 3071 goto out; 3072 3073 /* Do not apply workaround if in PHY loopback bit 14 set */ 3074 + e1e_rphy(hw, PHY_CONTROL, &data); 3075 if (data & PHY_CONTROL_LB) 3076 goto out; 3077 3078 /* check if link is up and at 1Gbps */ 3079 + ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); 3080 if (ret_val) 3081 goto out; 3082 ··· 3092 mdelay(200); 3093 3094 /* flush the packets in the fifo buffer */ 3095 + ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC | 3096 + HV_MUX_DATA_CTRL_FORCE_SPEED); 3097 if (ret_val) 3098 goto out; 3099 3100 + ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); 3101 3102 out: 3103 return ret_val; ··· 3119 s32 ret_val; 3120 u16 data; 3121 3122 + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); 3123 3124 if (!ret_val) 3125 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) ··· 3142 u16 phy_data; 3143 bool 
link; 3144 3145 + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); 3146 if (ret_val) 3147 goto out; 3148 3149 e1000e_phy_force_speed_duplex_setup(hw, &phy_data); 3150 3151 + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); 3152 if (ret_val) 3153 goto out; 3154 ··· 3212 if (ret_val) 3213 goto out; 3214 3215 + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); 3216 if (ret_val) 3217 goto out; 3218 ··· 3224 if (ret_val) 3225 goto out; 3226 3227 + ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); 3228 if (ret_val) 3229 goto out; 3230 ··· 3258 s32 ret_val; 3259 u16 phy_data, length; 3260 3261 + ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); 3262 if (ret_val) 3263 goto out; 3264
+1 -1
drivers/net/ehea/ehea.h
··· 40 #include <asm/io.h> 41 42 #define DRV_NAME "ehea" 43 - #define DRV_VERSION "EHEA_0106" 44 45 /* eHEA capability flags */ 46 #define DLPAR_PORT_ADD_REM 1
··· 40 #include <asm/io.h> 41 42 #define DRV_NAME "ehea" 43 + #define DRV_VERSION "EHEA_0107" 44 45 /* eHEA capability flags */ 46 #define DLPAR_PORT_ADD_REM 1
+2 -4
drivers/net/ehea/ehea_main.c
··· 437 } 438 } 439 /* Ring doorbell */ 440 - ehea_update_rq1a(pr->qp, i); 441 } 442 443 static int ehea_refill_rq_def(struct ehea_port_res *pr, ··· 1329 int ret; 1330 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; 1331 1332 - ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 1333 - - init_attr->act_nr_rwqes_rq2 1334 - - init_attr->act_nr_rwqes_rq3 - 1); 1335 1336 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); 1337
··· 437 } 438 } 439 /* Ring doorbell */ 440 + ehea_update_rq1a(pr->qp, i - 1); 441 } 442 443 static int ehea_refill_rq_def(struct ehea_port_res *pr, ··· 1329 int ret; 1330 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; 1331 1332 + ehea_init_fill_rq1(pr, pr->rq1_skba.len); 1333 1334 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); 1335
+179 -67
drivers/net/fec.c
··· 17 * 18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) 19 * Copyright (c) 2004-2006 Macq Electronique SA. 20 */ 21 22 #include <linux/module.h> ··· 47 48 #include <asm/cacheflush.h> 49 50 - #ifndef CONFIG_ARCH_MXC 51 #include <asm/coldfire.h> 52 #include <asm/mcfsim.h> 53 #endif 54 55 #include "fec.h" 56 57 - #ifdef CONFIG_ARCH_MXC 58 - #include <mach/hardware.h> 59 #define FEC_ALIGNMENT 0xf 60 #else 61 #define FEC_ALIGNMENT 0x3 62 #endif 63 64 - /* 65 - * Define the fixed address of the FEC hardware. 66 - */ 67 - #if defined(CONFIG_M5272) 68 69 - static unsigned char fec_mac_default[] = { 70 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 71 }; 72 73 /* 74 * Some hardware gets it MAC address out of local flash memory. 75 * if this is non-zero then assume it is the address to get MAC from. ··· 147 * account when setting it. 148 */ 149 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 150 - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) 151 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 152 #else 153 #define OPT_FRAME_SIZE 0 ··· 201 int mii_timeout; 202 uint phy_speed; 203 phy_interface_t phy_interface; 204 - int index; 205 int link; 206 int full_duplex; 207 struct completion mdio_done; ··· 227 /* Transmitter timeout */ 228 #define TX_TIMEOUT (2 * HZ) 229 230 static netdev_tx_t 231 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 232 { 233 struct fec_enet_private *fep = netdev_priv(dev); 234 struct bufdesc *bdp; 235 void *bufaddr; 236 unsigned short status; ··· 287 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); 288 bufaddr = fep->tx_bounce[index]; 289 } 290 291 /* Save skb pointer */ 292 fep->tx_skbuff[fep->skb_cur] = skb; ··· 464 fec_enet_rx(struct net_device *dev) 465 { 466 struct fec_enet_private *fep = netdev_priv(dev); 467 struct bufdesc *bdp; 468 unsigned short status; 469 struct sk_buff *skb; ··· 529 dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, 530 
DMA_FROM_DEVICE); 531 532 /* This does 16 byte alignment, exactly what we need. 533 * The packet length includes FCS, but we don't want to 534 * include that when passing upstream as it messes up ··· 578 } 579 580 /* ------------------------------------------------------------------------- */ 581 - #ifdef CONFIG_M5272 582 static void __inline__ fec_get_mac(struct net_device *dev) 583 { 584 struct fec_enet_private *fep = netdev_priv(dev); 585 unsigned char *iap, tmpaddr[ETH_ALEN]; 586 587 - if (FEC_FLASHMAC) { 588 - /* 589 - * Get MAC address from FLASH. 590 - * If it is all 1's or 0's, use the default. 591 - */ 592 - iap = (unsigned char *)FEC_FLASHMAC; 593 - if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && 594 - (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) 595 - iap = fec_mac_default; 596 - if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && 597 - (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) 598 - iap = fec_mac_default; 599 - } else { 600 - *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); 601 - *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 602 iap = &tmpaddr[0]; 603 } 604 605 memcpy(dev->dev_addr, iap, ETH_ALEN); 606 607 - /* Adjust MAC if using default MAC address */ 608 - if (iap == fec_mac_default) 609 - dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 610 } 611 - #endif 612 613 /* ------------------------------------------------------------------------- */ 614 ··· 704 fep->mii_timeout = 0; 705 init_completion(&fep->mdio_done); 706 707 - /* start a read op */ 708 - writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | 709 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 710 FEC_MMFR_TA | FEC_MMFR_DATA(value), 711 fep->hwp + FEC_MII_DATA); ··· 734 char mdio_bus_id[MII_BUS_ID_SIZE]; 735 char phy_name[MII_BUS_ID_SIZE + 3]; 736 int phy_id; 737 738 fep->phy_dev = NULL; 739 ··· 745 if (fep->mii_bus->phy_map[phy_id] == NULL) 746 continue; 747 if (fep->mii_bus->phy_map[phy_id]->phy_id 
== 0) 748 continue; 749 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 750 break; ··· 785 786 static int fec_enet_mii_init(struct platform_device *pdev) 787 { 788 struct net_device *dev = platform_get_drvdata(pdev); 789 struct fec_enet_private *fep = netdev_priv(dev); 790 int err = -ENXIO, i; 791 792 fep->mii_timeout = 0; 793 ··· 849 850 if (mdiobus_register(fep->mii_bus)) 851 goto err_out_free_mdio_irq; 852 853 return 0; 854 ··· 1152 /* 1153 * XXX: We need to clean up on failure exits here. 1154 * 1155 - * index is only used in legacy code 1156 */ 1157 - static int fec_enet_init(struct net_device *dev, int index) 1158 { 1159 struct fec_enet_private *fep = netdev_priv(dev); 1160 struct bufdesc *cbd_base; ··· 1170 1171 spin_lock_init(&fep->hw_lock); 1172 1173 - fep->index = index; 1174 fep->hwp = (void __iomem *)dev->base_addr; 1175 fep->netdev = dev; 1176 1177 - /* Set the Ethernet address */ 1178 - #ifdef CONFIG_M5272 1179 fec_get_mac(dev); 1180 - #else 1181 - { 1182 - unsigned long l; 1183 - l = readl(fep->hwp + FEC_ADDR_LOW); 1184 - dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24); 1185 - dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16); 1186 - dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8); 1187 - dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0); 1188 - l = readl(fep->hwp + FEC_ADDR_HIGH); 1189 - dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24); 1190 - dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16); 1191 - } 1192 - #endif 1193 1194 /* Set receive and transmit descriptor base. */ 1195 fep->rx_bd_base = cbd_base; ··· 1225 fec_restart(struct net_device *dev, int duplex) 1226 { 1227 struct fec_enet_private *fep = netdev_priv(dev); 1228 int i; 1229 1230 /* Whack a reset. We should wait for this. */ 1231 writel(1, fep->hwp + FEC_ECNTRL); 1232 udelay(10); 1233 1234 /* Clear any outstanding interrupt. 
*/ 1235 writel(0xffc00000, fep->hwp + FEC_IEVENT); ··· 1290 /* Set MII speed */ 1291 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1292 1293 #ifdef FEC_MIIGSK_ENR 1294 - if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { 1295 - /* disable the gasket and wait */ 1296 - writel(0, fep->hwp + FEC_MIIGSK_ENR); 1297 - while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1298 - udelay(1); 1299 1300 - /* configure the gasket: RMII, 50 MHz, no loopback, no echo */ 1301 - writel(1, fep->hwp + FEC_MIIGSK_CFGR); 1302 1303 - /* re-enable the gasket */ 1304 - writel(2, fep->hwp + FEC_MIIGSK_ENR); 1305 - } 1306 #endif 1307 1308 /* And last, enable the transmit and receive processing */ 1309 writel(2, fep->hwp + FEC_ECNTRL); ··· 1423 } 1424 clk_enable(fep->clk); 1425 1426 - ret = fec_enet_init(ndev, 0); 1427 if (ret) 1428 goto failed_init; 1429 ··· 1487 1488 if (ndev) { 1489 fep = netdev_priv(ndev); 1490 - if (netif_running(ndev)) 1491 - fec_enet_close(ndev); 1492 clk_disable(fep->clk); 1493 } 1494 return 0; ··· 1505 if (ndev) { 1506 fep = netdev_priv(ndev); 1507 clk_enable(fep->clk); 1508 - if (netif_running(ndev)) 1509 - fec_enet_open(ndev); 1510 } 1511 return 0; 1512 } ··· 1525 1526 static struct platform_driver fec_driver = { 1527 .driver = { 1528 - .name = "fec", 1529 .owner = THIS_MODULE, 1530 #ifdef CONFIG_PM 1531 .pm = &fec_pm_ops, 1532 #endif 1533 }, 1534 .probe = fec_probe, 1535 .remove = __devexit_p(fec_drv_remove), 1536 };
··· 17 * 18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) 19 * Copyright (c) 2004-2006 Macq Electronique SA. 20 + * 21 + * Copyright (C) 2010 Freescale Semiconductor, Inc. 22 */ 23 24 #include <linux/module.h> ··· 45 46 #include <asm/cacheflush.h> 47 48 + #ifndef CONFIG_ARM 49 #include <asm/coldfire.h> 50 #include <asm/mcfsim.h> 51 #endif 52 53 #include "fec.h" 54 55 + #if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 56 #define FEC_ALIGNMENT 0xf 57 #else 58 #define FEC_ALIGNMENT 0x3 59 #endif 60 61 + #define DRIVER_NAME "fec" 62 63 + /* Controller is ENET-MAC */ 64 + #define FEC_QUIRK_ENET_MAC (1 << 0) 65 + /* Controller needs driver to swap frame */ 66 + #define FEC_QUIRK_SWAP_FRAME (1 << 1) 67 + 68 + static struct platform_device_id fec_devtype[] = { 69 + { 70 + .name = DRIVER_NAME, 71 + .driver_data = 0, 72 + }, { 73 + .name = "imx28-fec", 74 + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 75 + } 76 }; 77 78 + static unsigned char macaddr[ETH_ALEN]; 79 + module_param_array(macaddr, byte, NULL, 0); 80 + MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); 81 + 82 + #if defined(CONFIG_M5272) 83 /* 84 * Some hardware gets it MAC address out of local flash memory. 85 * if this is non-zero then assume it is the address to get MAC from. ··· 133 * account when setting it. 
134 */ 135 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 136 + defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 137 + defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 138 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 139 #else 140 #define OPT_FRAME_SIZE 0 ··· 186 int mii_timeout; 187 uint phy_speed; 188 phy_interface_t phy_interface; 189 int link; 190 int full_duplex; 191 struct completion mdio_done; ··· 213 /* Transmitter timeout */ 214 #define TX_TIMEOUT (2 * HZ) 215 216 + static void *swap_buffer(void *bufaddr, int len) 217 + { 218 + int i; 219 + unsigned int *buf = bufaddr; 220 + 221 + for (i = 0; i < (len + 3) / 4; i++, buf++) 222 + *buf = cpu_to_be32(*buf); 223 + 224 + return bufaddr; 225 + } 226 + 227 static netdev_tx_t 228 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 229 { 230 struct fec_enet_private *fep = netdev_priv(dev); 231 + const struct platform_device_id *id_entry = 232 + platform_get_device_id(fep->pdev); 233 struct bufdesc *bdp; 234 void *bufaddr; 235 unsigned short status; ··· 260 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); 261 bufaddr = fep->tx_bounce[index]; 262 } 263 + 264 + /* 265 + * Some design made an incorrect assumption on endian mode of 266 + * the system that it's running on. As the result, driver has to 267 + * swap every frame going to and coming from the controller. 
268 + */ 269 + if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) 270 + swap_buffer(bufaddr, skb->len); 271 272 /* Save skb pointer */ 273 fep->tx_skbuff[fep->skb_cur] = skb; ··· 429 fec_enet_rx(struct net_device *dev) 430 { 431 struct fec_enet_private *fep = netdev_priv(dev); 432 + const struct platform_device_id *id_entry = 433 + platform_get_device_id(fep->pdev); 434 struct bufdesc *bdp; 435 unsigned short status; 436 struct sk_buff *skb; ··· 492 dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, 493 DMA_FROM_DEVICE); 494 495 + if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) 496 + swap_buffer(data, pkt_len); 497 + 498 /* This does 16 byte alignment, exactly what we need. 499 * The packet length includes FCS, but we don't want to 500 * include that when passing upstream as it messes up ··· 538 } 539 540 /* ------------------------------------------------------------------------- */ 541 static void __inline__ fec_get_mac(struct net_device *dev) 542 { 543 struct fec_enet_private *fep = netdev_priv(dev); 544 + struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 545 unsigned char *iap, tmpaddr[ETH_ALEN]; 546 547 + /* 548 + * try to get mac address in following order: 549 + * 550 + * 1) module parameter via kernel command line in form 551 + * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 552 + */ 553 + iap = macaddr; 554 + 555 + /* 556 + * 2) from flash or fuse (via platform data) 557 + */ 558 + if (!is_valid_ether_addr(iap)) { 559 + #ifdef CONFIG_M5272 560 + if (FEC_FLASHMAC) 561 + iap = (unsigned char *)FEC_FLASHMAC; 562 + #else 563 + if (pdata) 564 + memcpy(iap, pdata->mac, ETH_ALEN); 565 + #endif 566 + } 567 + 568 + /* 569 + * 3) FEC mac registers set by bootloader 570 + */ 571 + if (!is_valid_ether_addr(iap)) { 572 + *((unsigned long *) &tmpaddr[0]) = 573 + be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW)); 574 + *((unsigned short *) &tmpaddr[4]) = 575 + be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 576 iap = &tmpaddr[0]; 577 } 578 579 
memcpy(dev->dev_addr, iap, ETH_ALEN); 580 581 + /* Adjust MAC if using macaddr */ 582 + if (iap == macaddr) 583 + dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; 584 } 585 586 /* ------------------------------------------------------------------------- */ 587 ··· 651 fep->mii_timeout = 0; 652 init_completion(&fep->mdio_done); 653 654 + /* start a write op */ 655 + writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | 656 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 657 FEC_MMFR_TA | FEC_MMFR_DATA(value), 658 fep->hwp + FEC_MII_DATA); ··· 681 char mdio_bus_id[MII_BUS_ID_SIZE]; 682 char phy_name[MII_BUS_ID_SIZE + 3]; 683 int phy_id; 684 + int dev_id = fep->pdev->id; 685 686 fep->phy_dev = NULL; 687 ··· 691 if (fep->mii_bus->phy_map[phy_id] == NULL) 692 continue; 693 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) 694 + continue; 695 + if (dev_id--) 696 continue; 697 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 698 break; ··· 729 730 static int fec_enet_mii_init(struct platform_device *pdev) 731 { 732 + static struct mii_bus *fec0_mii_bus; 733 struct net_device *dev = platform_get_drvdata(pdev); 734 struct fec_enet_private *fep = netdev_priv(dev); 735 + const struct platform_device_id *id_entry = 736 + platform_get_device_id(fep->pdev); 737 int err = -ENXIO, i; 738 + 739 + /* 740 + * The dual fec interfaces are not equivalent with enet-mac. 741 + * Here are the differences: 742 + * 743 + * - fec0 supports MII & RMII modes while fec1 only supports RMII 744 + * - fec0 acts as the 1588 time master while fec1 is slave 745 + * - external phys can only be configured by fec0 746 + * 747 + * That is to say fec1 can not work independently. It only works 748 + * when fec0 is working. The reason behind this design is that the 749 + * second interface is added primarily for Switch mode. 750 + * 751 + * Because of the last point above, both phys are attached on fec0 752 + * mdio interface in board design, and need to be configured by 753 + * fec0 mii_bus. 
754 + */ 755 + if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) { 756 + /* fec1 uses fec0 mii_bus */ 757 + fep->mii_bus = fec0_mii_bus; 758 + return 0; 759 + } 760 761 fep->mii_timeout = 0; 762 ··· 768 769 if (mdiobus_register(fep->mii_bus)) 770 goto err_out_free_mdio_irq; 771 + 772 + /* save fec0 mii_bus */ 773 + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) 774 + fec0_mii_bus = fep->mii_bus; 775 776 return 0; 777 ··· 1067 /* 1068 * XXX: We need to clean up on failure exits here. 1069 * 1070 */ 1071 + static int fec_enet_init(struct net_device *dev) 1072 { 1073 struct fec_enet_private *fep = netdev_priv(dev); 1074 struct bufdesc *cbd_base; ··· 1086 1087 spin_lock_init(&fep->hw_lock); 1088 1089 fep->hwp = (void __iomem *)dev->base_addr; 1090 fep->netdev = dev; 1091 1092 + /* Get the Ethernet address */ 1093 fec_get_mac(dev); 1094 1095 /* Set receive and transmit descriptor base. */ 1096 fep->rx_bd_base = cbd_base; ··· 1156 fec_restart(struct net_device *dev, int duplex) 1157 { 1158 struct fec_enet_private *fep = netdev_priv(dev); 1159 + const struct platform_device_id *id_entry = 1160 + platform_get_device_id(fep->pdev); 1161 int i; 1162 + u32 val, temp_mac[2]; 1163 1164 /* Whack a reset. We should wait for this. */ 1165 writel(1, fep->hwp + FEC_ECNTRL); 1166 udelay(10); 1167 + 1168 + /* 1169 + * enet-mac reset will reset mac address registers too, 1170 + * so need to reconfigure it. 1171 + */ 1172 + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { 1173 + memcpy(&temp_mac, dev->dev_addr, ETH_ALEN); 1174 + writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); 1175 + writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); 1176 + } 1177 1178 /* Clear any outstanding interrupt. */ 1179 writel(0xffc00000, fep->hwp + FEC_IEVENT); ··· 1208 /* Set MII speed */ 1209 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1210 1211 + /* 1212 + * The phy interface and speed need to get configured 1213 + * differently on enet-mac. 
1214 + */ 1215 + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { 1216 + val = readl(fep->hwp + FEC_R_CNTRL); 1217 + 1218 + /* MII or RMII */ 1219 + if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1220 + val |= (1 << 8); 1221 + else 1222 + val &= ~(1 << 8); 1223 + 1224 + /* 10M or 100M */ 1225 + if (fep->phy_dev && fep->phy_dev->speed == SPEED_100) 1226 + val &= ~(1 << 9); 1227 + else 1228 + val |= (1 << 9); 1229 + 1230 + writel(val, fep->hwp + FEC_R_CNTRL); 1231 + } else { 1232 #ifdef FEC_MIIGSK_ENR 1233 + if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { 1234 + /* disable the gasket and wait */ 1235 + writel(0, fep->hwp + FEC_MIIGSK_ENR); 1236 + while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1237 + udelay(1); 1238 1239 + /* 1240 + * configure the gasket: 1241 + * RMII, 50 MHz, no loopback, no echo 1242 + */ 1243 + writel(1, fep->hwp + FEC_MIIGSK_CFGR); 1244 1245 + /* re-enable the gasket */ 1246 + writel(2, fep->hwp + FEC_MIIGSK_ENR); 1247 + } 1248 #endif 1249 + } 1250 1251 /* And last, enable the transmit and receive processing */ 1252 writel(2, fep->hwp + FEC_ECNTRL); ··· 1316 } 1317 clk_enable(fep->clk); 1318 1319 + ret = fec_enet_init(ndev); 1320 if (ret) 1321 goto failed_init; 1322 ··· 1380 1381 if (ndev) { 1382 fep = netdev_priv(ndev); 1383 + if (netif_running(ndev)) { 1384 + fec_stop(ndev); 1385 + netif_device_detach(ndev); 1386 + } 1387 clk_disable(fep->clk); 1388 } 1389 return 0; ··· 1396 if (ndev) { 1397 fep = netdev_priv(ndev); 1398 clk_enable(fep->clk); 1399 + if (netif_running(ndev)) { 1400 + fec_restart(ndev, fep->full_duplex); 1401 + netif_device_attach(ndev); 1402 + } 1403 } 1404 return 0; 1405 } ··· 1414 1415 static struct platform_driver fec_driver = { 1416 .driver = { 1417 + .name = DRIVER_NAME, 1418 .owner = THIS_MODULE, 1419 #ifdef CONFIG_PM 1420 .pm = &fec_pm_ops, 1421 #endif 1422 }, 1423 + .id_table = fec_devtype, 1424 .probe = fec_probe, 1425 .remove = __devexit_p(fec_drv_remove), 1426 };
+3 -2
drivers/net/fec.h
··· 14 /****************************************************************************/ 15 16 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 17 - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) 18 /* 19 * Just figures, Motorola would have to change the offsets for 20 * registers in the same peripheral device on different models ··· 79 /* 80 * Define the buffer descriptor structure. 81 */ 82 - #ifdef CONFIG_ARCH_MXC 83 struct bufdesc { 84 unsigned short cbd_datlen; /* Data length */ 85 unsigned short cbd_sc; /* Control and status info */
··· 14 /****************************************************************************/ 15 16 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 17 + defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 18 + defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 19 /* 20 * Just figures, Motorola would have to change the offsets for 21 * registers in the same peripheral device on different models ··· 78 /* 79 * Define the buffer descriptor structure. 80 */ 81 + #if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 82 struct bufdesc { 83 unsigned short cbd_datlen; /* Data length */ 84 unsigned short cbd_sc; /* Control and status info */
+12 -22
drivers/net/forcedeth.c
··· 3949 writel(flags, base + NvRegWakeUpFlags); 3950 spin_unlock_irq(&np->lock); 3951 } 3952 return 0; 3953 } 3954 ··· 5489 /* set mac address */ 5490 nv_copy_mac_to_hw(dev); 5491 5492 - /* Workaround current PCI init glitch: wakeup bits aren't 5493 - * being set from PCI PM capability. 5494 - */ 5495 - device_init_wakeup(&pci_dev->dev, 1); 5496 - 5497 /* disable WOL */ 5498 writel(0, base + NvRegWakeUpFlags); 5499 np->wolenabled = 0; 5500 5501 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 5502 ··· 5743 } 5744 5745 #ifdef CONFIG_PM 5746 - static int nv_suspend(struct pci_dev *pdev, pm_message_t state) 5747 { 5748 struct net_device *dev = pci_get_drvdata(pdev); 5749 struct fe_priv *np = netdev_priv(dev); 5750 u8 __iomem *base = get_hwbase(dev); ··· 5761 for (i = 0; i <= np->register_size/sizeof(u32); i++) 5762 np->saved_config_space[i] = readl(base + i*sizeof(u32)); 5763 5764 - pci_save_state(pdev); 5765 - pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); 5766 - pci_disable_device(pdev); 5767 - pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5768 return 0; 5769 } 5770 5771 - static int nv_resume(struct pci_dev *pdev) 5772 { 5773 struct net_device *dev = pci_get_drvdata(pdev); 5774 struct fe_priv *np = netdev_priv(dev); 5775 u8 __iomem *base = get_hwbase(dev); 5776 int i, rc = 0; 5777 - 5778 - pci_set_power_state(pdev, PCI_D0); 5779 - pci_restore_state(pdev); 5780 - /* ack any pending wake events, disable PME */ 5781 - pci_enable_wake(pdev, PCI_D0, 0); 5782 5783 /* restore non-pci configuration space */ 5784 for (i = 0; i <= np->register_size/sizeof(u32); i++) ··· 5789 } 5790 return rc; 5791 } 5792 5793 static void nv_shutdown(struct pci_dev *pdev) 5794 { ··· 5815 * only put the device into D3 if we really go for poweroff. 
5816 */ 5817 if (system_state == SYSTEM_POWER_OFF) { 5818 - if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled)) 5819 - pci_enable_wake(pdev, PCI_D3hot, np->wolenabled); 5820 pci_set_power_state(pdev, PCI_D3hot); 5821 } 5822 } 5823 #else 5824 - #define nv_suspend NULL 5825 #define nv_shutdown NULL 5826 - #define nv_resume NULL 5827 #endif /* CONFIG_PM */ 5828 5829 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { ··· 5993 .id_table = pci_tbl, 5994 .probe = nv_probe, 5995 .remove = __devexit_p(nv_remove), 5996 - .suspend = nv_suspend, 5997 - .resume = nv_resume, 5998 .shutdown = nv_shutdown, 5999 }; 6000 6001 static int __init init_nic(void)
··· 3949 writel(flags, base + NvRegWakeUpFlags); 3950 spin_unlock_irq(&np->lock); 3951 } 3952 + device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); 3953 return 0; 3954 } 3955 ··· 5488 /* set mac address */ 5489 nv_copy_mac_to_hw(dev); 5490 5491 /* disable WOL */ 5492 writel(0, base + NvRegWakeUpFlags); 5493 np->wolenabled = 0; 5494 + device_set_wakeup_enable(&pci_dev->dev, false); 5495 5496 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 5497 ··· 5746 } 5747 5748 #ifdef CONFIG_PM 5749 + static int nv_suspend(struct device *device) 5750 { 5751 + struct pci_dev *pdev = to_pci_dev(device); 5752 struct net_device *dev = pci_get_drvdata(pdev); 5753 struct fe_priv *np = netdev_priv(dev); 5754 u8 __iomem *base = get_hwbase(dev); ··· 5763 for (i = 0; i <= np->register_size/sizeof(u32); i++) 5764 np->saved_config_space[i] = readl(base + i*sizeof(u32)); 5765 5766 return 0; 5767 } 5768 5769 + static int nv_resume(struct device *device) 5770 { 5771 + struct pci_dev *pdev = to_pci_dev(device); 5772 struct net_device *dev = pci_get_drvdata(pdev); 5773 struct fe_priv *np = netdev_priv(dev); 5774 u8 __iomem *base = get_hwbase(dev); 5775 int i, rc = 0; 5776 5777 /* restore non-pci configuration space */ 5778 for (i = 0; i <= np->register_size/sizeof(u32); i++) ··· 5799 } 5800 return rc; 5801 } 5802 + 5803 + static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume); 5804 + #define NV_PM_OPS (&nv_pm_ops) 5805 5806 static void nv_shutdown(struct pci_dev *pdev) 5807 { ··· 5822 * only put the device into D3 if we really go for poweroff. 
5823 */ 5824 if (system_state == SYSTEM_POWER_OFF) { 5825 + pci_wake_from_d3(pdev, np->wolenabled); 5826 pci_set_power_state(pdev, PCI_D3hot); 5827 } 5828 } 5829 #else 5830 + #define NV_PM_OPS NULL 5831 #define nv_shutdown NULL 5832 #endif /* CONFIG_PM */ 5833 5834 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { ··· 6002 .id_table = pci_tbl, 6003 .probe = nv_probe, 6004 .remove = __devexit_p(nv_remove), 6005 .shutdown = nv_shutdown, 6006 + .driver.pm = NV_PM_OPS, 6007 }; 6008 6009 static int __init init_nic(void)
+2 -2
drivers/net/hamradio/yam.c
··· 396 while (p) { 397 if (p->bitrate == bitrate) { 398 memcpy(p->bits, bits, YAM_FPGA_SIZE); 399 - return p->bits; 400 } 401 p = p->next; 402 } ··· 411 p->bitrate = bitrate; 412 p->next = yam_data; 413 yam_data = p; 414 - 415 release_firmware(fw); 416 return p->bits; 417 }
··· 396 while (p) { 397 if (p->bitrate == bitrate) { 398 memcpy(p->bits, bits, YAM_FPGA_SIZE); 399 + goto out; 400 } 401 p = p->next; 402 } ··· 411 p->bitrate = bitrate; 412 p->next = yam_data; 413 yam_data = p; 414 + out: 415 release_firmware(fw); 416 return p->bits; 417 }
+5 -16
drivers/net/ixgbe/ixgbe.h
··· 508 extern void ixgbe_free_tx_resources(struct ixgbe_ring *); 509 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 510 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 511 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 512 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 513 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); ··· 526 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); 527 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); 528 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 529 - struct ixgbe_atr_input *input, 530 u8 queue); 531 extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 532 - struct ixgbe_atr_input *input, 533 struct ixgbe_atr_input_masks *input_masks, 534 u16 soft_id, u8 queue); 535 - extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, 536 - u16 vlan_id); 537 - extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, 538 - u32 src_addr); 539 - extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, 540 - u32 dst_addr); 541 - extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, 542 - u16 src_port); 543 - extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, 544 - u16 dst_port); 545 - extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, 546 - u16 flex_byte); 547 - extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, 548 - u8 l4type); 549 extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, 550 struct ixgbe_ring *ring); 551 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
··· 508 extern void ixgbe_free_tx_resources(struct ixgbe_ring *); 509 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 510 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 511 + extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, 512 + struct ixgbe_ring *); 513 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 514 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 515 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); ··· 524 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); 525 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); 526 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 527 + union ixgbe_atr_hash_dword input, 528 + union ixgbe_atr_hash_dword common, 529 u8 queue); 530 extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 531 + union ixgbe_atr_input *input, 532 struct ixgbe_atr_input_masks *input_masks, 533 u16 soft_id, u8 queue); 534 extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, 535 struct ixgbe_ring *ring); 536 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+299 -460
drivers/net/ixgbe/ixgbe_82599.c
··· 1003 udelay(10); 1004 } 1005 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1006 - hw_dbg(hw ,"Flow Director previous command isn't complete, " 1007 "aborting table re-initialization.\n"); 1008 return IXGBE_ERR_FDIR_REINIT_FAILED; 1009 } ··· 1113 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1114 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1115 1116 - fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; 1117 1118 /* Prime the keys for hashing */ 1119 - IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1120 - htonl(IXGBE_ATR_BUCKET_HASH_KEY)); 1121 - IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, 1122 - htonl(IXGBE_ATR_SIGNATURE_HASH_KEY)); 1123 1124 /* 1125 * Poll init-done after we write the register. Estimated times: ··· 1206 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1207 1208 /* Prime the keys for hashing */ 1209 - IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1210 - htonl(IXGBE_ATR_BUCKET_HASH_KEY)); 1211 - IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, 1212 - htonl(IXGBE_ATR_SIGNATURE_HASH_KEY)); 1213 1214 /* 1215 * Poll init-done after we write the register. 
Estimated times: ··· 1246 * @stream: input bitstream to compute the hash on 1247 * @key: 32-bit hash key 1248 **/ 1249 - static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, 1250 - u32 key) 1251 { 1252 /* 1253 * The algorithm is as follows: ··· 1267 * To simplify for programming, the algorithm is implemented 1268 * in software this way: 1269 * 1270 - * Key[31:0], Stream[335:0] 1271 * 1272 - * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times 1273 - * int_key[350:0] = tmp_key[351:1] 1274 - * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321] 1275 * 1276 - * hash[15:0] = 0; 1277 - * for (i = 0; i < 351; i++) { 1278 - * if (int_key[i]) 1279 - * hash ^= int_stream[(i + 15):i]; 1280 * } 1281 */ 1282 1283 - union { 1284 - u64 fill[6]; 1285 - u32 key[11]; 1286 - u8 key_stream[44]; 1287 - } tmp_key; 1288 1289 - u8 *stream = (u8 *)atr_input; 1290 - u8 int_key[44]; /* upper-most bit unused */ 1291 - u8 hash_str[46]; /* upper-most 2 bits unused */ 1292 - u16 hash_result = 0; 1293 - int i, j, k, h; 1294 1295 /* 1296 - * Initialize the fill member to prevent warnings 1297 - * on some compilers 1298 */ 1299 - tmp_key.fill[0] = 0; 1300 1301 - /* First load the temporary key stream */ 1302 - for (i = 0; i < 6; i++) { 1303 - u64 fillkey = ((u64)key << 32) | key; 1304 - tmp_key.fill[i] = fillkey; 1305 } 1306 1307 - /* 1308 - * Set the interim key for the hashing. Bit 352 is unused, so we must 1309 - * shift and compensate when building the key. 1310 - */ 1311 - 1312 - int_key[0] = tmp_key.key_stream[0] >> 1; 1313 - for (i = 1, j = 0; i < 44; i++) { 1314 - unsigned int this_key = tmp_key.key_stream[j] << 7; 1315 - j++; 1316 - int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1)); 1317 - } 1318 - 1319 - /* 1320 - * Set the interim bit string for the hashing. Bits 368 and 367 are 1321 - * unused, so shift and compensate when building the string. 
1322 - */ 1323 - hash_str[0] = (stream[40] & 0x7f) >> 1; 1324 - for (i = 1, j = 40; i < 46; i++) { 1325 - unsigned int this_str = stream[j] << 7; 1326 - j++; 1327 - if (j > 41) 1328 - j = 0; 1329 - hash_str[i] = (u8)(this_str | (stream[j] >> 1)); 1330 - } 1331 - 1332 - /* 1333 - * Now compute the hash. i is the index into hash_str, j is into our 1334 - * key stream, k is counting the number of bits, and h interates within 1335 - * each byte. 1336 - */ 1337 - for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) { 1338 - for (h = 0; h < 8 && k < 351; h++, k++) { 1339 - if (int_key[j] & (1 << h)) { 1340 - /* 1341 - * Key bit is set, XOR in the current 16-bit 1342 - * string. Example of processing: 1343 - * h = 0, 1344 - * tmp = (hash_str[i - 2] & 0 << 16) | 1345 - * (hash_str[i - 1] & 0xff << 8) | 1346 - * (hash_str[i] & 0xff >> 0) 1347 - * So tmp = hash_str[15 + k:k], since the 1348 - * i + 2 clause rolls off the 16-bit value 1349 - * h = 7, 1350 - * tmp = (hash_str[i - 2] & 0x7f << 9) | 1351 - * (hash_str[i - 1] & 0xff << 1) | 1352 - * (hash_str[i] & 0x80 >> 7) 1353 - */ 1354 - int tmp = (hash_str[i] >> h); 1355 - tmp |= (hash_str[i - 1] << (8 - h)); 1356 - tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1)) 1357 - << (16 - h); 1358 - hash_result ^= (u16)tmp; 1359 - } 1360 - } 1361 - } 1362 - 1363 - return hash_result; 1364 } 1365 1366 - /** 1367 - * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream 1368 - * @input: input stream to modify 1369 - * @vlan: the VLAN id to load 1370 - **/ 1371 - s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan) 1372 - { 1373 - input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8; 1374 - input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff; 1375 - 1376 - return 0; 1377 - } 1378 - 1379 - /** 1380 - * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address 1381 - * @input: input stream to modify 1382 - * @src_addr: the IP address to load 1383 - **/ 1384 - s32 
ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr) 1385 - { 1386 - input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24; 1387 - input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] = 1388 - (src_addr >> 16) & 0xff; 1389 - input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] = 1390 - (src_addr >> 8) & 0xff; 1391 - input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff; 1392 - 1393 - return 0; 1394 - } 1395 1396 /** 1397 - * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address 1398 - * @input: input stream to modify 1399 - * @dst_addr: the IP address to load 1400 - **/ 1401 - s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr) 1402 - { 1403 - input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24; 1404 - input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] = 1405 - (dst_addr >> 16) & 0xff; 1406 - input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] = 1407 - (dst_addr >> 8) & 0xff; 1408 - input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff; 1409 - 1410 - return 0; 1411 - } 1412 - 1413 - /** 1414 - * ixgbe_atr_set_src_port_82599 - Sets the source port 1415 - * @input: input stream to modify 1416 - * @src_port: the source port to load 1417 - **/ 1418 - s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port) 1419 - { 1420 - input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8; 1421 - input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff; 1422 - 1423 - return 0; 1424 - } 1425 - 1426 - /** 1427 - * ixgbe_atr_set_dst_port_82599 - Sets the destination port 1428 - * @input: input stream to modify 1429 - * @dst_port: the destination port to load 1430 - **/ 1431 - s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port) 1432 - { 1433 - input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8; 1434 - input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff; 1435 - 1436 - return 0; 1437 - } 1438 - 1439 - 
/** 1440 - * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes 1441 - * @input: input stream to modify 1442 - * @flex_bytes: the flexible bytes to load 1443 - **/ 1444 - s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte) 1445 - { 1446 - input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8; 1447 - input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff; 1448 - 1449 - return 0; 1450 - } 1451 - 1452 - /** 1453 - * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type 1454 - * @input: input stream to modify 1455 - * @l4type: the layer 4 type value to load 1456 - **/ 1457 - s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type) 1458 - { 1459 - input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type; 1460 - 1461 - return 0; 1462 - } 1463 - 1464 - /** 1465 - * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream 1466 - * @input: input stream to search 1467 - * @vlan: the VLAN id to load 1468 - **/ 1469 - static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan) 1470 - { 1471 - *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET]; 1472 - *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8; 1473 - 1474 - return 0; 1475 - } 1476 - 1477 - /** 1478 - * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address 1479 - * @input: input stream to search 1480 - * @src_addr: the IP address to load 1481 - **/ 1482 - static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, 1483 - u32 *src_addr) 1484 - { 1485 - *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET]; 1486 - *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8; 1487 - *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16; 1488 - *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24; 1489 - 1490 - return 0; 1491 - } 1492 - 1493 - /** 1494 - * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address 1495 - * @input: input stream 
to search 1496 - * @dst_addr: the IP address to load 1497 - **/ 1498 - static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, 1499 - u32 *dst_addr) 1500 - { 1501 - *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET]; 1502 - *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8; 1503 - *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16; 1504 - *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24; 1505 - 1506 - return 0; 1507 - } 1508 - 1509 - /** 1510 - * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address 1511 - * @input: input stream to search 1512 - * @src_addr_1: the first 4 bytes of the IP address to load 1513 - * @src_addr_2: the second 4 bytes of the IP address to load 1514 - * @src_addr_3: the third 4 bytes of the IP address to load 1515 - * @src_addr_4: the fourth 4 bytes of the IP address to load 1516 - **/ 1517 - static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, 1518 - u32 *src_addr_1, u32 *src_addr_2, 1519 - u32 *src_addr_3, u32 *src_addr_4) 1520 - { 1521 - *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12]; 1522 - *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8; 1523 - *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16; 1524 - *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24; 1525 - 1526 - *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8]; 1527 - *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8; 1528 - *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16; 1529 - *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24; 1530 - 1531 - *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4]; 1532 - *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8; 1533 - *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16; 1534 - *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 
7] << 24; 1535 - 1536 - *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET]; 1537 - *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8; 1538 - *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16; 1539 - *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24; 1540 - 1541 - return 0; 1542 - } 1543 - 1544 - /** 1545 - * ixgbe_atr_get_src_port_82599 - Gets the source port 1546 - * @input: input stream to modify 1547 - * @src_port: the source port to load 1548 * 1549 - * Even though the input is given in big-endian, the FDIRPORT registers 1550 - * expect the ports to be programmed in little-endian. Hence the need to swap 1551 - * endianness when retrieving the data. This can be confusing since the 1552 - * internal hash engine expects it to be big-endian. 1553 **/ 1554 - static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, 1555 - u16 *src_port) 1556 { 1557 - *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; 1558 - *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; 1559 1560 - return 0; 1561 - } 1562 1563 - /** 1564 - * ixgbe_atr_get_dst_port_82599 - Gets the destination port 1565 - * @input: input stream to modify 1566 - * @dst_port: the destination port to load 1567 - * 1568 - * Even though the input is given in big-endian, the FDIRPORT registers 1569 - * expect the ports to be programmed in little-endian. Hence the need to swap 1570 - * endianness when retrieving the data. This can be confusing since the 1571 - * internal hash engine expects it to be big-endian. 
1572 - **/ 1573 - static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, 1574 - u16 *dst_port) 1575 - { 1576 - *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8; 1577 - *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1]; 1578 1579 - return 0; 1580 - } 1581 1582 - /** 1583 - * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes 1584 - * @input: input stream to modify 1585 - * @flex_bytes: the flexible bytes to load 1586 - **/ 1587 - static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, 1588 - u16 *flex_byte) 1589 - { 1590 - *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET]; 1591 - *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8; 1592 1593 - return 0; 1594 - } 1595 1596 - /** 1597 - * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type 1598 - * @input: input stream to modify 1599 - * @l4type: the layer 4 type value to load 1600 - **/ 1601 - static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, 1602 - u8 *l4type) 1603 - { 1604 - *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]; 1605 1606 - return 0; 1607 } 1608 1609 /** 1610 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1611 * @hw: pointer to hardware structure 1612 - * @stream: input bitstream 1613 * @queue: queue index to direct traffic to 1614 **/ 1615 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1616 - struct ixgbe_atr_input *input, 1617 u8 queue) 1618 { 1619 u64 fdirhashcmd; 1620 - u64 fdircmd; 1621 - u32 fdirhash; 1622 - u16 bucket_hash, sig_hash; 1623 - u8 l4type; 1624 1625 - bucket_hash = ixgbe_atr_compute_hash_82599(input, 1626 - IXGBE_ATR_BUCKET_HASH_KEY); 1627 1628 - /* bucket_hash is only 15 bits */ 1629 - bucket_hash &= IXGBE_ATR_HASH_MASK; 1630 - 1631 - sig_hash = ixgbe_atr_compute_hash_82599(input, 1632 - IXGBE_ATR_SIGNATURE_HASH_KEY); 1633 - 1634 - /* Get the l4type in order to program FDIRCMD properly */ 1635 - /* lowest 2 bits are 
FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ 1636 - ixgbe_atr_get_l4type_82599(input, &l4type); 1637 1638 /* 1639 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1640 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 1641 */ 1642 - fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; 1643 - 1644 - fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1645 - IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); 1646 - 1647 - switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1648 - case IXGBE_ATR_L4TYPE_TCP: 1649 - fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; 1650 - break; 1651 - case IXGBE_ATR_L4TYPE_UDP: 1652 - fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; 1653 - break; 1654 - case IXGBE_ATR_L4TYPE_SCTP: 1655 - fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; 1656 - break; 1657 - default: 1658 - hw_dbg(hw, "Error on l4type input\n"); 1659 - return IXGBE_ERR_CONFIG; 1660 - } 1661 - 1662 - if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) 1663 - fdircmd |= IXGBE_FDIRCMD_IPV6; 1664 - 1665 - fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); 1666 - fdirhashcmd = ((fdircmd << 32) | fdirhash); 1667 1668 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1669 1670 return 0; 1671 } 1672 1673 /** 1674 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter ··· 1522 * hardware writes must be protected from one another. 
1523 **/ 1524 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 1525 - struct ixgbe_atr_input *input, 1526 struct ixgbe_atr_input_masks *input_masks, 1527 u16 soft_id, u8 queue) 1528 { 1529 - u32 fdircmd = 0; 1530 u32 fdirhash; 1531 - u32 src_ipv4 = 0, dst_ipv4 = 0; 1532 - u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; 1533 - u16 src_port, dst_port, vlan_id, flex_bytes; 1534 - u16 bucket_hash; 1535 - u8 l4type; 1536 - u8 fdirm = 0; 1537 - 1538 - /* Get our input values */ 1539 - ixgbe_atr_get_l4type_82599(input, &l4type); 1540 1541 /* 1542 - * Check l4type formatting, and bail out before we touch the hardware 1543 * if there's a configuration issue 1544 */ 1545 - switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1546 - case IXGBE_ATR_L4TYPE_TCP: 1547 - fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; 1548 - break; 1549 - case IXGBE_ATR_L4TYPE_UDP: 1550 - fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; 1551 - break; 1552 - case IXGBE_ATR_L4TYPE_SCTP: 1553 - fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; 1554 break; 1555 default: 1556 - hw_dbg(hw, "Error on l4type input\n"); 1557 return IXGBE_ERR_CONFIG; 1558 } 1559 1560 - bucket_hash = ixgbe_atr_compute_hash_82599(input, 1561 - IXGBE_ATR_BUCKET_HASH_KEY); 1562 - 1563 - /* bucket_hash is only 15 bits */ 1564 - bucket_hash &= IXGBE_ATR_HASH_MASK; 1565 - 1566 - ixgbe_atr_get_vlan_id_82599(input, &vlan_id); 1567 - ixgbe_atr_get_src_port_82599(input, &src_port); 1568 - ixgbe_atr_get_dst_port_82599(input, &dst_port); 1569 - ixgbe_atr_get_flex_byte_82599(input, &flex_bytes); 1570 - 1571 - fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; 1572 - 1573 - /* Now figure out if we're IPv4 or IPv6 */ 1574 - if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) { 1575 - /* IPv6 */ 1576 - ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2, 1577 - &src_ipv6_3, &src_ipv6_4); 1578 - 1579 - IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1); 1580 - IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2); 1581 - IXGBE_WRITE_REG(hw, 
IXGBE_FDIRSIPv6(2), src_ipv6_3); 1582 - /* The last 4 bytes is the same register as IPv4 */ 1583 - IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4); 1584 - 1585 - fdircmd |= IXGBE_FDIRCMD_IPV6; 1586 - fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH; 1587 - } else { 1588 - /* IPv4 */ 1589 - ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4); 1590 - IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4); 1591 - } 1592 - 1593 - ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4); 1594 - IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4); 1595 - 1596 - IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id | 1597 - (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT))); 1598 - IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port | 1599 - (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT))); 1600 - 1601 /* 1602 - * Program the relevant mask registers. L4type cannot be 1603 - * masked out in this implementation. 1604 * 1605 * This also assumes IPv4 only. IPv6 masking isn't supported at this 1606 * point in time. 1607 */ 1608 - IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask); 1609 - IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask); 1610 1611 - switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1612 - case IXGBE_ATR_L4TYPE_TCP: 1613 - IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask); 1614 - IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 1615 - (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | 1616 - (input_masks->dst_port_mask << 16))); 1617 break; 1618 - case IXGBE_ATR_L4TYPE_UDP: 1619 - IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask); 1620 - IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 1621 - (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) | 1622 - (input_masks->src_port_mask << 16))); 1623 break; 1624 default: 1625 - /* this already would have failed above */ 1626 - break; 1627 } 1628 1629 - /* Program the last mask register, FDIRM */ 1630 - if (input_masks->vlan_id_mask) 1631 - /* Mask both VLAN and VLANP - bits 0 and 1 */ 1632 - fdirm |= 0x3; 1633 - 1634 - if (input_masks->data_mask) 1635 - /* Flex bytes need masking, so mask 
the whole thing - bit 4 */ 1636 - fdirm |= 0x10; 1637 1638 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ 1639 - fdirm |= 0x24; 1640 - 1641 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); 1642 1643 - fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; 1644 - fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; 1645 - fdircmd |= IXGBE_FDIRCMD_LAST; 1646 - fdircmd |= IXGBE_FDIRCMD_QUEUE_EN; 1647 - fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1648 1649 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); 1650 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); 1651 1652 return 0; 1653 } 1654 /** 1655 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 1656 * @hw: pointer to hardware structure
··· 1003 udelay(10); 1004 } 1005 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1006 + hw_dbg(hw, "Flow Director previous command isn't complete, " 1007 "aborting table re-initialization.\n"); 1008 return IXGBE_ERR_FDIR_REINIT_FAILED; 1009 } ··· 1113 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1114 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1115 1116 1117 /* Prime the keys for hashing */ 1118 + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); 1119 + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); 1120 1121 /* 1122 * Poll init-done after we write the register. Estimated times: ··· 1209 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1210 1211 /* Prime the keys for hashing */ 1212 + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); 1213 + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); 1214 1215 /* 1216 * Poll init-done after we write the register. Estimated times: ··· 1251 * @stream: input bitstream to compute the hash on 1252 * @key: 32-bit hash key 1253 **/ 1254 + static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, 1255 + u32 key) 1256 { 1257 /* 1258 * The algorithm is as follows: ··· 1272 * To simplify for programming, the algorithm is implemented 1273 * in software this way: 1274 * 1275 + * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] 1276 * 1277 + * for (i = 0; i < 352; i+=32) 1278 + * hi_hash_dword[31:0] ^= Stream[(i+31):i]; 1279 * 1280 + * lo_hash_dword[15:0] ^= Stream[15:0]; 1281 + * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; 1282 + * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; 1283 + * 1284 + * hi_hash_dword[31:0] ^= Stream[351:320]; 1285 + * 1286 + * if(key[0]) 1287 + * hash[15:0] ^= Stream[15:0]; 1288 + * 1289 + * for (i = 0; i < 16; i++) { 1290 + * if (key[i]) 1291 + * hash[15:0] ^= lo_hash_dword[(i+15):i]; 1292 + * if (key[i + 16]) 1293 + * hash[15:0] ^= hi_hash_dword[(i+15):i]; 1294 * } 1295 + * 1296 */ 1297 + __be32 
common_hash_dword = 0; 1298 + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1299 + u32 hash_result = 0; 1300 + u8 i; 1301 1302 + /* record the flow_vm_vlan bits as they are a key part to the hash */ 1303 + flow_vm_vlan = ntohl(atr_input->dword_stream[0]); 1304 1305 + /* generate common hash dword */ 1306 + for (i = 10; i; i -= 2) 1307 + common_hash_dword ^= atr_input->dword_stream[i] ^ 1308 + atr_input->dword_stream[i - 1]; 1309 + 1310 + hi_hash_dword = ntohl(common_hash_dword); 1311 + 1312 + /* low dword is word swapped version of common */ 1313 + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1314 + 1315 + /* apply flow ID/VM pool/VLAN ID bits to hash words */ 1316 + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); 1317 + 1318 + /* Process bits 0 and 16 */ 1319 + if (key & 0x0001) hash_result ^= lo_hash_dword; 1320 + if (key & 0x00010000) hash_result ^= hi_hash_dword; 1321 1322 /* 1323 + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to 1324 + * delay this because bit 0 of the stream should not be processed 1325 + * so we do not add the vlan until after bit 0 was processed 1326 */ 1327 + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1328 1329 + 1330 + /* process the remaining 30 bits in the key 2 bits at a time */ 1331 + for (i = 15; i; i-- ) { 1332 + if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i; 1333 + if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i; 1334 } 1335 1336 + return hash_result & IXGBE_ATR_HASH_MASK; 1337 } 1338 1339 + /* 1340 + * These defines allow us to quickly generate all of the necessary instructions 1341 + * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION 1342 + * for values 0 through 15 1343 + */ 1344 + #define IXGBE_ATR_COMMON_HASH_KEY \ 1345 + (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) 1346 + #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ 1347 + do { \ 1348 + u32 n = (_n); \ 1349 + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << 
n)) \ 1350 + common_hash ^= lo_hash_dword >> n; \ 1351 + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ 1352 + bucket_hash ^= lo_hash_dword >> n; \ 1353 + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ 1354 + sig_hash ^= lo_hash_dword << (16 - n); \ 1355 + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ 1356 + common_hash ^= hi_hash_dword >> n; \ 1357 + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1358 + bucket_hash ^= hi_hash_dword >> n; \ 1359 + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ 1360 + sig_hash ^= hi_hash_dword << (16 - n); \ 1361 + } while (0); 1362 1363 /** 1364 + * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash 1365 + * @stream: input bitstream to compute the hash on 1366 * 1367 + * This function is almost identical to the function above but contains 1368 + * several optomizations such as unwinding all of the loops, letting the 1369 + * compiler work out all of the conditional ifs since the keys are static 1370 + * defines, and computing two keys at once since the hashed dword stream 1371 + * will be the same for both keys. 
1372 **/ 1373 + static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, 1374 + union ixgbe_atr_hash_dword common) 1375 { 1376 + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1377 + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; 1378 1379 + /* record the flow_vm_vlan bits as they are a key part to the hash */ 1380 + flow_vm_vlan = ntohl(input.dword); 1381 1382 + /* generate common hash dword */ 1383 + hi_hash_dword = ntohl(common.dword); 1384 1385 + /* low dword is word swapped version of common */ 1386 + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1387 1388 + /* apply flow ID/VM pool/VLAN ID bits to hash words */ 1389 + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); 1390 1391 + /* Process bits 0 and 16 */ 1392 + IXGBE_COMPUTE_SIG_HASH_ITERATION(0); 1393 1394 + /* 1395 + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to 1396 + * delay this because bit 0 of the stream should not be processed 1397 + * so we do not add the vlan until after bit 0 was processed 1398 + */ 1399 + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1400 1401 + /* Process remaining 30 bit of the key */ 1402 + IXGBE_COMPUTE_SIG_HASH_ITERATION(1); 1403 + IXGBE_COMPUTE_SIG_HASH_ITERATION(2); 1404 + IXGBE_COMPUTE_SIG_HASH_ITERATION(3); 1405 + IXGBE_COMPUTE_SIG_HASH_ITERATION(4); 1406 + IXGBE_COMPUTE_SIG_HASH_ITERATION(5); 1407 + IXGBE_COMPUTE_SIG_HASH_ITERATION(6); 1408 + IXGBE_COMPUTE_SIG_HASH_ITERATION(7); 1409 + IXGBE_COMPUTE_SIG_HASH_ITERATION(8); 1410 + IXGBE_COMPUTE_SIG_HASH_ITERATION(9); 1411 + IXGBE_COMPUTE_SIG_HASH_ITERATION(10); 1412 + IXGBE_COMPUTE_SIG_HASH_ITERATION(11); 1413 + IXGBE_COMPUTE_SIG_HASH_ITERATION(12); 1414 + IXGBE_COMPUTE_SIG_HASH_ITERATION(13); 1415 + IXGBE_COMPUTE_SIG_HASH_ITERATION(14); 1416 + IXGBE_COMPUTE_SIG_HASH_ITERATION(15); 1417 + 1418 + /* combine common_hash result with signature and bucket hashes */ 1419 + bucket_hash ^= common_hash; 1420 + bucket_hash &= IXGBE_ATR_HASH_MASK; 1421 + 
1422 + sig_hash ^= common_hash << 16; 1423 + sig_hash &= IXGBE_ATR_HASH_MASK << 16; 1424 + 1425 + /* return completed signature hash */ 1426 + return sig_hash ^ bucket_hash; 1427 } 1428 1429 /** 1430 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1431 * @hw: pointer to hardware structure 1432 + * @input: unique input dword 1433 + * @common: compressed common input dword 1434 * @queue: queue index to direct traffic to 1435 **/ 1436 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1437 + union ixgbe_atr_hash_dword input, 1438 + union ixgbe_atr_hash_dword common, 1439 u8 queue) 1440 { 1441 u64 fdirhashcmd; 1442 + u32 fdircmd; 1443 1444 + /* 1445 + * Get the flow_type in order to program FDIRCMD properly 1446 + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 1447 + */ 1448 + switch (input.formatted.flow_type) { 1449 + case IXGBE_ATR_FLOW_TYPE_TCPV4: 1450 + case IXGBE_ATR_FLOW_TYPE_UDPV4: 1451 + case IXGBE_ATR_FLOW_TYPE_SCTPV4: 1452 + case IXGBE_ATR_FLOW_TYPE_TCPV6: 1453 + case IXGBE_ATR_FLOW_TYPE_UDPV6: 1454 + case IXGBE_ATR_FLOW_TYPE_SCTPV6: 1455 + break; 1456 + default: 1457 + hw_dbg(hw, " Error on flow type input\n"); 1458 + return IXGBE_ERR_CONFIG; 1459 + } 1460 1461 + /* configure FDIRCMD register */ 1462 + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1463 + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1464 + fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1465 + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1466 1467 /* 1468 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1469 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 
1470 */ 1471 + fdirhashcmd = (u64)fdircmd << 32; 1472 + fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); 1473 1474 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1475 1476 + hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); 1477 + 1478 return 0; 1479 } 1480 + 1481 + /** 1482 + * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks 1483 + * @input_mask: mask to be bit swapped 1484 + * 1485 + * The source and destination port masks for flow director are bit swapped 1486 + * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to 1487 + * generate a correctly swapped value we need to bit swap the mask and that 1488 + * is what is accomplished by this function. 1489 + **/ 1490 + static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks) 1491 + { 1492 + u32 mask = ntohs(input_masks->dst_port_mask); 1493 + mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; 1494 + mask |= ntohs(input_masks->src_port_mask); 1495 + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); 1496 + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); 1497 + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); 1498 + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); 1499 + } 1500 + 1501 + /* 1502 + * These two macros are meant to address the fact that we have registers 1503 + * that are either all or in part big-endian. As a result on big-endian 1504 + * systems we will end up byte swapping the value to little-endian before 1505 + * it is byte swapped again and written to the hardware in the original 1506 + * big-endian format. 
1507 + */ 1508 + #define IXGBE_STORE_AS_BE32(_value) \ 1509 + (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ 1510 + (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) 1511 + 1512 + #define IXGBE_WRITE_REG_BE32(a, reg, value) \ 1513 + IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) 1514 + 1515 + #define IXGBE_STORE_AS_BE16(_value) \ 1516 + (((u16)(_value) >> 8) | ((u16)(_value) << 8)) 1517 1518 /** 1519 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter ··· 1687 * hardware writes must be protected from one another. 1688 **/ 1689 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 1690 + union ixgbe_atr_input *input, 1691 struct ixgbe_atr_input_masks *input_masks, 1692 u16 soft_id, u8 queue) 1693 { 1694 u32 fdirhash; 1695 + u32 fdircmd; 1696 + u32 fdirport, fdirtcpm; 1697 + u32 fdirvlan; 1698 + /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */ 1699 + u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX | 1700 + IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6; 1701 1702 /* 1703 + * Check flow_type formatting, and bail out before we touch the hardware 1704 * if there's a configuration issue 1705 */ 1706 + switch (input->formatted.flow_type) { 1707 + case IXGBE_ATR_FLOW_TYPE_IPV4: 1708 + /* use the L4 protocol mask for raw IPv4/IPv6 traffic */ 1709 + fdirm |= IXGBE_FDIRM_L4P; 1710 + case IXGBE_ATR_FLOW_TYPE_SCTPV4: 1711 + if (input_masks->dst_port_mask || input_masks->src_port_mask) { 1712 + hw_dbg(hw, " Error on src/dst port mask\n"); 1713 + return IXGBE_ERR_CONFIG; 1714 + } 1715 + case IXGBE_ATR_FLOW_TYPE_TCPV4: 1716 + case IXGBE_ATR_FLOW_TYPE_UDPV4: 1717 break; 1718 default: 1719 + hw_dbg(hw, " Error on flow type input\n"); 1720 return IXGBE_ERR_CONFIG; 1721 } 1722 1723 /* 1724 + * Program the relevant mask registers. If src/dst_port or src/dst_addr 1725 + * are zero, then assume a full mask for that field. 
Also assume that 1726 + * a VLAN of 0 is unspecified, so mask that out as well. L4type 1727 + * cannot be masked out in this implementation. 1728 * 1729 * This also assumes IPv4 only. IPv6 masking isn't supported at this 1730 * point in time. 1731 */ 1732 1733 + /* Program FDIRM */ 1734 + switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) { 1735 + case 0xEFFF: 1736 + /* Unmask VLAN ID - bit 0 and fall through to unmask prio */ 1737 + fdirm &= ~IXGBE_FDIRM_VLANID; 1738 + case 0xE000: 1739 + /* Unmask VLAN prio - bit 1 */ 1740 + fdirm &= ~IXGBE_FDIRM_VLANP; 1741 break; 1742 + case 0x0FFF: 1743 + /* Unmask VLAN ID - bit 0 */ 1744 + fdirm &= ~IXGBE_FDIRM_VLANID; 1745 + break; 1746 + case 0x0000: 1747 + /* do nothing, vlans already masked */ 1748 break; 1749 default: 1750 + hw_dbg(hw, " Error on VLAN mask\n"); 1751 + return IXGBE_ERR_CONFIG; 1752 } 1753 1754 + if (input_masks->flex_mask & 0xFFFF) { 1755 + if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) { 1756 + hw_dbg(hw, " Error on flexible byte mask\n"); 1757 + return IXGBE_ERR_CONFIG; 1758 + } 1759 + /* Unmask Flex Bytes - bit 4 */ 1760 + fdirm &= ~IXGBE_FDIRM_FLEX; 1761 + } 1762 1763 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ 1764 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); 1765 1766 + /* store the TCP/UDP port masks, bit reversed from port layout */ 1767 + fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks); 1768 + 1769 + /* write both the same so that UDP and TCP use the same mask */ 1770 + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); 1771 + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); 1772 + 1773 + /* store source and destination IP masks (big-enian) */ 1774 + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 1775 + ~input_masks->src_ip_mask[0]); 1776 + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 1777 + ~input_masks->dst_ip_mask[0]); 1778 + 1779 + /* Apply masks to input data */ 1780 + input->formatted.vlan_id &= input_masks->vlan_id_mask; 1781 + input->formatted.flex_bytes &= input_masks->flex_mask; 1782 
+ input->formatted.src_port &= input_masks->src_port_mask; 1783 + input->formatted.dst_port &= input_masks->dst_port_mask; 1784 + input->formatted.src_ip[0] &= input_masks->src_ip_mask[0]; 1785 + input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0]; 1786 + 1787 + /* record vlan (little-endian) and flex_bytes(big-endian) */ 1788 + fdirvlan = 1789 + IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes)); 1790 + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; 1791 + fdirvlan |= ntohs(input->formatted.vlan_id); 1792 + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); 1793 + 1794 + /* record source and destination port (little-endian)*/ 1795 + fdirport = ntohs(input->formatted.dst_port); 1796 + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; 1797 + fdirport |= ntohs(input->formatted.src_port); 1798 + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); 1799 + 1800 + /* record the first 32 bits of the destination address (big-endian) */ 1801 + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); 1802 + 1803 + /* record the source address (big-endian) */ 1804 + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); 1805 + 1806 + /* configure FDIRCMD register */ 1807 + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1808 + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1809 + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1810 + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1811 + 1812 + /* we only want the bucket hash so drop the upper 16 bits */ 1813 + fdirhash = ixgbe_atr_compute_hash_82599(input, 1814 + IXGBE_ATR_BUCKET_HASH_KEY); 1815 + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; 1816 1817 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); 1818 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); 1819 1820 return 0; 1821 } 1822 + 1823 /** 1824 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 1825 * @hw: pointer to hardware structure
+96 -42
drivers/net/ixgbe/ixgbe_ethtool.c
··· 1477 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1478 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1479 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1480 - reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx)); 1481 - reg_ctl &= ~IXGBE_RXDCTL_ENABLE; 1482 - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl); 1483 1484 /* now Tx */ 1485 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); ··· 2277 struct ethtool_rx_ntuple *cmd) 2278 { 2279 struct ixgbe_adapter *adapter = netdev_priv(dev); 2280 - struct ethtool_rx_ntuple_flow_spec fs = cmd->fs; 2281 - struct ixgbe_atr_input input_struct; 2282 struct ixgbe_atr_input_masks input_masks; 2283 int target_queue; 2284 2285 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 2286 return -EOPNOTSUPP; ··· 2290 * Don't allow programming if the action is a queue greater than 2291 * the number of online Tx queues. 2292 */ 2293 - if ((fs.action >= adapter->num_tx_queues) || 2294 - (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP)) 2295 return -EINVAL; 2296 2297 - memset(&input_struct, 0, sizeof(struct ixgbe_atr_input)); 2298 memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); 2299 2300 - input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src; 2301 - input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst; 2302 - input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc; 2303 - input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst; 2304 - input_masks.vlan_id_mask = fs.vlan_tag_mask; 2305 - /* only use the lowest 2 bytes for flex bytes */ 2306 - input_masks.data_mask = (fs.data_mask & 0xffff); 2307 - 2308 - switch (fs.flow_type) { 2309 case TCP_V4_FLOW: 2310 - ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP); 2311 break; 2312 case UDP_V4_FLOW: 2313 - ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP); 2314 break; 2315 case SCTP_V4_FLOW: 2316 - ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP); 2317 break; 2318 default: 2319 return -1; 2320 } 2321 2322 - /* Mask bits from the inputs based 
on user-supplied mask */ 2323 - ixgbe_atr_set_src_ipv4_82599(&input_struct, 2324 - (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src)); 2325 - ixgbe_atr_set_dst_ipv4_82599(&input_struct, 2326 - (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst)); 2327 - /* 82599 expects these to be byte-swapped for perfect filtering */ 2328 - ixgbe_atr_set_src_port_82599(&input_struct, 2329 - ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc)); 2330 - ixgbe_atr_set_dst_port_82599(&input_struct, 2331 - ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst)); 2332 2333 - /* VLAN and Flex bytes are either completely masked or not */ 2334 - if (!fs.vlan_tag_mask) 2335 - ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag); 2336 2337 - if (!input_masks.data_mask) 2338 - /* make sure we only use the first 2 bytes of user data */ 2339 - ixgbe_atr_set_flex_byte_82599(&input_struct, 2340 - (fs.data & 0xffff)); 2341 2342 /* determine if we need to drop or route the packet */ 2343 - if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) 2344 target_queue = MAX_RX_QUEUES - 1; 2345 else 2346 - target_queue = fs.action; 2347 2348 spin_lock(&adapter->fdir_perfect_lock); 2349 - ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct, 2350 - &input_masks, 0, target_queue); 2351 spin_unlock(&adapter->fdir_perfect_lock); 2352 2353 - return 0; 2354 } 2355 2356 static const struct ethtool_ops ixgbe_ethtool_ops = {
··· 1477 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1478 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1479 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1480 + ixgbe_disable_rx_queue(adapter, rx_ring); 1481 1482 /* now Tx */ 1483 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); ··· 2279 struct ethtool_rx_ntuple *cmd) 2280 { 2281 struct ixgbe_adapter *adapter = netdev_priv(dev); 2282 + struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs; 2283 + union ixgbe_atr_input input_struct; 2284 struct ixgbe_atr_input_masks input_masks; 2285 int target_queue; 2286 + int err; 2287 2288 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 2289 return -EOPNOTSUPP; ··· 2291 * Don't allow programming if the action is a queue greater than 2292 * the number of online Tx queues. 2293 */ 2294 + if ((fs->action >= adapter->num_tx_queues) || 2295 + (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP)) 2296 return -EINVAL; 2297 2298 + memset(&input_struct, 0, sizeof(union ixgbe_atr_input)); 2299 memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); 2300 2301 + /* record flow type */ 2302 + switch (fs->flow_type) { 2303 + case IPV4_FLOW: 2304 + input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; 2305 + break; 2306 case TCP_V4_FLOW: 2307 + input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 2308 break; 2309 case UDP_V4_FLOW: 2310 + input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; 2311 break; 2312 case SCTP_V4_FLOW: 2313 + input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; 2314 break; 2315 default: 2316 return -1; 2317 } 2318 2319 + /* copy vlan tag minus the CFI bit */ 2320 + if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) { 2321 + input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF); 2322 + if (!fs->vlan_tag_mask) { 2323 + input_masks.vlan_id_mask = htons(0xEFFF); 2324 + } else { 2325 + switch (~fs->vlan_tag_mask & 0xEFFF) { 2326 + /* all of these are valid vlan-mask values */ 2327 + case 0xEFFF: 2328 + case 0xE000: 2329 + case 
0x0FFF: 2330 + case 0x0000: 2331 + input_masks.vlan_id_mask = 2332 + htons(~fs->vlan_tag_mask); 2333 + break; 2334 + /* exit with error if vlan-mask is invalid */ 2335 + default: 2336 + e_err(drv, "Partial VLAN ID or " 2337 + "priority mask in vlan-mask is not " 2338 + "supported by hardware\n"); 2339 + return -1; 2340 + } 2341 + } 2342 + } 2343 2344 + /* make sure we only use the first 2 bytes of user data */ 2345 + if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) { 2346 + input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF); 2347 + if (!(fs->data_mask & 0xFFFF)) { 2348 + input_masks.flex_mask = 0xFFFF; 2349 + } else if (~fs->data_mask & 0xFFFF) { 2350 + e_err(drv, "Partial user-def-mask is not " 2351 + "supported by hardware\n"); 2352 + return -1; 2353 + } 2354 + } 2355 2356 + /* 2357 + * Copy input into formatted structures 2358 + * 2359 + * These assignments are based on the following logic 2360 + * If neither input or mask are set assume value is masked out. 2361 + * If input is set, but mask is not mask should default to accept all. 2362 + * If input is not set, but mask is set then mask likely results in 0. 2363 + * If input is set and mask is set then assign both. 
2364 + */ 2365 + if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) { 2366 + input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src; 2367 + if (!fs->m_u.tcp_ip4_spec.ip4src) 2368 + input_masks.src_ip_mask[0] = 0xFFFFFFFF; 2369 + else 2370 + input_masks.src_ip_mask[0] = 2371 + ~fs->m_u.tcp_ip4_spec.ip4src; 2372 + } 2373 + if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) { 2374 + input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst; 2375 + if (!fs->m_u.tcp_ip4_spec.ip4dst) 2376 + input_masks.dst_ip_mask[0] = 0xFFFFFFFF; 2377 + else 2378 + input_masks.dst_ip_mask[0] = 2379 + ~fs->m_u.tcp_ip4_spec.ip4dst; 2380 + } 2381 + if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) { 2382 + input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc; 2383 + if (!fs->m_u.tcp_ip4_spec.psrc) 2384 + input_masks.src_port_mask = 0xFFFF; 2385 + else 2386 + input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc; 2387 + } 2388 + if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) { 2389 + input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst; 2390 + if (!fs->m_u.tcp_ip4_spec.pdst) 2391 + input_masks.dst_port_mask = 0xFFFF; 2392 + else 2393 + input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst; 2394 + } 2395 2396 /* determine if we need to drop or route the packet */ 2397 + if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP) 2398 target_queue = MAX_RX_QUEUES - 1; 2399 else 2400 + target_queue = fs->action; 2401 2402 spin_lock(&adapter->fdir_perfect_lock); 2403 + err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, 2404 + &input_struct, 2405 + &input_masks, 0, 2406 + target_queue); 2407 spin_unlock(&adapter->fdir_perfect_lock); 2408 2409 + return err ? -1 : 0; 2410 } 2411 2412 static const struct ethtool_ops ixgbe_ethtool_ops = {
+124 -45
drivers/net/ixgbe/ixgbe_main.c
··· 3024 } 3025 } 3026 3027 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, 3028 struct ixgbe_ring *ring) 3029 { ··· 3064 3065 /* disable queue to avoid issues while updating state */ 3066 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3067 - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), 3068 - rxdctl & ~IXGBE_RXDCTL_ENABLE); 3069 - IXGBE_WRITE_FLUSH(hw); 3070 3071 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); 3072 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); ··· 4092 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4093 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 4094 4095 - IXGBE_WRITE_FLUSH(hw); 4096 msleep(10); 4097 4098 netif_tx_stop_all_queues(netdev); ··· 4821 4822 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 4823 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 4824 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 4825 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 4826 adapter->atr_sample_rate = 0; ··· 5132 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 5133 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) 5134 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 5135 - if (dev->features & NETIF_F_NTUPLE) { 5136 - /* Flow Director perfect filter enabled */ 5137 - adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 5138 - adapter->atr_sample_rate = 0; 5139 - spin_lock_init(&adapter->fdir_perfect_lock); 5140 - } else { 5141 - /* Flow Director hash filters enabled */ 5142 - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 5143 - adapter->atr_sample_rate = 20; 5144 - } 5145 adapter->ring_feature[RING_F_FDIR].indices = 5146 IXGBE_MAX_FDIR_INDICES; 5147 adapter->fdir_pballoc = 0; ··· 6507 writel(i, tx_ring->tail); 6508 } 6509 6510 - static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6511 - u8 queue, u32 tx_flags, __be16 protocol) 6512 { 6513 - struct ixgbe_atr_input atr_input; 6514 - struct iphdr *iph = ip_hdr(skb); 6515 - struct ethhdr *eth = (struct ethhdr *)skb->data; 6516 struct 
tcphdr *th; 6517 - u16 vlan_id; 6518 6519 - /* Right now, we support IPv4 w/ TCP only */ 6520 - if (protocol != htons(ETH_P_IP) || 6521 - iph->protocol != IPPROTO_TCP) 6522 return; 6523 6524 - memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); 6525 6526 - vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> 6527 - IXGBE_TX_FLAGS_VLAN_SHIFT; 6528 6529 th = tcp_hdr(skb); 6530 6531 - ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); 6532 - ixgbe_atr_set_src_port_82599(&atr_input, th->dest); 6533 - ixgbe_atr_set_dst_port_82599(&atr_input, th->source); 6534 - ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto); 6535 - ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP); 6536 - /* src and dst are inverted, think how the receiver sees them */ 6537 - ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr); 6538 - ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr); 6539 6540 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 6541 - ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); 6542 } 6543 6544 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) ··· 6763 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); 6764 if (count) { 6765 /* add the ATR filter if ATR is on */ 6766 - if (tx_ring->atr_sample_rate) { 6767 - ++tx_ring->atr_count; 6768 - if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && 6769 - test_bit(__IXGBE_TX_FDIR_INIT_DONE, 6770 - &tx_ring->state)) { 6771 - ixgbe_atr(adapter, skb, tx_ring->queue_index, 6772 - tx_flags, protocol); 6773 - tx_ring->atr_count = 0; 6774 - } 6775 - } 6776 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); 6777 txq->tx_bytes += skb->len; 6778 txq->tx_packets++;
··· 3024 } 3025 } 3026 3027 + void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, 3028 + struct ixgbe_ring *ring) 3029 + { 3030 + struct ixgbe_hw *hw = &adapter->hw; 3031 + int wait_loop = IXGBE_MAX_RX_DESC_POLL; 3032 + u32 rxdctl; 3033 + u8 reg_idx = ring->reg_idx; 3034 + 3035 + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3036 + rxdctl &= ~IXGBE_RXDCTL_ENABLE; 3037 + 3038 + /* write value back with RXDCTL.ENABLE bit cleared */ 3039 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); 3040 + 3041 + if (hw->mac.type == ixgbe_mac_82598EB && 3042 + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) 3043 + return; 3044 + 3045 + /* the hardware may take up to 100us to really disable the rx queue */ 3046 + do { 3047 + udelay(10); 3048 + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3049 + } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); 3050 + 3051 + if (!wait_loop) { 3052 + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " 3053 + "the polling period\n", reg_idx); 3054 + } 3055 + } 3056 + 3057 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, 3058 struct ixgbe_ring *ring) 3059 { ··· 3034 3035 /* disable queue to avoid issues while updating state */ 3036 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3037 + ixgbe_disable_rx_queue(adapter, ring); 3038 3039 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); 3040 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); ··· 4064 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4065 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 4066 4067 + /* disable all enabled rx queues */ 4068 + for (i = 0; i < adapter->num_rx_queues; i++) 4069 + /* this call also flushes the previous write */ 4070 + ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); 4071 + 4072 msleep(10); 4073 4074 netif_tx_stop_all_queues(netdev); ··· 4789 4790 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 4791 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 4792 + if (adapter->flags & 
(IXGBE_FLAG_FDIR_HASH_CAPABLE | 4793 + IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { 4794 + e_err(probe, 4795 + "Flow Director is not supported while multiple " 4796 + "queues are disabled. Disabling Flow Director\n"); 4797 + } 4798 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 4799 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 4800 adapter->atr_sample_rate = 0; ··· 5094 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 5095 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) 5096 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 5097 + /* n-tuple support exists, always init our spinlock */ 5098 + spin_lock_init(&adapter->fdir_perfect_lock); 5099 + /* Flow Director hash filters enabled */ 5100 + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 5101 + adapter->atr_sample_rate = 20; 5102 adapter->ring_feature[RING_F_FDIR].indices = 5103 IXGBE_MAX_FDIR_INDICES; 5104 adapter->fdir_pballoc = 0; ··· 6474 writel(i, tx_ring->tail); 6475 } 6476 6477 + static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, 6478 + u32 tx_flags, __be16 protocol) 6479 { 6480 + struct ixgbe_q_vector *q_vector = ring->q_vector; 6481 + union ixgbe_atr_hash_dword input = { .dword = 0 }; 6482 + union ixgbe_atr_hash_dword common = { .dword = 0 }; 6483 + union { 6484 + unsigned char *network; 6485 + struct iphdr *ipv4; 6486 + struct ipv6hdr *ipv6; 6487 + } hdr; 6488 struct tcphdr *th; 6489 + __be16 vlan_id; 6490 6491 + /* if ring doesn't have a interrupt vector, cannot perform ATR */ 6492 + if (!q_vector) 6493 return; 6494 6495 + /* do nothing if sampling is disabled */ 6496 + if (!ring->atr_sample_rate) 6497 + return; 6498 6499 + ring->atr_count++; 6500 + 6501 + /* snag network header to get L4 type and address */ 6502 + hdr.network = skb_network_header(skb); 6503 + 6504 + /* Currently only IPv4/IPv6 with TCP is supported */ 6505 + if ((protocol != __constant_htons(ETH_P_IPV6) || 6506 + hdr.ipv6->nexthdr != IPPROTO_TCP) && 6507 + (protocol != __constant_htons(ETH_P_IP) || 6508 + hdr.ipv4->protocol 
!= IPPROTO_TCP)) 6509 + return; 6510 6511 th = tcp_hdr(skb); 6512 6513 + /* skip this packet since the socket is closing */ 6514 + if (th->fin) 6515 + return; 6516 + 6517 + /* sample on all syn packets or once every atr sample count */ 6518 + if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) 6519 + return; 6520 + 6521 + /* reset sample count */ 6522 + ring->atr_count = 0; 6523 + 6524 + vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); 6525 + 6526 + /* 6527 + * src and dst are inverted, think how the receiver sees them 6528 + * 6529 + * The input is broken into two sections, a non-compressed section 6530 + * containing vm_pool, vlan_id, and flow_type. The rest of the data 6531 + * is XORed together and stored in the compressed dword. 6532 + */ 6533 + input.formatted.vlan_id = vlan_id; 6534 + 6535 + /* 6536 + * since src port and flex bytes occupy the same word XOR them together 6537 + * and write the value to source port portion of compressed dword 6538 + */ 6539 + if (vlan_id) 6540 + common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); 6541 + else 6542 + common.port.src ^= th->dest ^ protocol; 6543 + common.port.dst ^= th->source; 6544 + 6545 + if (protocol == __constant_htons(ETH_P_IP)) { 6546 + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; 6547 + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; 6548 + } else { 6549 + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; 6550 + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ 6551 + hdr.ipv6->saddr.s6_addr32[1] ^ 6552 + hdr.ipv6->saddr.s6_addr32[2] ^ 6553 + hdr.ipv6->saddr.s6_addr32[3] ^ 6554 + hdr.ipv6->daddr.s6_addr32[0] ^ 6555 + hdr.ipv6->daddr.s6_addr32[1] ^ 6556 + hdr.ipv6->daddr.s6_addr32[2] ^ 6557 + hdr.ipv6->daddr.s6_addr32[3]; 6558 + } 6559 6560 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 6561 + ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, 6562 + input, common, ring->queue_index); 6563 } 6564 6565 static int 
__ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) ··· 6676 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); 6677 if (count) { 6678 /* add the ATR filter if ATR is on */ 6679 + if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) 6680 + ixgbe_atr(tx_ring, skb, tx_flags, protocol); 6681 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); 6682 txq->tx_bytes += skb->len; 6683 txq->tx_packets++;
+60 -31
drivers/net/ixgbe/ixgbe_type.h
··· 1947 #define IXGBE_FDIRM_VLANID 0x00000001 1948 #define IXGBE_FDIRM_VLANP 0x00000002 1949 #define IXGBE_FDIRM_POOL 0x00000004 1950 - #define IXGBE_FDIRM_L3P 0x00000008 1951 - #define IXGBE_FDIRM_L4P 0x00000010 1952 - #define IXGBE_FDIRM_FLEX 0x00000020 1953 - #define IXGBE_FDIRM_DIPv6 0x00000040 1954 1955 #define IXGBE_FDIRFREE_FREE_MASK 0xFFFF 1956 #define IXGBE_FDIRFREE_FREE_SHIFT 0 ··· 1989 #define IXGBE_FDIRCMD_LAST 0x00000800 1990 #define IXGBE_FDIRCMD_COLLISION 0x00001000 1991 #define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 1992 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 1993 #define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 1994 #define IXGBE_FDIR_INIT_DONE_POLL 10 ··· 2147 #define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) 2148 2149 /* Software ATR hash keys */ 2150 - #define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D 2151 - #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 2152 2153 - /* Software ATR input stream offsets and masks */ 2154 - #define IXGBE_ATR_VLAN_OFFSET 0 2155 - #define IXGBE_ATR_SRC_IPV6_OFFSET 2 2156 - #define IXGBE_ATR_SRC_IPV4_OFFSET 14 2157 - #define IXGBE_ATR_DST_IPV6_OFFSET 18 2158 - #define IXGBE_ATR_DST_IPV4_OFFSET 30 2159 - #define IXGBE_ATR_SRC_PORT_OFFSET 34 2160 - #define IXGBE_ATR_DST_PORT_OFFSET 36 2161 - #define IXGBE_ATR_FLEX_BYTE_OFFSET 38 2162 - #define IXGBE_ATR_VM_POOL_OFFSET 40 2163 - #define IXGBE_ATR_L4TYPE_OFFSET 41 2164 - 2165 #define IXGBE_ATR_L4TYPE_MASK 0x3 2166 - #define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 2167 #define IXGBE_ATR_L4TYPE_UDP 0x1 2168 #define IXGBE_ATR_L4TYPE_TCP 0x2 2169 #define IXGBE_ATR_L4TYPE_SCTP 0x3 2170 - #define IXGBE_ATR_HASH_MASK 0x7fff 2171 2172 /* Flow Director ATR input struct. 
*/ 2173 - struct ixgbe_atr_input { 2174 - /* Byte layout in order, all values with MSB first: 2175 * 2176 * vlan_id - 2 bytes 2177 * src_ip - 16 bytes 2178 * dst_ip - 16 bytes 2179 * src_port - 2 bytes 2180 * dst_port - 2 bytes 2181 * flex_bytes - 2 bytes 2182 - * vm_pool - 1 byte 2183 - * l4type - 1 byte 2184 */ 2185 - u8 byte_stream[42]; 2186 }; 2187 2188 struct ixgbe_atr_input_masks { 2189 - u32 src_ip_mask; 2190 - u32 dst_ip_mask; 2191 - u16 src_port_mask; 2192 - u16 dst_port_mask; 2193 - u16 vlan_id_mask; 2194 - u16 data_mask; 2195 }; 2196 2197 enum ixgbe_eeprom_type {
··· 1947 #define IXGBE_FDIRM_VLANID 0x00000001 1948 #define IXGBE_FDIRM_VLANP 0x00000002 1949 #define IXGBE_FDIRM_POOL 0x00000004 1950 + #define IXGBE_FDIRM_L4P 0x00000008 1951 + #define IXGBE_FDIRM_FLEX 0x00000010 1952 + #define IXGBE_FDIRM_DIPv6 0x00000020 1953 1954 #define IXGBE_FDIRFREE_FREE_MASK 0xFFFF 1955 #define IXGBE_FDIRFREE_FREE_SHIFT 0 ··· 1990 #define IXGBE_FDIRCMD_LAST 0x00000800 1991 #define IXGBE_FDIRCMD_COLLISION 0x00001000 1992 #define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 1993 + #define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 1994 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 1995 #define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 1996 #define IXGBE_FDIR_INIT_DONE_POLL 10 ··· 2147 #define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) 2148 2149 /* Software ATR hash keys */ 2150 + #define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 2151 + #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 2152 2153 + /* Software ATR input stream values and masks */ 2154 + #define IXGBE_ATR_HASH_MASK 0x7fff 2155 #define IXGBE_ATR_L4TYPE_MASK 0x3 2156 #define IXGBE_ATR_L4TYPE_UDP 0x1 2157 #define IXGBE_ATR_L4TYPE_TCP 0x2 2158 #define IXGBE_ATR_L4TYPE_SCTP 0x3 2159 + #define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 2160 + enum ixgbe_atr_flow_type { 2161 + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, 2162 + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, 2163 + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, 2164 + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, 2165 + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, 2166 + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, 2167 + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, 2168 + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, 2169 + }; 2170 2171 /* Flow Director ATR input struct. */ 2172 + union ixgbe_atr_input { 2173 + /* 2174 + * Byte layout in order, all values with MSB first: 2175 * 2176 + * vm_pool - 1 byte 2177 + * flow_type - 1 byte 2178 * vlan_id - 2 bytes 2179 * src_ip - 16 bytes 2180 * dst_ip - 16 bytes 2181 * src_port - 2 bytes 2182 * dst_port - 2 bytes 2183 * flex_bytes - 2 bytes 2184 + * rsvd0 - 2 bytes - space reserved must be 0. 
2185 */ 2186 + struct { 2187 + u8 vm_pool; 2188 + u8 flow_type; 2189 + __be16 vlan_id; 2190 + __be32 dst_ip[4]; 2191 + __be32 src_ip[4]; 2192 + __be16 src_port; 2193 + __be16 dst_port; 2194 + __be16 flex_bytes; 2195 + __be16 rsvd0; 2196 + } formatted; 2197 + __be32 dword_stream[11]; 2198 + }; 2199 + 2200 + /* Flow Director compressed ATR hash input struct */ 2201 + union ixgbe_atr_hash_dword { 2202 + struct { 2203 + u8 vm_pool; 2204 + u8 flow_type; 2205 + __be16 vlan_id; 2206 + } formatted; 2207 + __be32 ip; 2208 + struct { 2209 + __be16 src; 2210 + __be16 dst; 2211 + } port; 2212 + __be16 flex_bytes; 2213 + __be32 dword; 2214 }; 2215 2216 struct ixgbe_atr_input_masks { 2217 + __be16 rsvd0; 2218 + __be16 vlan_id_mask; 2219 + __be32 dst_ip_mask[4]; 2220 + __be32 src_ip_mask[4]; 2221 + __be16 src_port_mask; 2222 + __be16 dst_port_mask; 2223 + __be16 flex_mask; 2224 }; 2225 2226 enum ixgbe_eeprom_type {
+2 -1
drivers/net/mlx4/en_netdev.c
··· 972 int i; 973 int err; 974 975 - dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); 976 if (dev == NULL) { 977 mlx4_err(mdev, "Net device allocation failed\n"); 978 return -ENOMEM;
··· 972 int i; 973 int err; 974 975 + dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 976 + prof->tx_ring_num, prof->rx_ring_num); 977 if (dev == NULL) { 978 mlx4_err(mdev, "Net device allocation failed\n"); 979 return -ENOMEM;
+1
drivers/net/pcmcia/pcnet_cs.c
··· 1536 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), 1537 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), 1538 PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), 1539 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), 1540 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), 1541 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
··· 1536 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), 1537 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), 1538 PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), 1539 + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b), 1540 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), 1541 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), 1542 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
+5 -5
drivers/net/ppp_async.c
··· 32 #include <linux/init.h> 33 #include <linux/jiffies.h> 34 #include <linux/slab.h> 35 #include <asm/uaccess.h> 36 #include <asm/string.h> 37 ··· 543 data = ap->tpkt->data; 544 count = ap->tpkt->len; 545 fcs = ap->tfcs; 546 - proto = (data[0] << 8) + data[1]; 547 548 /* 549 * LCP packets with code values between 1 (configure-reqest) ··· 964 code = data[0]; 965 if (code != CONFACK && code != CONFREQ) 966 return; 967 - dlen = (data[2] << 8) + data[3]; 968 if (len < dlen) 969 return; /* packet got truncated or length is bogus */ 970 ··· 998 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { 999 switch (data[0]) { 1000 case LCP_MRU: 1001 - val = (data[2] << 8) + data[3]; 1002 if (inbound) 1003 ap->mru = val; 1004 else 1005 ap->chan.mtu = val; 1006 break; 1007 case LCP_ASYNCMAP: 1008 - val = (data[2] << 24) + (data[3] << 16) 1009 - + (data[4] << 8) + data[5]; 1010 if (inbound) 1011 ap->raccm = val; 1012 else
··· 32 #include <linux/init.h> 33 #include <linux/jiffies.h> 34 #include <linux/slab.h> 35 + #include <asm/unaligned.h> 36 #include <asm/uaccess.h> 37 #include <asm/string.h> 38 ··· 542 data = ap->tpkt->data; 543 count = ap->tpkt->len; 544 fcs = ap->tfcs; 545 + proto = get_unaligned_be16(data); 546 547 /* 548 * LCP packets with code values between 1 (configure-reqest) ··· 963 code = data[0]; 964 if (code != CONFACK && code != CONFREQ) 965 return; 966 + dlen = get_unaligned_be16(data + 2); 967 if (len < dlen) 968 return; /* packet got truncated or length is bogus */ 969 ··· 997 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { 998 switch (data[0]) { 999 case LCP_MRU: 1000 + val = get_unaligned_be16(data + 2); 1001 if (inbound) 1002 ap->mru = val; 1003 else 1004 ap->chan.mtu = val; 1005 break; 1006 case LCP_ASYNCMAP: 1007 + val = get_unaligned_be32(data + 2); 1008 if (inbound) 1009 ap->raccm = val; 1010 else
+4 -5
drivers/net/ppp_deflate.c
··· 41 #include <linux/ppp-comp.h> 42 43 #include <linux/zlib.h> 44 45 /* 46 * State for a Deflate (de)compressor. ··· 233 */ 234 wptr[0] = PPP_ADDRESS(rptr); 235 wptr[1] = PPP_CONTROL(rptr); 236 - wptr[2] = PPP_COMP >> 8; 237 - wptr[3] = PPP_COMP; 238 wptr += PPP_HDRLEN; 239 - wptr[0] = state->seqno >> 8; 240 - wptr[1] = state->seqno; 241 wptr += DEFLATE_OVHD; 242 olen = PPP_HDRLEN + DEFLATE_OVHD; 243 state->strm.next_out = wptr; ··· 450 } 451 452 /* Check the sequence number. */ 453 - seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1]; 454 if (seq != (state->seqno & 0xffff)) { 455 if (state->debug) 456 printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
··· 41 #include <linux/ppp-comp.h> 42 43 #include <linux/zlib.h> 44 + #include <asm/unaligned.h> 45 46 /* 47 * State for a Deflate (de)compressor. ··· 232 */ 233 wptr[0] = PPP_ADDRESS(rptr); 234 wptr[1] = PPP_CONTROL(rptr); 235 + put_unaligned_be16(PPP_COMP, wptr + 2); 236 wptr += PPP_HDRLEN; 237 + put_unaligned_be16(state->seqno, wptr); 238 wptr += DEFLATE_OVHD; 239 olen = PPP_HDRLEN + DEFLATE_OVHD; 240 state->strm.next_out = wptr; ··· 451 } 452 453 /* Check the sequence number. */ 454 + seq = get_unaligned_be16(ibuf + PPP_HDRLEN); 455 if (seq != (state->seqno & 0xffff)) { 456 if (state->debug) 457 printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
+4 -5
drivers/net/ppp_generic.c
··· 46 #include <linux/device.h> 47 #include <linux/mutex.h> 48 #include <linux/slab.h> 49 #include <net/slhc_vj.h> 50 #include <asm/atomic.h> 51 ··· 211 }; 212 213 /* Get the PPP protocol number from a skb */ 214 - #define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) 215 216 /* We limit the length of ppp->file.rq to this (arbitrary) value */ 217 #define PPP_MAX_RQLEN 32 ··· 965 966 pp = skb_push(skb, 2); 967 proto = npindex_to_proto[npi]; 968 - pp[0] = proto >> 8; 969 - pp[1] = proto; 970 971 netif_stop_queue(dev); 972 skb_queue_tail(&ppp->file.xq, skb); ··· 1473 q = skb_put(frag, flen + hdrlen); 1474 1475 /* make the MP header */ 1476 - q[0] = PPP_MP >> 8; 1477 - q[1] = PPP_MP; 1478 if (ppp->flags & SC_MP_XSHORTSEQ) { 1479 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1480 q[3] = ppp->nxseq;
··· 46 #include <linux/device.h> 47 #include <linux/mutex.h> 48 #include <linux/slab.h> 49 + #include <asm/unaligned.h> 50 #include <net/slhc_vj.h> 51 #include <asm/atomic.h> 52 ··· 210 }; 211 212 /* Get the PPP protocol number from a skb */ 213 + #define PPP_PROTO(skb) get_unaligned_be16((skb)->data) 214 215 /* We limit the length of ppp->file.rq to this (arbitrary) value */ 216 #define PPP_MAX_RQLEN 32 ··· 964 965 pp = skb_push(skb, 2); 966 proto = npindex_to_proto[npi]; 967 + put_unaligned_be16(proto, pp); 968 969 netif_stop_queue(dev); 970 skb_queue_tail(&ppp->file.xq, skb); ··· 1473 q = skb_put(frag, flen + hdrlen); 1474 1475 /* make the MP header */ 1476 + put_unaligned_be16(PPP_MP, q); 1477 if (ppp->flags & SC_MP_XSHORTSEQ) { 1478 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1479 q[3] = ppp->nxseq;
+3 -4
drivers/net/ppp_mppe.c
··· 55 #include <linux/ppp_defs.h> 56 #include <linux/ppp-comp.h> 57 #include <linux/scatterlist.h> 58 59 #include "ppp_mppe.h" 60 ··· 396 */ 397 obuf[0] = PPP_ADDRESS(ibuf); 398 obuf[1] = PPP_CONTROL(ibuf); 399 - obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ 400 - obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */ 401 obuf += PPP_HDRLEN; 402 403 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; 404 if (state->debug >= 7) 405 printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, 406 state->ccount); 407 - obuf[0] = state->ccount >> 8; 408 - obuf[1] = state->ccount & 0xff; 409 410 if (!state->stateful || /* stateless mode */ 411 ((state->ccount & 0xff) == 0xff) || /* "flag" packet */
··· 55 #include <linux/ppp_defs.h> 56 #include <linux/ppp-comp.h> 57 #include <linux/scatterlist.h> 58 + #include <asm/unaligned.h> 59 60 #include "ppp_mppe.h" 61 ··· 395 */ 396 obuf[0] = PPP_ADDRESS(ibuf); 397 obuf[1] = PPP_CONTROL(ibuf); 398 + put_unaligned_be16(PPP_COMP, obuf + 2); 399 obuf += PPP_HDRLEN; 400 401 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; 402 if (state->debug >= 7) 403 printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, 404 state->ccount); 405 + put_unaligned_be16(state->ccount, obuf); 406 407 if (!state->stateful || /* stateless mode */ 408 ((state->ccount & 0xff) == 0xff) || /* "flag" packet */
+2 -1
drivers/net/ppp_synctty.c
··· 45 #include <linux/completion.h> 46 #include <linux/init.h> 47 #include <linux/slab.h> 48 #include <asm/uaccess.h> 49 50 #define PPP_VERSION "2.4.2" ··· 564 int islcp; 565 566 data = skb->data; 567 - proto = (data[0] << 8) + data[1]; 568 569 /* LCP packets with codes between 1 (configure-request) 570 * and 7 (code-reject) must be sent as though no options
··· 45 #include <linux/completion.h> 46 #include <linux/init.h> 47 #include <linux/slab.h> 48 + #include <asm/unaligned.h> 49 #include <asm/uaccess.h> 50 51 #define PPP_VERSION "2.4.2" ··· 563 int islcp; 564 565 data = skb->data; 566 + proto = get_unaligned_be16(data); 567 568 /* LCP packets with codes between 1 (configure-request) 569 * and 7 (code-reject) must be sent as though no options
+22 -2
drivers/net/qlcnic/qlcnic.h
··· 34 35 #define _QLCNIC_LINUX_MAJOR 5 36 #define _QLCNIC_LINUX_MINOR 0 37 - #define _QLCNIC_LINUX_SUBVERSION 14 38 - #define QLCNIC_LINUX_VERSIONID "5.0.14" 39 #define QLCNIC_DRV_IDC_VER 0x01 40 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) ··· 287 u32 findex; 288 u32 size; 289 u32 reserved[5]; 290 }; 291 292 /* Magic number to let user know flash is programmed */
··· 34 35 #define _QLCNIC_LINUX_MAJOR 5 36 #define _QLCNIC_LINUX_MINOR 0 37 + #define _QLCNIC_LINUX_SUBVERSION 15 38 + #define QLCNIC_LINUX_VERSIONID "5.0.15" 39 #define QLCNIC_DRV_IDC_VER 0x01 40 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) ··· 287 u32 findex; 288 u32 size; 289 u32 reserved[5]; 290 + }; 291 + 292 + /* Flash Defines and Structures */ 293 + #define QLCNIC_FLT_LOCATION 0x3F1000 294 + #define QLCNIC_FW_IMAGE_REGION 0x74 295 + struct qlcnic_flt_header { 296 + u16 version; 297 + u16 len; 298 + u16 checksum; 299 + u16 reserved; 300 + }; 301 + 302 + struct qlcnic_flt_entry { 303 + u8 region; 304 + u8 reserved0; 305 + u8 attrib; 306 + u8 reserved1; 307 + u32 size; 308 + u32 start_addr; 309 + u32 end_add; 310 }; 311 312 /* Magic number to let user know flash is programmed */
+1 -1
drivers/net/qlcnic/qlcnic_ethtool.c
··· 672 if (data[1]) 673 eth_test->flags |= ETH_TEST_FL_FAILED; 674 675 - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 676 data[2] = qlcnic_irq_test(dev); 677 if (data[2]) 678 eth_test->flags |= ETH_TEST_FL_FAILED;
··· 672 if (data[1]) 673 eth_test->flags |= ETH_TEST_FL_FAILED; 674 675 + if (eth_test->flags & ETH_TEST_FL_OFFLINE) { 676 data[2] = qlcnic_irq_test(dev); 677 if (data[2]) 678 eth_test->flags |= ETH_TEST_FL_FAILED;
+62 -1
drivers/net/qlcnic/qlcnic_init.c
··· 627 return 0; 628 } 629 630 int 631 qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) 632 { 633 u32 ver = -1, min_ver; 634 635 - qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); 636 637 ver = QLCNIC_DECODE_VERSION(ver); 638 min_ver = QLCNIC_MIN_FW_VERSION;
··· 627 return 0; 628 } 629 630 + static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, 631 + struct qlcnic_flt_entry *region_entry) 632 + { 633 + struct qlcnic_flt_header flt_hdr; 634 + struct qlcnic_flt_entry *flt_entry; 635 + int i = 0, ret; 636 + u32 entry_size; 637 + 638 + memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); 639 + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, 640 + (u8 *)&flt_hdr, 641 + sizeof(struct qlcnic_flt_header)); 642 + if (ret) { 643 + dev_warn(&adapter->pdev->dev, 644 + "error reading flash layout header\n"); 645 + return -EIO; 646 + } 647 + 648 + entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); 649 + flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); 650 + if (flt_entry == NULL) { 651 + dev_warn(&adapter->pdev->dev, "error allocating memory\n"); 652 + return -EIO; 653 + } 654 + 655 + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + 656 + sizeof(struct qlcnic_flt_header), 657 + (u8 *)flt_entry, entry_size); 658 + if (ret) { 659 + dev_warn(&adapter->pdev->dev, 660 + "error reading flash layout entries\n"); 661 + goto err_out; 662 + } 663 + 664 + while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { 665 + if (flt_entry[i].region == region) 666 + break; 667 + i++; 668 + } 669 + if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { 670 + dev_warn(&adapter->pdev->dev, 671 + "region=%x not found in %d regions\n", region, i); 672 + ret = -EIO; 673 + goto err_out; 674 + } 675 + memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); 676 + 677 + err_out: 678 + vfree(flt_entry); 679 + return ret; 680 + } 681 + 682 int 683 qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) 684 { 685 + struct qlcnic_flt_entry fw_entry; 686 u32 ver = -1, min_ver; 687 + int ret; 688 689 + ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry); 690 + if (!ret) 691 + /* 0-4:-signature, 4-8:-fw version */ 692 + qlcnic_rom_fast_read(adapter, 
fw_entry.start_addr + 4, 693 + (int *)&ver); 694 + else 695 + qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, 696 + (int *)&ver); 697 698 ver = QLCNIC_DECODE_VERSION(ver); 699 min_ver = QLCNIC_MIN_FW_VERSION;
+5 -5
drivers/net/qlcnic/qlcnic_main.c
··· 31 32 static struct workqueue_struct *qlcnic_wq; 33 static int qlcnic_mac_learn; 34 - module_param(qlcnic_mac_learn, int, 0644); 35 MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); 36 37 static int use_msi = 1; 38 - module_param(use_msi, int, 0644); 39 MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); 40 41 static int use_msi_x = 1; 42 - module_param(use_msi_x, int, 0644); 43 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); 44 45 static int auto_fw_reset = AUTO_FW_RESET_ENABLED; ··· 47 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); 48 49 static int load_fw_file; 50 - module_param(load_fw_file, int, 0644); 51 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); 52 53 static int qlcnic_config_npars; 54 - module_param(qlcnic_config_npars, int, 0644); 55 MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); 56 57 static int __devinit qlcnic_probe(struct pci_dev *pdev,
··· 31 32 static struct workqueue_struct *qlcnic_wq; 33 static int qlcnic_mac_learn; 34 + module_param(qlcnic_mac_learn, int, 0444); 35 MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); 36 37 static int use_msi = 1; 38 + module_param(use_msi, int, 0444); 39 MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); 40 41 static int use_msi_x = 1; 42 + module_param(use_msi_x, int, 0444); 43 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); 44 45 static int auto_fw_reset = AUTO_FW_RESET_ENABLED; ··· 47 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); 48 49 static int load_fw_file; 50 + module_param(load_fw_file, int, 0444); 51 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); 52 53 static int qlcnic_config_npars; 54 + module_param(qlcnic_config_npars, int, 0444); 55 MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); 56 57 static int __devinit qlcnic_probe(struct pci_dev *pdev,
+122 -23
drivers/net/r8169.c
··· 1632 { 1633 __le32 *phytable = (__le32 *)fw->data; 1634 struct net_device *dev = tp->dev; 1635 - size_t i; 1636 1637 if (fw->size % sizeof(*phytable)) { 1638 netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); 1639 return; 1640 } 1641 1642 - for (i = 0; i < fw->size / sizeof(*phytable); i++) { 1643 - u32 action = le32_to_cpu(phytable[i]); 1644 1645 - if (!action) 1646 break; 1647 1648 - if ((action & 0xf0000000) != PHY_WRITE) { 1649 - netif_err(tp, probe, dev, 1650 - "unknown action 0x%08x\n", action); 1651 return; 1652 } 1653 } 1654 1655 - while (i-- != 0) { 1656 - u32 action = le32_to_cpu(*phytable); 1657 u32 data = action & 0x0000ffff; 1658 - u32 reg = (action & 0x0fff0000) >> 16; 1659 1660 switch(action & 0xf0000000) { 1661 - case PHY_WRITE: 1662 - rtl_writephy(tp, reg, data); 1663 - phytable++; 1664 break; 1665 default: 1666 BUG(); 1667 } ··· 3167 rtl8168_driver_start(tp); 3168 } 3169 3170 - rtl8169_init_phy(dev, tp); 3171 - 3172 - /* 3173 - * Pretend we are using VLANs; This bypasses a nasty bug where 3174 - * Interrupts stop flowing on high load on 8110SCd controllers. 3175 - */ 3176 - if (tp->mac_version == RTL_GIGA_MAC_VER_05) 3177 - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan); 3178 - 3179 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3180 3181 if (pci_dev_run_wake(pdev)) ··· 3216 static int rtl8169_open(struct net_device *dev) 3217 { 3218 struct rtl8169_private *tp = netdev_priv(dev); 3219 struct pci_dev *pdev = tp->pci_dev; 3220 int retval = -ENOMEM; 3221 ··· 3252 3253 napi_enable(&tp->napi); 3254 3255 rtl_pll_power_up(tp); 3256 3257 rtl_hw_start(dev); ··· 3270 tp->saved_wolopts = 0; 3271 pm_runtime_put_noidle(&pdev->dev); 3272 3273 - rtl8169_check_link_status(dev, tp, tp->mmio_addr); 3274 out: 3275 return retval; 3276
··· 1632 { 1633 __le32 *phytable = (__le32 *)fw->data; 1634 struct net_device *dev = tp->dev; 1635 + size_t index, fw_size = fw->size / sizeof(*phytable); 1636 + u32 predata, count; 1637 1638 if (fw->size % sizeof(*phytable)) { 1639 netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); 1640 return; 1641 } 1642 1643 + for (index = 0; index < fw_size; index++) { 1644 + u32 action = le32_to_cpu(phytable[index]); 1645 + u32 regno = (action & 0x0fff0000) >> 16; 1646 1647 + switch(action & 0xf0000000) { 1648 + case PHY_READ: 1649 + case PHY_DATA_OR: 1650 + case PHY_DATA_AND: 1651 + case PHY_READ_EFUSE: 1652 + case PHY_CLEAR_READCOUNT: 1653 + case PHY_WRITE: 1654 + case PHY_WRITE_PREVIOUS: 1655 + case PHY_DELAY_MS: 1656 break; 1657 1658 + case PHY_BJMPN: 1659 + if (regno > index) { 1660 + netif_err(tp, probe, tp->dev, 1661 + "Out of range of firmware\n"); 1662 + return; 1663 + } 1664 + break; 1665 + case PHY_READCOUNT_EQ_SKIP: 1666 + if (index + 2 >= fw_size) { 1667 + netif_err(tp, probe, tp->dev, 1668 + "Out of range of firmware\n"); 1669 + return; 1670 + } 1671 + break; 1672 + case PHY_COMP_EQ_SKIPN: 1673 + case PHY_COMP_NEQ_SKIPN: 1674 + case PHY_SKIPN: 1675 + if (index + 1 + regno >= fw_size) { 1676 + netif_err(tp, probe, tp->dev, 1677 + "Out of range of firmware\n"); 1678 + return; 1679 + } 1680 + break; 1681 + 1682 + case PHY_READ_MAC_BYTE: 1683 + case PHY_WRITE_MAC_BYTE: 1684 + case PHY_WRITE_ERI_WORD: 1685 + default: 1686 + netif_err(tp, probe, tp->dev, 1687 + "Invalid action 0x%08x\n", action); 1688 return; 1689 } 1690 } 1691 1692 + predata = 0; 1693 + count = 0; 1694 + 1695 + for (index = 0; index < fw_size; ) { 1696 + u32 action = le32_to_cpu(phytable[index]); 1697 u32 data = action & 0x0000ffff; 1698 + u32 regno = (action & 0x0fff0000) >> 16; 1699 + 1700 + if (!action) 1701 + break; 1702 1703 switch(action & 0xf0000000) { 1704 + case PHY_READ: 1705 + predata = rtl_readphy(tp, regno); 1706 + count++; 1707 + index++; 1708 break; 1709 + case 
PHY_DATA_OR: 1710 + predata |= data; 1711 + index++; 1712 + break; 1713 + case PHY_DATA_AND: 1714 + predata &= data; 1715 + index++; 1716 + break; 1717 + case PHY_BJMPN: 1718 + index -= regno; 1719 + break; 1720 + case PHY_READ_EFUSE: 1721 + predata = rtl8168d_efuse_read(tp->mmio_addr, regno); 1722 + index++; 1723 + break; 1724 + case PHY_CLEAR_READCOUNT: 1725 + count = 0; 1726 + index++; 1727 + break; 1728 + case PHY_WRITE: 1729 + rtl_writephy(tp, regno, data); 1730 + index++; 1731 + break; 1732 + case PHY_READCOUNT_EQ_SKIP: 1733 + if (count == data) 1734 + index += 2; 1735 + else 1736 + index += 1; 1737 + break; 1738 + case PHY_COMP_EQ_SKIPN: 1739 + if (predata == data) 1740 + index += regno; 1741 + index++; 1742 + break; 1743 + case PHY_COMP_NEQ_SKIPN: 1744 + if (predata != data) 1745 + index += regno; 1746 + index++; 1747 + break; 1748 + case PHY_WRITE_PREVIOUS: 1749 + rtl_writephy(tp, regno, predata); 1750 + index++; 1751 + break; 1752 + case PHY_SKIPN: 1753 + index += regno + 1; 1754 + break; 1755 + case PHY_DELAY_MS: 1756 + mdelay(data); 1757 + index++; 1758 + break; 1759 + 1760 + case PHY_READ_MAC_BYTE: 1761 + case PHY_WRITE_MAC_BYTE: 1762 + case PHY_WRITE_ERI_WORD: 1763 default: 1764 BUG(); 1765 } ··· 3069 rtl8168_driver_start(tp); 3070 } 3071 3072 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3073 3074 if (pci_dev_run_wake(pdev)) ··· 3127 static int rtl8169_open(struct net_device *dev) 3128 { 3129 struct rtl8169_private *tp = netdev_priv(dev); 3130 + void __iomem *ioaddr = tp->mmio_addr; 3131 struct pci_dev *pdev = tp->pci_dev; 3132 int retval = -ENOMEM; 3133 ··· 3162 3163 napi_enable(&tp->napi); 3164 3165 + rtl8169_init_phy(dev, tp); 3166 + 3167 + /* 3168 + * Pretend we are using VLANs; This bypasses a nasty bug where 3169 + * Interrupts stop flowing on high load on 8110SCd controllers. 
3170 + */ 3171 + if (tp->mac_version == RTL_GIGA_MAC_VER_05) 3172 + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan); 3173 + 3174 rtl_pll_power_up(tp); 3175 3176 rtl_hw_start(dev); ··· 3171 tp->saved_wolopts = 0; 3172 pm_runtime_put_noidle(&pdev->dev); 3173 3174 + rtl8169_check_link_status(dev, tp, ioaddr); 3175 out: 3176 return retval; 3177
+67 -76
drivers/net/sky2.c
··· 46 47 #include <asm/irq.h> 48 49 - #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 50 - #define SKY2_VLAN_TAG_USED 1 51 - #endif 52 - 53 #include "sky2.h" 54 55 #define DRV_NAME "sky2" ··· 1322 return err; 1323 } 1324 1325 - #ifdef SKY2_VLAN_TAG_USED 1326 - static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) 1327 - { 1328 - if (onoff) { 1329 - sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1330 - RX_VLAN_STRIP_ON); 1331 - sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1332 - TX_VLAN_TAG_ON); 1333 - } else { 1334 - sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1335 - RX_VLAN_STRIP_OFF); 1336 - sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1337 - TX_VLAN_TAG_OFF); 1338 - } 1339 - } 1340 1341 - static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 1342 { 1343 struct sky2_port *sky2 = netdev_priv(dev); 1344 struct sky2_hw *hw = sky2->hw; 1345 u16 port = sky2->port; 1346 1347 - netif_tx_lock_bh(dev); 1348 - napi_disable(&hw->napi); 1349 1350 - sky2->vlgrp = grp; 1351 - sky2_set_vlan_mode(hw, port, grp != NULL); 1352 1353 - sky2_read32(hw, B0_Y2_SP_LISR); 1354 - napi_enable(&hw->napi); 1355 - netif_tx_unlock_bh(dev); 1356 } 1357 - #endif 1358 1359 /* Amount of required worst case padding in rx buffer */ 1360 static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) ··· 1626 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1627 sky2->tx_ring_size - 1); 1628 1629 - #ifdef SKY2_VLAN_TAG_USED 1630 - sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); 1631 - #endif 1632 1633 sky2_rx_start(sky2); 1634 } ··· 1769 } 1770 1771 ctrl = 0; 1772 - #ifdef SKY2_VLAN_TAG_USED 1773 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ 1774 if (vlan_tx_tag_present(skb)) { 1775 if (!le) { ··· 1781 le->length = cpu_to_be16(vlan_tx_tag_get(skb)); 1782 ctrl |= INS_VLAN; 1783 } 1784 - #endif 1785 1786 /* Handle TCP checksum offload */ 1787 if (skb->ip_summed == CHECKSUM_PARTIAL) { ··· 2420 struct sk_buff *skb = 
NULL; 2421 u16 count = (status & GMR_FS_LEN) >> 16; 2422 2423 - #ifdef SKY2_VLAN_TAG_USED 2424 - /* Account for vlan tag */ 2425 - if (sky2->vlgrp && (status & GMR_FS_VLAN)) 2426 - count -= VLAN_HLEN; 2427 - #endif 2428 2429 netif_printk(sky2, rx_status, KERN_DEBUG, dev, 2430 "rx slot %u status 0x%x len %d\n", ··· 2489 static inline void sky2_skb_rx(const struct sky2_port *sky2, 2490 u32 status, struct sk_buff *skb) 2491 { 2492 - #ifdef SKY2_VLAN_TAG_USED 2493 - u16 vlan_tag = be16_to_cpu(sky2->rx_tag); 2494 - if (sky2->vlgrp && (status & GMR_FS_VLAN)) { 2495 - if (skb->ip_summed == CHECKSUM_NONE) 2496 - vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag); 2497 - else 2498 - vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp, 2499 - vlan_tag, skb); 2500 - return; 2501 - } 2502 - #endif 2503 if (skb->ip_summed == CHECKSUM_NONE) 2504 netif_receive_skb(skb); 2505 else ··· 2608 goto exit_loop; 2609 break; 2610 2611 - #ifdef SKY2_VLAN_TAG_USED 2612 case OP_RXVLAN: 2613 sky2->rx_tag = length; 2614 break; ··· 2615 case OP_RXCHKSVLAN: 2616 sky2->rx_tag = length; 2617 /* fall through */ 2618 - #endif 2619 case OP_RXCHKS: 2620 if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) 2621 sky2_rx_checksum(sky2, status); ··· 3017 | SKY2_HW_NEW_LE 3018 | SKY2_HW_AUTO_TX_SUM 3019 | SKY2_HW_ADV_POWER_CTL; 3020 break; 3021 3022 case CHIP_ID_YUKON_SUPR: ··· 3390 u32 modes = SUPPORTED_10baseT_Half 3391 | SUPPORTED_10baseT_Full 3392 | SUPPORTED_100baseT_Half 3393 - | SUPPORTED_100baseT_Full 3394 - | SUPPORTED_Autoneg | SUPPORTED_TP; 3395 3396 if (hw->flags & SKY2_HW_GIGABIT) 3397 modes |= SUPPORTED_1000baseT_Half 3398 | SUPPORTED_1000baseT_Full; 3399 return modes; 3400 } else 3401 - return SUPPORTED_1000baseT_Half 3402 - | SUPPORTED_1000baseT_Full 3403 - | SUPPORTED_Autoneg 3404 - | SUPPORTED_FIBRE; 3405 } 3406 3407 static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ··· 3412 if (sky2_is_copper(hw)) { 3413 ecmd->port = PORT_TP; 3414 ecmd->speed = sky2->speed; 3415 } 
else { 3416 ecmd->speed = SPEED_1000; 3417 ecmd->port = PORT_FIBRE; 3418 } 3419 3420 ecmd->advertising = sky2->advertising; ··· 3433 u32 supported = sky2_supported_modes(hw); 3434 3435 if (ecmd->autoneg == AUTONEG_ENABLE) { 3436 sky2->flags |= SKY2_FLAG_AUTO_SPEED; 3437 - ecmd->advertising = supported; 3438 sky2->duplex = -1; 3439 sky2->speed = -1; 3440 } else { ··· 3488 sky2->duplex = ecmd->duplex; 3489 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; 3490 } 3491 - 3492 - sky2->advertising = ecmd->advertising; 3493 3494 if (netif_running(dev)) { 3495 sky2_phy_reinit(sky2); ··· 4216 static int sky2_set_flags(struct net_device *dev, u32 data) 4217 { 4218 struct sky2_port *sky2 = netdev_priv(dev); 4219 - u32 supported = 4220 - (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH; 4221 int rc; 4222 4223 rc = ethtool_op_set_flags(dev, data, supported); 4224 if (rc) 4225 return rc; 4226 4227 - rx_set_rss(dev); 4228 4229 return 0; 4230 } ··· 4273 .get_sset_count = sky2_get_sset_count, 4274 .get_ethtool_stats = sky2_get_ethtool_stats, 4275 .set_flags = sky2_set_flags, 4276 }; 4277 4278 #ifdef CONFIG_SKY2_DEBUG ··· 4555 .ndo_change_mtu = sky2_change_mtu, 4556 .ndo_tx_timeout = sky2_tx_timeout, 4557 .ndo_get_stats64 = sky2_get_stats, 4558 - #ifdef SKY2_VLAN_TAG_USED 4559 - .ndo_vlan_rx_register = sky2_vlan_rx_register, 4560 - #endif 4561 #ifdef CONFIG_NET_POLL_CONTROLLER 4562 .ndo_poll_controller = sky2_netpoll, 4563 #endif ··· 4570 .ndo_change_mtu = sky2_change_mtu, 4571 .ndo_tx_timeout = sky2_tx_timeout, 4572 .ndo_get_stats64 = sky2_get_stats, 4573 - #ifdef SKY2_VLAN_TAG_USED 4574 - .ndo_vlan_rx_register = sky2_vlan_rx_register, 4575 - #endif 4576 }, 4577 }; 4578 ··· 4620 sky2->port = port; 4621 4622 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG 4623 - | NETIF_F_TSO | NETIF_F_GRO; 4624 if (highmem) 4625 dev->features |= NETIF_F_HIGHDMA; 4626 ··· 4629 if (!(hw->flags & SKY2_HW_RSS_BROKEN)) 4630 dev->features |= NETIF_F_RXHASH; 4631 4632 - #ifdef SKY2_VLAN_TAG_USED 4633 - /* 
The workaround for FE+ status conflicts with VLAN tag detection. */ 4634 - if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && 4635 - sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) { 4636 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 4637 - } 4638 - #endif 4639 4640 /* read the mac address */ 4641 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
··· 46 47 #include <asm/irq.h> 48 49 #include "sky2.h" 50 51 #define DRV_NAME "sky2" ··· 1326 return err; 1327 } 1328 1329 + #define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX) 1330 1331 + static void sky2_vlan_mode(struct net_device *dev) 1332 { 1333 struct sky2_port *sky2 = netdev_priv(dev); 1334 struct sky2_hw *hw = sky2->hw; 1335 u16 port = sky2->port; 1336 1337 + if (dev->features & NETIF_F_HW_VLAN_RX) 1338 + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1339 + RX_VLAN_STRIP_ON); 1340 + else 1341 + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1342 + RX_VLAN_STRIP_OFF); 1343 1344 + dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN; 1345 + if (dev->features & NETIF_F_HW_VLAN_TX) 1346 + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1347 + TX_VLAN_TAG_ON); 1348 + else { 1349 + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1350 + TX_VLAN_TAG_OFF); 1351 1352 + /* Can't do transmit offload of vlan without hw vlan */ 1353 + dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG 1354 + | NETIF_F_ALL_CSUM); 1355 + } 1356 } 1357 1358 /* Amount of required worst case padding in rx buffer */ 1359 static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) ··· 1635 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1636 sky2->tx_ring_size - 1); 1637 1638 + sky2_vlan_mode(sky2->netdev); 1639 1640 sky2_rx_start(sky2); 1641 } ··· 1780 } 1781 1782 ctrl = 0; 1783 + 1784 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ 1785 if (vlan_tx_tag_present(skb)) { 1786 if (!le) { ··· 1792 le->length = cpu_to_be16(vlan_tx_tag_get(skb)); 1793 ctrl |= INS_VLAN; 1794 } 1795 1796 /* Handle TCP checksum offload */ 1797 if (skb->ip_summed == CHECKSUM_PARTIAL) { ··· 2432 struct sk_buff *skb = NULL; 2433 u16 count = (status & GMR_FS_LEN) >> 16; 2434 2435 + if (status & GMR_FS_VLAN) 2436 + count -= VLAN_HLEN; /* Account for vlan tag */ 2437 2438 netif_printk(sky2, rx_status, KERN_DEBUG, dev, 2439 "rx slot %u status 0x%x len %d\n", ··· 2504 static inline void sky2_skb_rx(const 
struct sky2_port *sky2, 2505 u32 status, struct sk_buff *skb) 2506 { 2507 + if (status & GMR_FS_VLAN) 2508 + __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag)); 2509 + 2510 if (skb->ip_summed == CHECKSUM_NONE) 2511 netif_receive_skb(skb); 2512 else ··· 2631 goto exit_loop; 2632 break; 2633 2634 case OP_RXVLAN: 2635 sky2->rx_tag = length; 2636 break; ··· 2639 case OP_RXCHKSVLAN: 2640 sky2->rx_tag = length; 2641 /* fall through */ 2642 case OP_RXCHKS: 2643 if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) 2644 sky2_rx_checksum(sky2, status); ··· 3042 | SKY2_HW_NEW_LE 3043 | SKY2_HW_AUTO_TX_SUM 3044 | SKY2_HW_ADV_POWER_CTL; 3045 + 3046 + /* The workaround for status conflicts VLAN tag detection. */ 3047 + if (hw->chip_rev == CHIP_REV_YU_FE2_A0) 3048 + hw->flags |= SKY2_HW_VLAN_BROKEN; 3049 break; 3050 3051 case CHIP_ID_YUKON_SUPR: ··· 3411 u32 modes = SUPPORTED_10baseT_Half 3412 | SUPPORTED_10baseT_Full 3413 | SUPPORTED_100baseT_Half 3414 + | SUPPORTED_100baseT_Full; 3415 3416 if (hw->flags & SKY2_HW_GIGABIT) 3417 modes |= SUPPORTED_1000baseT_Half 3418 | SUPPORTED_1000baseT_Full; 3419 return modes; 3420 } else 3421 + return SUPPORTED_1000baseT_Half 3422 + | SUPPORTED_1000baseT_Full; 3423 } 3424 3425 static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ··· 3436 if (sky2_is_copper(hw)) { 3437 ecmd->port = PORT_TP; 3438 ecmd->speed = sky2->speed; 3439 + ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; 3440 } else { 3441 ecmd->speed = SPEED_1000; 3442 ecmd->port = PORT_FIBRE; 3443 + ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; 3444 } 3445 3446 ecmd->advertising = sky2->advertising; ··· 3455 u32 supported = sky2_supported_modes(hw); 3456 3457 if (ecmd->autoneg == AUTONEG_ENABLE) { 3458 + if (ecmd->advertising & ~supported) 3459 + return -EINVAL; 3460 + 3461 + if (sky2_is_copper(hw)) 3462 + sky2->advertising = ecmd->advertising | 3463 + ADVERTISED_TP | 3464 + ADVERTISED_Autoneg; 3465 + else 3466 + sky2->advertising = 
ecmd->advertising | 3467 + ADVERTISED_FIBRE | 3468 + ADVERTISED_Autoneg; 3469 + 3470 sky2->flags |= SKY2_FLAG_AUTO_SPEED; 3471 sky2->duplex = -1; 3472 sky2->speed = -1; 3473 } else { ··· 3499 sky2->duplex = ecmd->duplex; 3500 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; 3501 } 3502 3503 if (netif_running(dev)) { 3504 sky2_phy_reinit(sky2); ··· 4229 static int sky2_set_flags(struct net_device *dev, u32 data) 4230 { 4231 struct sky2_port *sky2 = netdev_priv(dev); 4232 + unsigned long old_feat = dev->features; 4233 + u32 supported = 0; 4234 int rc; 4235 + 4236 + if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN)) 4237 + supported |= ETH_FLAG_RXHASH; 4238 + 4239 + if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN)) 4240 + supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; 4241 + 4242 + printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n", 4243 + supported, data); 4244 4245 rc = ethtool_op_set_flags(dev, data, supported); 4246 if (rc) 4247 return rc; 4248 4249 + if ((old_feat ^ dev->features) & NETIF_F_RXHASH) 4250 + rx_set_rss(dev); 4251 + 4252 + if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN) 4253 + sky2_vlan_mode(dev); 4254 4255 return 0; 4256 } ··· 4273 .get_sset_count = sky2_get_sset_count, 4274 .get_ethtool_stats = sky2_get_ethtool_stats, 4275 .set_flags = sky2_set_flags, 4276 + .get_flags = ethtool_op_get_flags, 4277 }; 4278 4279 #ifdef CONFIG_SKY2_DEBUG ··· 4554 .ndo_change_mtu = sky2_change_mtu, 4555 .ndo_tx_timeout = sky2_tx_timeout, 4556 .ndo_get_stats64 = sky2_get_stats, 4557 #ifdef CONFIG_NET_POLL_CONTROLLER 4558 .ndo_poll_controller = sky2_netpoll, 4559 #endif ··· 4572 .ndo_change_mtu = sky2_change_mtu, 4573 .ndo_tx_timeout = sky2_tx_timeout, 4574 .ndo_get_stats64 = sky2_get_stats, 4575 }, 4576 }; 4577 ··· 4625 sky2->port = port; 4626 4627 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG 4628 + | NETIF_F_TSO | NETIF_F_GRO; 4629 + 4630 if (highmem) 4631 dev->features |= NETIF_F_HIGHDMA; 4632 ··· 4633 if (!(hw->flags & SKY2_HW_RSS_BROKEN)) 4634 dev->features |= 
NETIF_F_RXHASH; 4635 4636 + if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) 4637 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 4638 4639 /* read the mac address */ 4640 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+2 -4
drivers/net/sky2.h
··· 2236 u16 rx_pending; 2237 u16 rx_data_size; 2238 u16 rx_nfrags; 2239 - 2240 - #ifdef SKY2_VLAN_TAG_USED 2241 u16 rx_tag; 2242 - struct vlan_group *vlgrp; 2243 - #endif 2244 struct { 2245 unsigned long last; 2246 u32 mac_rp; ··· 2281 #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2282 #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2283 #define SKY2_HW_RSS_BROKEN 0x00000100 2284 2285 u8 chip_id; 2286 u8 chip_rev;
··· 2236 u16 rx_pending; 2237 u16 rx_data_size; 2238 u16 rx_nfrags; 2239 u16 rx_tag; 2240 + 2241 struct { 2242 unsigned long last; 2243 u32 mac_rp; ··· 2284 #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2285 #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2286 #define SKY2_HW_RSS_BROKEN 0x00000100 2287 + #define SKY2_HW_VLAN_BROKEN 0x00000200 2288 2289 u8 chip_id; 2290 u8 chip_rev;
+1 -1
drivers/net/xen-netfront.c
··· 488 489 if (unlikely(!netif_carrier_ok(dev) || 490 (frags > 1 && !xennet_can_sg(dev)) || 491 - netif_needs_gso(dev, skb))) { 492 spin_unlock_irq(&np->tx_lock); 493 goto drop; 494 }
··· 488 489 if (unlikely(!netif_carrier_ok(dev) || 490 (frags > 1 && !xennet_can_sg(dev)) || 491 + netif_needs_gso(skb, netif_skb_features(skb)))) { 492 spin_unlock_irq(&np->tx_lock); 493 goto drop; 494 }
+1
include/linux/bfin_mac.h
··· 24 const unsigned short *mac_peripherals; 25 int phy_mode; 26 unsigned int phy_mask; 27 }; 28 29 #endif
··· 24 const unsigned short *mac_peripherals; 25 int phy_mode; 26 unsigned int phy_mask; 27 + unsigned short vlan1_mask, vlan2_mask; 28 }; 29 30 #endif
+3 -1
include/linux/etherdevice.h
··· 48 49 50 51 - extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count); 52 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) 53 54 /** 55 * is_zero_ether_addr - Determine if give Ethernet address is all zeros.
··· 48 49 50 51 + extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, 52 + unsigned int rxqs); 53 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) 54 + #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) 55 56 /** 57 * is_zero_ether_addr - Determine if give Ethernet address is all zeros.
+3
include/linux/fec.h
··· 3 * Copyright (c) 2009 Orex Computed Radiography 4 * Baruch Siach <baruch@tkos.co.il> 5 * 6 * Header file for the FEC platform data 7 * 8 * This program is free software; you can redistribute it and/or modify ··· 18 19 struct fec_platform_data { 20 phy_interface_t phy; 21 }; 22 23 #endif
··· 3 * Copyright (c) 2009 Orex Computed Radiography 4 * Baruch Siach <baruch@tkos.co.il> 5 * 6 + * Copyright (C) 2010 Freescale Semiconductor, Inc. 7 + * 8 * Header file for the FEC platform data 9 * 10 * This program is free software; you can redistribute it and/or modify ··· 16 17 struct fec_platform_data { 18 phy_interface_t phy; 19 + unsigned char mac[ETH_ALEN]; 20 }; 21 22 #endif
+1 -1
include/linux/if_bridge.h
··· 103 104 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); 105 106 - typedef int (*br_should_route_hook_t)(struct sk_buff *skb); 107 extern br_should_route_hook_t __rcu *br_should_route_hook; 108 109 #endif
··· 103 104 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); 105 106 + typedef int br_should_route_hook_t(struct sk_buff *skb); 107 extern br_should_route_hook_t __rcu *br_should_route_hook; 108 109 #endif
+11 -13
include/linux/netdevice.h
··· 2191 extern void ether_setup(struct net_device *dev); 2192 2193 /* Support for loadable net-drivers */ 2194 - extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, 2195 void (*setup)(struct net_device *), 2196 - unsigned int queue_count); 2197 #define alloc_netdev(sizeof_priv, name, setup) \ 2198 - alloc_netdev_mq(sizeof_priv, name, setup, 1) 2199 extern int register_netdev(struct net_device *dev); 2200 extern void unregister_netdev(struct net_device *dev); 2201 ··· 2307 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 2308 struct net_device *dev); 2309 2310 - int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev); 2311 2312 static inline int net_gso_ok(int features, int gso_type) 2313 { ··· 2321 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 2322 } 2323 2324 - static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 2325 { 2326 - if (skb_is_gso(skb)) { 2327 - int features = netif_get_vlan_features(skb, dev); 2328 - 2329 - return (!skb_gso_ok(skb, features) || 2330 - unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); 2331 - } 2332 - 2333 - return 0; 2334 } 2335 2336 static inline void netif_set_gso_max_size(struct net_device *dev,
··· 2191 extern void ether_setup(struct net_device *dev); 2192 2193 /* Support for loadable net-drivers */ 2194 + extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 2195 void (*setup)(struct net_device *), 2196 + unsigned int txqs, unsigned int rxqs); 2197 #define alloc_netdev(sizeof_priv, name, setup) \ 2198 + alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) 2199 + 2200 + #define alloc_netdev_mq(sizeof_priv, name, setup, count) \ 2201 + alloc_netdev_mqs(sizeof_priv, name, setup, count, count) 2202 + 2203 extern int register_netdev(struct net_device *dev); 2204 extern void unregister_netdev(struct net_device *dev); 2205 ··· 2303 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 2304 struct net_device *dev); 2305 2306 + int netif_skb_features(struct sk_buff *skb); 2307 2308 static inline int net_gso_ok(int features, int gso_type) 2309 { ··· 2317 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 2318 } 2319 2320 + static inline int netif_needs_gso(struct sk_buff *skb, int features) 2321 { 2322 + return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 2323 + unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); 2324 } 2325 2326 static inline void netif_set_gso_max_size(struct net_device *dev,
+5 -5
include/linux/netfilter/x_tables.h
··· 472 * necessary for reading the counters. 473 */ 474 struct xt_info_lock { 475 - spinlock_t lock; 476 unsigned char readers; 477 }; 478 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); ··· 497 local_bh_disable(); 498 lock = &__get_cpu_var(xt_info_locks); 499 if (likely(!lock->readers++)) 500 - spin_lock(&lock->lock); 501 } 502 503 static inline void xt_info_rdunlock_bh(void) ··· 505 struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); 506 507 if (likely(!--lock->readers)) 508 - spin_unlock(&lock->lock); 509 local_bh_enable(); 510 } 511 ··· 516 */ 517 static inline void xt_info_wrlock(unsigned int cpu) 518 { 519 - spin_lock(&per_cpu(xt_info_locks, cpu).lock); 520 } 521 522 static inline void xt_info_wrunlock(unsigned int cpu) 523 { 524 - spin_unlock(&per_cpu(xt_info_locks, cpu).lock); 525 } 526 527 /*
··· 472 * necessary for reading the counters. 473 */ 474 struct xt_info_lock { 475 + seqlock_t lock; 476 unsigned char readers; 477 }; 478 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); ··· 497 local_bh_disable(); 498 lock = &__get_cpu_var(xt_info_locks); 499 if (likely(!lock->readers++)) 500 + write_seqlock(&lock->lock); 501 } 502 503 static inline void xt_info_rdunlock_bh(void) ··· 505 struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); 506 507 if (likely(!--lock->readers)) 508 + write_sequnlock(&lock->lock); 509 local_bh_enable(); 510 } 511 ··· 516 */ 517 static inline void xt_info_wrlock(unsigned int cpu) 518 { 519 + write_seqlock(&per_cpu(xt_info_locks, cpu).lock); 520 } 521 522 static inline void xt_info_wrunlock(unsigned int cpu) 523 { 524 + write_sequnlock(&per_cpu(xt_info_locks, cpu).lock); 525 } 526 527 /*
+1 -1
include/net/ah.h
··· 4 #include <linux/skbuff.h> 5 6 /* This is the maximum truncated ICV length that we know of. */ 7 - #define MAX_AH_AUTH_LEN 12 8 9 struct crypto_ahash; 10
··· 4 #include <linux/skbuff.h> 5 6 /* This is the maximum truncated ICV length that we know of. */ 7 + #define MAX_AH_AUTH_LEN 16 8 9 struct crypto_ahash; 10
+1
include/net/arp.h
··· 25 const unsigned char *src_hw, 26 const unsigned char *target_hw); 27 extern void arp_xmit(struct sk_buff *skb); 28 29 #endif /* _ARP_H */
··· 25 const unsigned char *src_hw, 26 const unsigned char *target_hw); 27 extern void arp_xmit(struct sk_buff *skb); 28 + int arp_invalidate(struct net_device *dev, __be32 ip); 29 30 #endif /* _ARP_H */
+2 -2
include/net/phonet/phonet.h
··· 107 int sock_type; 108 }; 109 110 - int phonet_proto_register(int protocol, struct phonet_protocol *pp); 111 - void phonet_proto_unregister(int protocol, struct phonet_protocol *pp); 112 113 int phonet_sysctl_init(void); 114 void phonet_sysctl_exit(void);
··· 107 int sock_type; 108 }; 109 110 + int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp); 111 + void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp); 112 113 int phonet_sysctl_init(void); 114 void phonet_sysctl_exit(void);
+14 -6
include/net/sch_generic.h
··· 207 return q->q.qlen; 208 } 209 210 - static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb) 211 { 212 return (struct qdisc_skb_cb *)skb->cb; 213 } ··· 394 return true; 395 } 396 397 - static inline unsigned int qdisc_pkt_len(struct sk_buff *skb) 398 { 399 return qdisc_skb_cb(skb)->pkt_len; 400 } ··· 426 return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; 427 } 428 429 - static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len) 430 { 431 - sch->bstats.bytes += len; 432 - sch->bstats.packets++; 433 } 434 435 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, ··· 445 { 446 __skb_queue_tail(list, skb); 447 sch->qstats.backlog += qdisc_pkt_len(skb); 448 - __qdisc_update_bstats(sch, qdisc_pkt_len(skb)); 449 450 return NET_XMIT_SUCCESS; 451 }
··· 207 return q->q.qlen; 208 } 209 210 + static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb) 211 { 212 return (struct qdisc_skb_cb *)skb->cb; 213 } ··· 394 return true; 395 } 396 397 + static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb) 398 { 399 return qdisc_skb_cb(skb)->pkt_len; 400 } ··· 426 return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; 427 } 428 429 + 430 + static inline void bstats_update(struct gnet_stats_basic_packed *bstats, 431 + const struct sk_buff *skb) 432 { 433 + bstats->bytes += qdisc_pkt_len(skb); 434 + bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 435 + } 436 + 437 + static inline void qdisc_bstats_update(struct Qdisc *sch, 438 + const struct sk_buff *skb) 439 + { 440 + bstats_update(&sch->bstats, skb); 441 } 442 443 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, ··· 437 { 438 __skb_queue_tail(list, skb); 439 sch->qstats.backlog += qdisc_pkt_len(skb); 440 + qdisc_bstats_update(sch, skb); 441 442 return NET_XMIT_SUCCESS; 443 }
+4
include/net/sock.h
··· 152 * fields between dontcopy_begin/dontcopy_end 153 * are not copied in sock_copy() 154 */ 155 int skc_dontcopy_begin[0]; 156 union { 157 struct hlist_node skc_node; 158 struct hlist_nulls_node skc_nulls_node; 159 }; 160 int skc_tx_queue_mapping; 161 atomic_t skc_refcnt; 162 int skc_dontcopy_end[0]; 163 }; 164 165 /**
··· 152 * fields between dontcopy_begin/dontcopy_end 153 * are not copied in sock_copy() 154 */ 155 + /* private: */ 156 int skc_dontcopy_begin[0]; 157 + /* public: */ 158 union { 159 struct hlist_node skc_node; 160 struct hlist_nulls_node skc_nulls_node; 161 }; 162 int skc_tx_queue_mapping; 163 atomic_t skc_refcnt; 164 + /* private: */ 165 int skc_dontcopy_end[0]; 166 + /* public: */ 167 }; 168 169 /**
+1 -1
net/caif/caif_socket.c
··· 740 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) 741 return -ENOPROTOOPT; 742 lock_sock(&(cf_sk->sk)); 743 - cf_sk->conn_req.param.size = ol; 744 if (ol > sizeof(cf_sk->conn_req.param.data) || 745 copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { 746 release_sock(&cf_sk->sk); 747 return -EINVAL; 748 } 749 release_sock(&cf_sk->sk); 750 return 0; 751
··· 740 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) 741 return -ENOPROTOOPT; 742 lock_sock(&(cf_sk->sk)); 743 if (ol > sizeof(cf_sk->conn_req.param.data) || 744 copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { 745 release_sock(&cf_sk->sk); 746 return -EINVAL; 747 } 748 + cf_sk->conn_req.param.size = ol; 749 release_sock(&cf_sk->sk); 750 return 0; 751
+17 -1
net/caif/chnl_net.c
··· 76 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); 77 int pktlen; 78 int err = 0; 79 80 priv = container_of(layr, struct chnl_net, chnl); 81 ··· 92 * send the packet to the net stack. 93 */ 94 skb->dev = priv->netdev; 95 - skb->protocol = htons(ETH_P_IP); 96 97 /* If we change the header in loop mode, the checksum is corrupted. */ 98 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
··· 76 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); 77 int pktlen; 78 int err = 0; 79 + const u8 *ip_version; 80 + u8 buf; 81 82 priv = container_of(layr, struct chnl_net, chnl); 83 ··· 90 * send the packet to the net stack. 91 */ 92 skb->dev = priv->netdev; 93 + 94 + /* check the version of IP */ 95 + ip_version = skb_header_pointer(skb, 0, 1, &buf); 96 + if (!ip_version) 97 + return -EINVAL; 98 + switch (*ip_version >> 4) { 99 + case 4: 100 + skb->protocol = htons(ETH_P_IP); 101 + break; 102 + case 6: 103 + skb->protocol = htons(ETH_P_IPV6); 104 + break; 105 + default: 106 + return -EINVAL; 107 + } 108 109 /* If we change the header in loop mode, the checksum is corrupted. */ 110 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
+79 -70
net/core/dev.c
··· 1732 } 1733 EXPORT_SYMBOL(netif_device_attach); 1734 1735 - static bool can_checksum_protocol(unsigned long features, __be16 protocol) 1736 - { 1737 - return ((features & NETIF_F_NO_CSUM) || 1738 - ((features & NETIF_F_V4_CSUM) && 1739 - protocol == htons(ETH_P_IP)) || 1740 - ((features & NETIF_F_V6_CSUM) && 1741 - protocol == htons(ETH_P_IPV6)) || 1742 - ((features & NETIF_F_FCOE_CRC) && 1743 - protocol == htons(ETH_P_FCOE))); 1744 - } 1745 - 1746 - static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb) 1747 - { 1748 - __be16 protocol = skb->protocol; 1749 - int features = dev->features; 1750 - 1751 - if (vlan_tx_tag_present(skb)) { 1752 - features &= dev->vlan_features; 1753 - } else if (protocol == htons(ETH_P_8021Q)) { 1754 - struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 1755 - protocol = veh->h_vlan_encapsulated_proto; 1756 - features &= dev->vlan_features; 1757 - } 1758 - 1759 - return can_checksum_protocol(features, protocol); 1760 - } 1761 - 1762 /** 1763 * skb_dev_set -- assign a new device to a buffer 1764 * @skb: buffer for the new device ··· 1944 /** 1945 * dev_gso_segment - Perform emulated hardware segmentation on skb. 1946 * @skb: buffer to segment 1947 * 1948 * This function segments the given skb and stores the list of segments 1949 * in skb->next. 1950 */ 1951 - static int dev_gso_segment(struct sk_buff *skb) 1952 { 1953 - struct net_device *dev = skb->dev; 1954 struct sk_buff *segs; 1955 - int features = dev->features & ~(illegal_highdma(dev, skb) ? 
1956 - NETIF_F_SG : 0); 1957 1958 segs = skb_gso_segment(skb, features); 1959 ··· 1988 } 1989 } 1990 1991 - int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev) 1992 { 1993 __be16 protocol = skb->protocol; 1994 1995 if (protocol == htons(ETH_P_8021Q)) { 1996 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 1997 protocol = veh->h_vlan_encapsulated_proto; 1998 - } else if (!skb->vlan_tci) 1999 - return dev->features; 2000 2001 - if (protocol != htons(ETH_P_8021Q)) 2002 - return dev->features & dev->vlan_features; 2003 - else 2004 - return 0; 2005 } 2006 - EXPORT_SYMBOL(netif_get_vlan_features); 2007 2008 /* 2009 * Returns true if either: ··· 2043 * support DMA from it. 2044 */ 2045 static inline int skb_needs_linearize(struct sk_buff *skb, 2046 - struct net_device *dev) 2047 { 2048 - if (skb_is_nonlinear(skb)) { 2049 - int features = dev->features; 2050 - 2051 - if (vlan_tx_tag_present(skb)) 2052 - features &= dev->vlan_features; 2053 - 2054 - return (skb_has_frag_list(skb) && 2055 - !(features & NETIF_F_FRAGLIST)) || 2056 (skb_shinfo(skb)->nr_frags && 2057 - (!(features & NETIF_F_SG) || 2058 - illegal_highdma(dev, skb))); 2059 - } 2060 - 2061 - return 0; 2062 } 2063 2064 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, ··· 2059 int rc = NETDEV_TX_OK; 2060 2061 if (likely(!skb->next)) { 2062 /* 2063 * If device doesnt need skb->dst, release it right now while 2064 * its hot in this cpu cache ··· 2073 2074 skb_orphan_try(skb); 2075 2076 if (vlan_tx_tag_present(skb) && 2077 - !(dev->features & NETIF_F_HW_VLAN_TX)) { 2078 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); 2079 if (unlikely(!skb)) 2080 goto out; ··· 2084 skb->vlan_tci = 0; 2085 } 2086 2087 - if (netif_needs_gso(dev, skb)) { 2088 - if (unlikely(dev_gso_segment(skb))) 2089 goto out_kfree_skb; 2090 if (skb->next) 2091 goto gso; 2092 } else { 2093 - if (skb_needs_linearize(skb, dev) && 2094 __skb_linearize(skb)) 2095 goto out_kfree_skb; 2096 ··· 2101 if 
(skb->ip_summed == CHECKSUM_PARTIAL) { 2102 skb_set_transport_header(skb, 2103 skb_checksum_start_offset(skb)); 2104 - if (!dev_can_checksum(dev, skb) && 2105 skb_checksum_help(skb)) 2106 goto out_kfree_skb; 2107 } ··· 2297 */ 2298 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2299 skb_dst_force(skb); 2300 - __qdisc_update_bstats(q, skb->len); 2301 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { 2302 if (unlikely(contended)) { 2303 spin_unlock(&q->busylock); ··· 5620 } 5621 5622 /** 5623 - * alloc_netdev_mq - allocate network device 5624 * @sizeof_priv: size of private data to allocate space for 5625 * @name: device name format string 5626 * @setup: callback to initialize device 5627 - * @queue_count: the number of subqueues to allocate 5628 * 5629 * Allocates a struct net_device with private data area for driver use 5630 * and performs basic initialization. Also allocates subquue structs 5631 - * for each queue on the device at the end of the netdevice. 5632 */ 5633 - struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, 5634 - void (*setup)(struct net_device *), unsigned int queue_count) 5635 { 5636 struct net_device *dev; 5637 size_t alloc_size; ··· 5641 5642 BUG_ON(strlen(name) >= sizeof(dev->name)); 5643 5644 - if (queue_count < 1) { 5645 pr_err("alloc_netdev: Unable to allocate device " 5646 "with zero queues.\n"); 5647 return NULL; 5648 } 5649 5650 alloc_size = sizeof(struct net_device); 5651 if (sizeof_priv) { ··· 5685 5686 dev_net_set(dev, &init_net); 5687 5688 - dev->num_tx_queues = queue_count; 5689 - dev->real_num_tx_queues = queue_count; 5690 if (netif_alloc_netdev_queues(dev)) 5691 goto free_pcpu; 5692 5693 #ifdef CONFIG_RPS 5694 - dev->num_rx_queues = queue_count; 5695 - dev->real_num_rx_queues = queue_count; 5696 if (netif_alloc_rx_queues(dev)) 5697 goto free_pcpu; 5698 #endif ··· 5720 kfree(p); 5721 return NULL; 5722 } 5723 - EXPORT_SYMBOL(alloc_netdev_mq); 5724 5725 /** 5726 * free_netdev - free network device
··· 1732 } 1733 EXPORT_SYMBOL(netif_device_attach); 1734 1735 /** 1736 * skb_dev_set -- assign a new device to a buffer 1737 * @skb: buffer for the new device ··· 1971 /** 1972 * dev_gso_segment - Perform emulated hardware segmentation on skb. 1973 * @skb: buffer to segment 1974 + * @features: device features as applicable to this skb 1975 * 1976 * This function segments the given skb and stores the list of segments 1977 * in skb->next. 1978 */ 1979 + static int dev_gso_segment(struct sk_buff *skb, int features) 1980 { 1981 struct sk_buff *segs; 1982 1983 segs = skb_gso_segment(skb, features); 1984 ··· 2017 } 2018 } 2019 2020 + static bool can_checksum_protocol(unsigned long features, __be16 protocol) 2021 + { 2022 + return ((features & NETIF_F_GEN_CSUM) || 2023 + ((features & NETIF_F_V4_CSUM) && 2024 + protocol == htons(ETH_P_IP)) || 2025 + ((features & NETIF_F_V6_CSUM) && 2026 + protocol == htons(ETH_P_IPV6)) || 2027 + ((features & NETIF_F_FCOE_CRC) && 2028 + protocol == htons(ETH_P_FCOE))); 2029 + } 2030 + 2031 + static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features) 2032 + { 2033 + if (!can_checksum_protocol(features, protocol)) { 2034 + features &= ~NETIF_F_ALL_CSUM; 2035 + features &= ~NETIF_F_SG; 2036 + } else if (illegal_highdma(skb->dev, skb)) { 2037 + features &= ~NETIF_F_SG; 2038 + } 2039 + 2040 + return features; 2041 + } 2042 + 2043 + int netif_skb_features(struct sk_buff *skb) 2044 { 2045 __be16 protocol = skb->protocol; 2046 + int features = skb->dev->features; 2047 2048 if (protocol == htons(ETH_P_8021Q)) { 2049 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2050 protocol = veh->h_vlan_encapsulated_proto; 2051 + } else if (!vlan_tx_tag_present(skb)) { 2052 + return harmonize_features(skb, protocol, features); 2053 + } 2054 2055 features &= skb->dev->vlan_features; 2056 2057 + if (protocol != htons(ETH_P_8021Q)) { 2058 + return harmonize_features(skb, protocol, features); 2059 + } else { 2060 + features &= 
NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | 2061 + NETIF_F_GEN_CSUM; 2062 + return harmonize_features(skb, protocol, features); 2063 + } 2064 } 2065 + EXPORT_SYMBOL(netif_skb_features); 2066 2067 /* 2068 * Returns true if either: ··· 2042 * support DMA from it. 2043 */ 2044 static inline int skb_needs_linearize(struct sk_buff *skb, 2045 + int features) 2046 { 2047 + return skb_is_nonlinear(skb) && 2048 + ((skb_has_frag_list(skb) && 2049 + !(features & NETIF_F_FRAGLIST)) || 2050 (skb_shinfo(skb)->nr_frags && 2051 + !(features & NETIF_F_SG))); 2052 } 2053 2054 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, ··· 2067 int rc = NETDEV_TX_OK; 2068 2069 if (likely(!skb->next)) { 2070 + int features; 2071 + 2072 /* 2073 * If device doesnt need skb->dst, release it right now while 2074 * its hot in this cpu cache ··· 2079 2080 skb_orphan_try(skb); 2081 2082 + features = netif_skb_features(skb); 2083 + 2084 if (vlan_tx_tag_present(skb) && 2085 + !(features & NETIF_F_HW_VLAN_TX)) { 2086 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); 2087 if (unlikely(!skb)) 2088 goto out; ··· 2088 skb->vlan_tci = 0; 2089 } 2090 2091 + if (netif_needs_gso(skb, features)) { 2092 + if (unlikely(dev_gso_segment(skb, features))) 2093 goto out_kfree_skb; 2094 if (skb->next) 2095 goto gso; 2096 } else { 2097 + if (skb_needs_linearize(skb, features) && 2098 __skb_linearize(skb)) 2099 goto out_kfree_skb; 2100 ··· 2105 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2106 skb_set_transport_header(skb, 2107 skb_checksum_start_offset(skb)); 2108 + if (!(features & NETIF_F_ALL_CSUM) && 2109 skb_checksum_help(skb)) 2110 goto out_kfree_skb; 2111 } ··· 2301 */ 2302 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2303 skb_dst_force(skb); 2304 + 2305 + qdisc_skb_cb(skb)->pkt_len = skb->len; 2306 + qdisc_bstats_update(q, skb); 2307 + 2308 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { 2309 if (unlikely(contended)) { 2310 spin_unlock(&q->busylock); ··· 5621 } 5622 5623 /** 5624 + * 
alloc_netdev_mqs - allocate network device 5625 * @sizeof_priv: size of private data to allocate space for 5626 * @name: device name format string 5627 * @setup: callback to initialize device 5628 + * @txqs: the number of TX subqueues to allocate 5629 + * @rxqs: the number of RX subqueues to allocate 5630 * 5631 * Allocates a struct net_device with private data area for driver use 5632 * and performs basic initialization. Also allocates subquue structs 5633 + * for each queue on the device. 5634 */ 5635 + struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 5636 + void (*setup)(struct net_device *), 5637 + unsigned int txqs, unsigned int rxqs) 5638 { 5639 struct net_device *dev; 5640 size_t alloc_size; ··· 5640 5641 BUG_ON(strlen(name) >= sizeof(dev->name)); 5642 5643 + if (txqs < 1) { 5644 pr_err("alloc_netdev: Unable to allocate device " 5645 "with zero queues.\n"); 5646 return NULL; 5647 } 5648 + 5649 + #ifdef CONFIG_RPS 5650 + if (rxqs < 1) { 5651 + pr_err("alloc_netdev: Unable to allocate device " 5652 + "with zero RX queues.\n"); 5653 + return NULL; 5654 + } 5655 + #endif 5656 5657 alloc_size = sizeof(struct net_device); 5658 if (sizeof_priv) { ··· 5676 5677 dev_net_set(dev, &init_net); 5678 5679 + dev->num_tx_queues = txqs; 5680 + dev->real_num_tx_queues = txqs; 5681 if (netif_alloc_netdev_queues(dev)) 5682 goto free_pcpu; 5683 5684 #ifdef CONFIG_RPS 5685 + dev->num_rx_queues = rxqs; 5686 + dev->real_num_rx_queues = rxqs; 5687 if (netif_alloc_rx_queues(dev)) 5688 goto free_pcpu; 5689 #endif ··· 5711 kfree(p); 5712 return NULL; 5713 } 5714 + EXPORT_SYMBOL(alloc_netdev_mqs); 5715 5716 /** 5717 * free_netdev - free network device
+1 -1
net/core/filter.c
··· 158 /** 159 * sk_run_filter - run a filter on a socket 160 * @skb: buffer to run the filter on 161 - * @filter: filter to apply 162 * 163 * Decode and apply filter instructions to the skb->data. 164 * Return length to keep, 0 for none. @skb is the data we are
··· 158 /** 159 * sk_run_filter - run a filter on a socket 160 * @skb: buffer to run the filter on 161 + * @fentry: filter to apply 162 * 163 * Decode and apply filter instructions to the skb->data. 164 * Return length to keep, 0 for none. @skb is the data we are
+1 -1
net/core/rtnetlink.c
··· 1820 if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) 1821 return -EPERM; 1822 1823 - if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { 1824 struct sock *rtnl; 1825 rtnl_dumpit_func dumpit; 1826
··· 1820 if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) 1821 return -EPERM; 1822 1823 + if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { 1824 struct sock *rtnl; 1825 rtnl_dumpit_func dumpit; 1826
+2 -1
net/dccp/dccp.h
··· 426 { 427 struct dccp_sock *dp = dccp_sk(sk); 428 429 - dp->dccps_gsr = seq; 430 /* Sequence validity window depends on remote Sequence Window (7.5.1) */ 431 dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4); 432 /*
··· 426 { 427 struct dccp_sock *dp = dccp_sk(sk); 428 429 + if (after48(seq, dp->dccps_gsr)) 430 + dp->dccps_gsr = seq; 431 /* Sequence validity window depends on remote Sequence Window (7.5.1) */ 432 dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4); 433 /*
+1 -1
net/dccp/input.c
··· 260 */ 261 if (time_before(now, (dp->dccps_rate_last + 262 sysctl_dccp_sync_ratelimit))) 263 - return 0; 264 265 DCCP_WARN("Step 6 failed for %s packet, " 266 "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
··· 260 */ 261 if (time_before(now, (dp->dccps_rate_last + 262 sysctl_dccp_sync_ratelimit))) 263 + return -1; 264 265 DCCP_WARN("Step 6 failed for %s packet, " 266 "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
+3 -1
net/dccp/sysctl.c
··· 21 /* Boundary values */ 22 static int zero = 0, 23 u8_max = 0xFF; 24 - static unsigned long seqw_min = 32; 25 26 static struct ctl_table dccp_default_table[] = { 27 { ··· 32 .mode = 0644, 33 .proc_handler = proc_doulongvec_minmax, 34 .extra1 = &seqw_min, /* RFC 4340, 7.5.2 */ 35 }, 36 { 37 .procname = "rx_ccid",
··· 21 /* Boundary values */ 22 static int zero = 0, 23 u8_max = 0xFF; 24 + static unsigned long seqw_min = DCCPF_SEQ_WMIN, 25 + seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */ 26 27 static struct ctl_table dccp_default_table[] = { 28 { ··· 31 .mode = 0644, 32 .proc_handler = proc_doulongvec_minmax, 33 .extra1 = &seqw_min, /* RFC 4340, 7.5.2 */ 34 + .extra2 = &seqw_max, 35 }, 36 { 37 .procname = "rx_ccid",
+7 -5
net/ethernet/eth.c
··· 347 EXPORT_SYMBOL(ether_setup); 348 349 /** 350 - * alloc_etherdev_mq - Allocates and sets up an Ethernet device 351 * @sizeof_priv: Size of additional driver-private structure to be allocated 352 * for this Ethernet device 353 - * @queue_count: The number of queues this device has. 354 * 355 * Fill in the fields of the device structure with Ethernet-generic 356 * values. Basically does everything except registering the device. ··· 361 * this private data area. 362 */ 363 364 - struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count) 365 { 366 - return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count); 367 } 368 - EXPORT_SYMBOL(alloc_etherdev_mq); 369 370 static size_t _format_mac_addr(char *buf, int buflen, 371 const unsigned char *addr, int len)
··· 347 EXPORT_SYMBOL(ether_setup); 348 349 /** 350 + * alloc_etherdev_mqs - Allocates and sets up an Ethernet device 351 * @sizeof_priv: Size of additional driver-private structure to be allocated 352 * for this Ethernet device 353 + * @txqs: The number of TX queues this device has. 354 + * @rxqs: The number of RX queues this device has. 355 * 356 * Fill in the fields of the device structure with Ethernet-generic 357 * values. Basically does everything except registering the device. ··· 360 * this private data area. 361 */ 362 363 + struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, 364 + unsigned int rxqs) 365 { 366 + return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs); 367 } 368 + EXPORT_SYMBOL(alloc_etherdev_mqs); 369 370 static size_t _format_mac_addr(char *buf, int buflen, 371 const unsigned char *addr, int len)
+4 -3
net/ipv4/ah4.c
··· 314 315 skb->ip_summed = CHECKSUM_NONE; 316 317 - ah = (struct ip_auth_hdr *)skb->data; 318 - iph = ip_hdr(skb); 319 - ihl = ip_hdrlen(skb); 320 321 if ((err = skb_cow_data(skb, 0, &trailer)) < 0) 322 goto out; 323 nfrags = err; 324 325 work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len); 326 if (!work_iph)
··· 314 315 skb->ip_summed = CHECKSUM_NONE; 316 317 318 if ((err = skb_cow_data(skb, 0, &trailer)) < 0) 319 goto out; 320 nfrags = err; 321 + 322 + ah = (struct ip_auth_hdr *)skb->data; 323 + iph = ip_hdr(skb); 324 + ihl = ip_hdrlen(skb); 325 326 work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len); 327 if (!work_iph)
+18 -11
net/ipv4/arp.c
··· 1143 return err; 1144 } 1145 1146 static int arp_req_delete_public(struct net *net, struct arpreq *r, 1147 struct net_device *dev) 1148 { ··· 1180 { 1181 int err; 1182 __be32 ip; 1183 - struct neighbour *neigh; 1184 1185 if (r->arp_flags & ATF_PUBL) 1186 return arp_req_delete_public(net, r, dev); ··· 1197 if (!dev) 1198 return -EINVAL; 1199 } 1200 - err = -ENXIO; 1201 - neigh = neigh_lookup(&arp_tbl, &ip, dev); 1202 - if (neigh) { 1203 - if (neigh->nud_state & ~NUD_NOARP) 1204 - err = neigh_update(neigh, NULL, NUD_FAILED, 1205 - NEIGH_UPDATE_F_OVERRIDE| 1206 - NEIGH_UPDATE_F_ADMIN); 1207 - neigh_release(neigh); 1208 - } 1209 - return err; 1210 } 1211 1212 /*
··· 1143 return err; 1144 } 1145 1146 + int arp_invalidate(struct net_device *dev, __be32 ip) 1147 + { 1148 + struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev); 1149 + int err = -ENXIO; 1150 + 1151 + if (neigh) { 1152 + if (neigh->nud_state & ~NUD_NOARP) 1153 + err = neigh_update(neigh, NULL, NUD_FAILED, 1154 + NEIGH_UPDATE_F_OVERRIDE| 1155 + NEIGH_UPDATE_F_ADMIN); 1156 + neigh_release(neigh); 1157 + } 1158 + 1159 + return err; 1160 + } 1161 + EXPORT_SYMBOL(arp_invalidate); 1162 + 1163 static int arp_req_delete_public(struct net *net, struct arpreq *r, 1164 struct net_device *dev) 1165 { ··· 1163 { 1164 int err; 1165 __be32 ip; 1166 1167 if (r->arp_flags & ATF_PUBL) 1168 return arp_req_delete_public(net, r, dev); ··· 1181 if (!dev) 1182 return -EINVAL; 1183 } 1184 + return arp_invalidate(dev, ip); 1185 } 1186 1187 /*
+3 -2
net/ipv4/inet_connection_sock.c
··· 73 !sk2->sk_bound_dev_if || 74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 75 if (!reuse || !sk2->sk_reuse || 76 - sk2->sk_state == TCP_LISTEN) { 77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); 78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || 79 sk2_rcv_saddr == sk_rcv_saddr(sk)) ··· 122 (tb->num_owners < smallest_size || smallest_size == -1)) { 123 smallest_size = tb->num_owners; 124 smallest_rover = rover; 125 - if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { 126 spin_unlock(&head->lock); 127 snum = smallest_rover; 128 goto have_snum;
··· 73 !sk2->sk_bound_dev_if || 74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 75 if (!reuse || !sk2->sk_reuse || 76 + ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) { 77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); 78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || 79 sk2_rcv_saddr == sk_rcv_saddr(sk)) ··· 122 (tb->num_owners < smallest_size || smallest_size == -1)) { 123 smallest_size = tb->num_owners; 124 smallest_rover = rover; 125 + if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && 126 + !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { 127 spin_unlock(&head->lock); 128 snum = smallest_rover; 129 goto have_snum;
+1 -1
net/ipv4/inet_diag.c
··· 858 nlmsg_len(nlh) < hdrlen) 859 return -EINVAL; 860 861 - if (nlh->nlmsg_flags & NLM_F_DUMP) { 862 if (nlmsg_attrlen(nlh, hdrlen)) { 863 struct nlattr *attr; 864
··· 858 nlmsg_len(nlh) < hdrlen) 859 return -EINVAL; 860 861 + if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { 862 if (nlmsg_attrlen(nlh, hdrlen)) { 863 struct nlattr *attr; 864
+14 -31
net/ipv4/netfilter/arp_tables.c
··· 710 struct arpt_entry *iter; 711 unsigned int cpu; 712 unsigned int i; 713 - unsigned int curcpu = get_cpu(); 714 - 715 - /* Instead of clearing (by a previous call to memset()) 716 - * the counters and using adds, we set the counters 717 - * with data used by 'current' CPU 718 - * 719 - * Bottom half has to be disabled to prevent deadlock 720 - * if new softirq were to run and call ipt_do_table 721 - */ 722 - local_bh_disable(); 723 - i = 0; 724 - xt_entry_foreach(iter, t->entries[curcpu], t->size) { 725 - SET_COUNTER(counters[i], iter->counters.bcnt, 726 - iter->counters.pcnt); 727 - ++i; 728 - } 729 - local_bh_enable(); 730 - /* Processing counters from other cpus, we can let bottom half enabled, 731 - * (preemption is disabled) 732 - */ 733 734 for_each_possible_cpu(cpu) { 735 - if (cpu == curcpu) 736 - continue; 737 i = 0; 738 - local_bh_disable(); 739 - xt_info_wrlock(cpu); 740 xt_entry_foreach(iter, t->entries[cpu], t->size) { 741 - ADD_COUNTER(counters[i], iter->counters.bcnt, 742 - iter->counters.pcnt); 743 ++i; 744 } 745 - xt_info_wrunlock(cpu); 746 - local_bh_enable(); 747 } 748 - put_cpu(); 749 } 750 751 static struct xt_counters *alloc_counters(const struct xt_table *table) ··· 742 * about). 743 */ 744 countersize = sizeof(struct xt_counters) * private->number; 745 - counters = vmalloc(countersize); 746 747 if (counters == NULL) 748 return ERR_PTR(-ENOMEM); ··· 990 struct arpt_entry *iter; 991 992 ret = 0; 993 - counters = vmalloc(num_counters * sizeof(struct xt_counters)); 994 if (!counters) { 995 ret = -ENOMEM; 996 goto out;
··· 710 struct arpt_entry *iter; 711 unsigned int cpu; 712 unsigned int i; 713 714 for_each_possible_cpu(cpu) { 715 + seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 716 + 717 i = 0; 718 xt_entry_foreach(iter, t->entries[cpu], t->size) { 719 + u64 bcnt, pcnt; 720 + unsigned int start; 721 + 722 + do { 723 + start = read_seqbegin(lock); 724 + bcnt = iter->counters.bcnt; 725 + pcnt = iter->counters.pcnt; 726 + } while (read_seqretry(lock, start)); 727 + 728 + ADD_COUNTER(counters[i], bcnt, pcnt); 729 ++i; 730 } 731 } 732 } 733 734 static struct xt_counters *alloc_counters(const struct xt_table *table) ··· 759 * about). 760 */ 761 countersize = sizeof(struct xt_counters) * private->number; 762 + counters = vzalloc(countersize); 763 764 if (counters == NULL) 765 return ERR_PTR(-ENOMEM); ··· 1007 struct arpt_entry *iter; 1008 1009 ret = 0; 1010 + counters = vzalloc(num_counters * sizeof(struct xt_counters)); 1011 if (!counters) { 1012 ret = -ENOMEM; 1013 goto out;
+14 -31
net/ipv4/netfilter/ip_tables.c
··· 884 struct ipt_entry *iter; 885 unsigned int cpu; 886 unsigned int i; 887 - unsigned int curcpu = get_cpu(); 888 - 889 - /* Instead of clearing (by a previous call to memset()) 890 - * the counters and using adds, we set the counters 891 - * with data used by 'current' CPU. 892 - * 893 - * Bottom half has to be disabled to prevent deadlock 894 - * if new softirq were to run and call ipt_do_table 895 - */ 896 - local_bh_disable(); 897 - i = 0; 898 - xt_entry_foreach(iter, t->entries[curcpu], t->size) { 899 - SET_COUNTER(counters[i], iter->counters.bcnt, 900 - iter->counters.pcnt); 901 - ++i; 902 - } 903 - local_bh_enable(); 904 - /* Processing counters from other cpus, we can let bottom half enabled, 905 - * (preemption is disabled) 906 - */ 907 908 for_each_possible_cpu(cpu) { 909 - if (cpu == curcpu) 910 - continue; 911 i = 0; 912 - local_bh_disable(); 913 - xt_info_wrlock(cpu); 914 xt_entry_foreach(iter, t->entries[cpu], t->size) { 915 - ADD_COUNTER(counters[i], iter->counters.bcnt, 916 - iter->counters.pcnt); 917 ++i; /* macro does multi eval of i */ 918 } 919 - xt_info_wrunlock(cpu); 920 - local_bh_enable(); 921 } 922 - put_cpu(); 923 } 924 925 static struct xt_counters *alloc_counters(const struct xt_table *table) ··· 915 (other than comefrom, which userspace doesn't care 916 about). */ 917 countersize = sizeof(struct xt_counters) * private->number; 918 - counters = vmalloc(countersize); 919 920 if (counters == NULL) 921 return ERR_PTR(-ENOMEM); ··· 1186 struct ipt_entry *iter; 1187 1188 ret = 0; 1189 - counters = vmalloc(num_counters * sizeof(struct xt_counters)); 1190 if (!counters) { 1191 ret = -ENOMEM; 1192 goto out;
··· 884 struct ipt_entry *iter; 885 unsigned int cpu; 886 unsigned int i; 887 888 for_each_possible_cpu(cpu) { 889 + seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 890 + 891 i = 0; 892 xt_entry_foreach(iter, t->entries[cpu], t->size) { 893 + u64 bcnt, pcnt; 894 + unsigned int start; 895 + 896 + do { 897 + start = read_seqbegin(lock); 898 + bcnt = iter->counters.bcnt; 899 + pcnt = iter->counters.pcnt; 900 + } while (read_seqretry(lock, start)); 901 + 902 + ADD_COUNTER(counters[i], bcnt, pcnt); 903 ++i; /* macro does multi eval of i */ 904 } 905 } 906 } 907 908 static struct xt_counters *alloc_counters(const struct xt_table *table) ··· 932 (other than comefrom, which userspace doesn't care 933 about). */ 934 countersize = sizeof(struct xt_counters) * private->number; 935 + counters = vzalloc(countersize); 936 937 if (counters == NULL) 938 return ERR_PTR(-ENOMEM); ··· 1203 struct ipt_entry *iter; 1204 1205 ret = 0; 1206 + counters = vzalloc(num_counters * sizeof(struct xt_counters)); 1207 if (!counters) { 1208 ret = -ENOMEM; 1209 goto out;
+5 -3
net/ipv6/ah6.c
··· 538 if (!pskb_may_pull(skb, ah_hlen)) 539 goto out; 540 541 - ip6h = ipv6_hdr(skb); 542 - 543 - skb_push(skb, hdr_len); 544 545 if ((err = skb_cow_data(skb, 0, &trailer)) < 0) 546 goto out; 547 nfrags = err; 548 549 work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len); 550 if (!work_iph)
··· 538 if (!pskb_may_pull(skb, ah_hlen)) 539 goto out; 540 541 542 if ((err = skb_cow_data(skb, 0, &trailer)) < 0) 543 goto out; 544 nfrags = err; 545 + 546 + ah = (struct ip_auth_hdr *)skb->data; 547 + ip6h = ipv6_hdr(skb); 548 + 549 + skb_push(skb, hdr_len); 550 551 work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len); 552 if (!work_iph)
+1 -1
net/ipv6/inet6_connection_sock.c
··· 44 !sk2->sk_bound_dev_if || 45 sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && 46 (!sk->sk_reuse || !sk2->sk_reuse || 47 - sk2->sk_state == TCP_LISTEN) && 48 ipv6_rcv_saddr_equal(sk, sk2)) 49 break; 50 }
··· 44 !sk2->sk_bound_dev_if || 45 sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && 46 (!sk->sk_reuse || !sk2->sk_reuse || 47 + ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) && 48 ipv6_rcv_saddr_equal(sk, sk2)) 49 break; 50 }
+14 -31
net/ipv6/netfilter/ip6_tables.c
··· 897 struct ip6t_entry *iter; 898 unsigned int cpu; 899 unsigned int i; 900 - unsigned int curcpu = get_cpu(); 901 - 902 - /* Instead of clearing (by a previous call to memset()) 903 - * the counters and using adds, we set the counters 904 - * with data used by 'current' CPU 905 - * 906 - * Bottom half has to be disabled to prevent deadlock 907 - * if new softirq were to run and call ipt_do_table 908 - */ 909 - local_bh_disable(); 910 - i = 0; 911 - xt_entry_foreach(iter, t->entries[curcpu], t->size) { 912 - SET_COUNTER(counters[i], iter->counters.bcnt, 913 - iter->counters.pcnt); 914 - ++i; 915 - } 916 - local_bh_enable(); 917 - /* Processing counters from other cpus, we can let bottom half enabled, 918 - * (preemption is disabled) 919 - */ 920 921 for_each_possible_cpu(cpu) { 922 - if (cpu == curcpu) 923 - continue; 924 i = 0; 925 - local_bh_disable(); 926 - xt_info_wrlock(cpu); 927 xt_entry_foreach(iter, t->entries[cpu], t->size) { 928 - ADD_COUNTER(counters[i], iter->counters.bcnt, 929 - iter->counters.pcnt); 930 ++i; 931 } 932 - xt_info_wrunlock(cpu); 933 - local_bh_enable(); 934 } 935 - put_cpu(); 936 } 937 938 static struct xt_counters *alloc_counters(const struct xt_table *table) ··· 928 (other than comefrom, which userspace doesn't care 929 about). */ 930 countersize = sizeof(struct xt_counters) * private->number; 931 - counters = vmalloc(countersize); 932 933 if (counters == NULL) 934 return ERR_PTR(-ENOMEM); ··· 1199 struct ip6t_entry *iter; 1200 1201 ret = 0; 1202 - counters = vmalloc(num_counters * sizeof(struct xt_counters)); 1203 if (!counters) { 1204 ret = -ENOMEM; 1205 goto out;
··· 897 struct ip6t_entry *iter; 898 unsigned int cpu; 899 unsigned int i; 900 901 for_each_possible_cpu(cpu) { 902 + seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 903 + 904 i = 0; 905 xt_entry_foreach(iter, t->entries[cpu], t->size) { 906 + u64 bcnt, pcnt; 907 + unsigned int start; 908 + 909 + do { 910 + start = read_seqbegin(lock); 911 + bcnt = iter->counters.bcnt; 912 + pcnt = iter->counters.pcnt; 913 + } while (read_seqretry(lock, start)); 914 + 915 + ADD_COUNTER(counters[i], bcnt, pcnt); 916 ++i; 917 } 918 } 919 } 920 921 static struct xt_counters *alloc_counters(const struct xt_table *table) ··· 945 (other than comefrom, which userspace doesn't care 946 about). */ 947 countersize = sizeof(struct xt_counters) * private->number; 948 + counters = vzalloc(countersize); 949 950 if (counters == NULL) 951 return ERR_PTR(-ENOMEM); ··· 1216 struct ip6t_entry *iter; 1217 1218 ret = 0; 1219 + counters = vzalloc(num_counters * sizeof(struct xt_counters)); 1220 if (!counters) { 1221 ret = -ENOMEM; 1222 goto out;
+7 -11
net/netfilter/nf_conntrack_netlink.c
··· 645 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 646 u_int8_t l3proto = nfmsg->nfgen_family; 647 648 - rcu_read_lock(); 649 last = (struct nf_conn *)cb->args[1]; 650 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { 651 restart: 652 - hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]], 653 hnnode) { 654 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) 655 continue; 656 ct = nf_ct_tuplehash_to_ctrack(h); 657 - if (!atomic_inc_not_zero(&ct->ct_general.use)) 658 - continue; 659 /* Dump entries of a given L3 protocol number. 660 * If it is not specified, ie. l3proto == 0, 661 * then dump everything. */ 662 if (l3proto && nf_ct_l3num(ct) != l3proto) 663 - goto releasect; 664 if (cb->args[1]) { 665 if (ct != last) 666 - goto releasect; 667 cb->args[1] = 0; 668 } 669 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, ··· 679 if (acct) 680 memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); 681 } 682 - releasect: 683 - nf_ct_put(ct); 684 } 685 if (cb->args[1]) { 686 cb->args[1] = 0; ··· 686 } 687 } 688 out: 689 - rcu_read_unlock(); 690 if (last) 691 nf_ct_put(last); 692 ··· 924 u16 zone; 925 int err; 926 927 - if (nlh->nlmsg_flags & NLM_F_DUMP) 928 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, 929 ctnetlink_done); 930 ··· 1786 u16 zone; 1787 int err; 1788 1789 - if (nlh->nlmsg_flags & NLM_F_DUMP) { 1790 return netlink_dump_start(ctnl, skb, nlh, 1791 ctnetlink_exp_dump_table, 1792 ctnetlink_exp_done);
··· 645 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 646 u_int8_t l3proto = nfmsg->nfgen_family; 647 648 + spin_lock_bh(&nf_conntrack_lock); 649 last = (struct nf_conn *)cb->args[1]; 650 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { 651 restart: 652 + hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]], 653 hnnode) { 654 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) 655 continue; 656 ct = nf_ct_tuplehash_to_ctrack(h); 657 /* Dump entries of a given L3 protocol number. 658 * If it is not specified, ie. l3proto == 0, 659 * then dump everything. */ 660 if (l3proto && nf_ct_l3num(ct) != l3proto) 661 + continue; 662 if (cb->args[1]) { 663 if (ct != last) 664 + continue; 665 cb->args[1] = 0; 666 } 667 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, ··· 681 if (acct) 682 memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); 683 } 684 } 685 if (cb->args[1]) { 686 cb->args[1] = 0; ··· 690 } 691 } 692 out: 693 + spin_unlock_bh(&nf_conntrack_lock); 694 if (last) 695 nf_ct_put(last); 696 ··· 928 u16 zone; 929 int err; 930 931 + if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) 932 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, 933 ctnetlink_done); 934 ··· 1790 u16 zone; 1791 int err; 1792 1793 + if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { 1794 return netlink_dump_start(ctnl, skb, nlh, 1795 ctnetlink_exp_dump_table, 1796 ctnetlink_exp_done);
+2 -1
net/netfilter/x_tables.c
··· 1325 1326 for_each_possible_cpu(i) { 1327 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); 1328 - spin_lock_init(&lock->lock); 1329 lock->readers = 0; 1330 } 1331
··· 1325 1326 for_each_possible_cpu(i) { 1327 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); 1328 + 1329 + seqlock_init(&lock->lock); 1330 lock->readers = 0; 1331 } 1332
+1 -1
net/netlink/genetlink.c
··· 519 security_netlink_recv(skb, CAP_NET_ADMIN)) 520 return -EPERM; 521 522 - if (nlh->nlmsg_flags & NLM_F_DUMP) { 523 if (ops->dumpit == NULL) 524 return -EOPNOTSUPP; 525
··· 519 security_netlink_recv(skb, CAP_NET_ADMIN)) 520 return -EPERM; 521 522 + if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { 523 if (ops->dumpit == NULL) 524 return -EOPNOTSUPP; 525
+3 -3
net/phonet/af_phonet.c
··· 37 /* Transport protocol registration */ 38 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; 39 40 - static struct phonet_protocol *phonet_proto_get(int protocol) 41 { 42 struct phonet_protocol *pp; 43 ··· 458 459 static DEFINE_MUTEX(proto_tab_lock); 460 461 - int __init_or_module phonet_proto_register(int protocol, 462 struct phonet_protocol *pp) 463 { 464 int err = 0; ··· 481 } 482 EXPORT_SYMBOL(phonet_proto_register); 483 484 - void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) 485 { 486 mutex_lock(&proto_tab_lock); 487 BUG_ON(proto_tab[protocol] != pp);
··· 37 /* Transport protocol registration */ 38 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; 39 40 + static struct phonet_protocol *phonet_proto_get(unsigned int protocol) 41 { 42 struct phonet_protocol *pp; 43 ··· 458 459 static DEFINE_MUTEX(proto_tab_lock); 460 461 + int __init_or_module phonet_proto_register(unsigned int protocol, 462 struct phonet_protocol *pp) 463 { 464 int err = 0; ··· 481 } 482 EXPORT_SYMBOL(phonet_proto_register); 483 484 + void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp) 485 { 486 mutex_lock(&proto_tab_lock); 487 BUG_ON(proto_tab[protocol] != pp);
+1 -2
net/sched/act_csum.c
··· 508 509 spin_lock(&p->tcf_lock); 510 p->tcf_tm.lastuse = jiffies; 511 - p->tcf_bstats.bytes += qdisc_pkt_len(skb); 512 - p->tcf_bstats.packets++; 513 action = p->tcf_action; 514 update_flags = p->update_flags; 515 spin_unlock(&p->tcf_lock);
··· 508 509 spin_lock(&p->tcf_lock); 510 p->tcf_tm.lastuse = jiffies; 511 + bstats_update(&p->tcf_bstats, skb); 512 action = p->tcf_action; 513 update_flags = p->update_flags; 514 spin_unlock(&p->tcf_lock);
+1 -2
net/sched/act_ipt.c
··· 209 spin_lock(&ipt->tcf_lock); 210 211 ipt->tcf_tm.lastuse = jiffies; 212 - ipt->tcf_bstats.bytes += qdisc_pkt_len(skb); 213 - ipt->tcf_bstats.packets++; 214 215 /* yes, we have to worry about both in and out dev 216 worry later - danger - this API seems to have changed
··· 209 spin_lock(&ipt->tcf_lock); 210 211 ipt->tcf_tm.lastuse = jiffies; 212 + bstats_update(&ipt->tcf_bstats, skb); 213 214 /* yes, we have to worry about both in and out dev 215 worry later - danger - this API seems to have changed
+1 -2
net/sched/act_mirred.c
··· 165 166 spin_lock(&m->tcf_lock); 167 m->tcf_tm.lastuse = jiffies; 168 - m->tcf_bstats.bytes += qdisc_pkt_len(skb); 169 - m->tcf_bstats.packets++; 170 171 dev = m->tcfm_dev; 172 if (!dev) {
··· 165 166 spin_lock(&m->tcf_lock); 167 m->tcf_tm.lastuse = jiffies; 168 + bstats_update(&m->tcf_bstats, skb); 169 170 dev = m->tcfm_dev; 171 if (!dev) {
+1 -2
net/sched/act_nat.c
··· 125 egress = p->flags & TCA_NAT_FLAG_EGRESS; 126 action = p->tcf_action; 127 128 - p->tcf_bstats.bytes += qdisc_pkt_len(skb); 129 - p->tcf_bstats.packets++; 130 131 spin_unlock(&p->tcf_lock); 132
··· 125 egress = p->flags & TCA_NAT_FLAG_EGRESS; 126 action = p->tcf_action; 127 128 + bstats_update(&p->tcf_bstats, skb); 129 130 spin_unlock(&p->tcf_lock); 131
+1 -2
net/sched/act_pedit.c
··· 187 bad: 188 p->tcf_qstats.overlimits++; 189 done: 190 - p->tcf_bstats.bytes += qdisc_pkt_len(skb); 191 - p->tcf_bstats.packets++; 192 spin_unlock(&p->tcf_lock); 193 return p->tcf_action; 194 }
··· 187 bad: 188 p->tcf_qstats.overlimits++; 189 done: 190 + bstats_update(&p->tcf_bstats, skb); 191 spin_unlock(&p->tcf_lock); 192 return p->tcf_action; 193 }
+1 -2
net/sched/act_police.c
··· 298 299 spin_lock(&police->tcf_lock); 300 301 - police->tcf_bstats.bytes += qdisc_pkt_len(skb); 302 - police->tcf_bstats.packets++; 303 304 if (police->tcfp_ewma_rate && 305 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
··· 298 299 spin_lock(&police->tcf_lock); 300 301 + bstats_update(&police->tcf_bstats, skb); 302 303 if (police->tcfp_ewma_rate && 304 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
+1 -2
net/sched/act_simple.c
··· 42 43 spin_lock(&d->tcf_lock); 44 d->tcf_tm.lastuse = jiffies; 45 - d->tcf_bstats.bytes += qdisc_pkt_len(skb); 46 - d->tcf_bstats.packets++; 47 48 /* print policy string followed by _ then packet count 49 * Example if this was the 3rd packet and the string was "hello"
··· 42 43 spin_lock(&d->tcf_lock); 44 d->tcf_tm.lastuse = jiffies; 45 + bstats_update(&d->tcf_bstats, skb); 46 47 /* print policy string followed by _ then packet count 48 * Example if this was the 3rd packet and the string was "hello"
+1 -2
net/sched/act_skbedit.c
··· 46 47 spin_lock(&d->tcf_lock); 48 d->tcf_tm.lastuse = jiffies; 49 - d->tcf_bstats.bytes += qdisc_pkt_len(skb); 50 - d->tcf_bstats.packets++; 51 52 if (d->flags & SKBEDIT_F_PRIORITY) 53 skb->priority = d->priority;
··· 46 47 spin_lock(&d->tcf_lock); 48 d->tcf_tm.lastuse = jiffies; 49 + bstats_update(&d->tcf_bstats, skb); 50 51 if (d->flags & SKBEDIT_F_PRIORITY) 52 skb->priority = d->priority;
+2 -4
net/sched/sch_atm.c
··· 422 } 423 return ret; 424 } 425 - sch->bstats.bytes += qdisc_pkt_len(skb); 426 - sch->bstats.packets++; 427 - flow->bstats.bytes += qdisc_pkt_len(skb); 428 - flow->bstats.packets++; 429 /* 430 * Okay, this may seem weird. We pretend we've dropped the packet if 431 * it goes via ATM. The reason for this is that the outer qdisc
··· 422 } 423 return ret; 424 } 425 + qdisc_bstats_update(sch, skb); 426 + bstats_update(&flow->bstats, skb); 427 /* 428 * Okay, this may seem weird. We pretend we've dropped the packet if 429 * it goes via ATM. The reason for this is that the outer qdisc
+2 -4
net/sched/sch_cbq.c
··· 390 ret = qdisc_enqueue(skb, cl->q); 391 if (ret == NET_XMIT_SUCCESS) { 392 sch->q.qlen++; 393 - sch->bstats.packets++; 394 - sch->bstats.bytes += qdisc_pkt_len(skb); 395 cbq_mark_toplevel(q, cl); 396 if (!cl->next_alive) 397 cbq_activate_class(cl); ··· 649 ret = qdisc_enqueue(skb, cl->q); 650 if (ret == NET_XMIT_SUCCESS) { 651 sch->q.qlen++; 652 - sch->bstats.packets++; 653 - sch->bstats.bytes += qdisc_pkt_len(skb); 654 if (!cl->next_alive) 655 cbq_activate_class(cl); 656 return 0;
··· 390 ret = qdisc_enqueue(skb, cl->q); 391 if (ret == NET_XMIT_SUCCESS) { 392 sch->q.qlen++; 393 + qdisc_bstats_update(sch, skb); 394 cbq_mark_toplevel(q, cl); 395 if (!cl->next_alive) 396 cbq_activate_class(cl); ··· 650 ret = qdisc_enqueue(skb, cl->q); 651 if (ret == NET_XMIT_SUCCESS) { 652 sch->q.qlen++; 653 + qdisc_bstats_update(sch, skb); 654 if (!cl->next_alive) 655 cbq_activate_class(cl); 656 return 0;
+2 -6
net/sched/sch_drr.c
··· 351 { 352 struct drr_sched *q = qdisc_priv(sch); 353 struct drr_class *cl; 354 - unsigned int len; 355 int err; 356 357 cl = drr_classify(skb, sch, &err); ··· 361 return err; 362 } 363 364 - len = qdisc_pkt_len(skb); 365 err = qdisc_enqueue(skb, cl->qdisc); 366 if (unlikely(err != NET_XMIT_SUCCESS)) { 367 if (net_xmit_drop_count(err)) { ··· 375 cl->deficit = cl->quantum; 376 } 377 378 - cl->bstats.packets++; 379 - cl->bstats.bytes += len; 380 - sch->bstats.packets++; 381 - sch->bstats.bytes += len; 382 383 sch->q.qlen++; 384 return err;
··· 351 { 352 struct drr_sched *q = qdisc_priv(sch); 353 struct drr_class *cl; 354 int err; 355 356 cl = drr_classify(skb, sch, &err); ··· 362 return err; 363 } 364 365 err = qdisc_enqueue(skb, cl->qdisc); 366 if (unlikely(err != NET_XMIT_SUCCESS)) { 367 if (net_xmit_drop_count(err)) { ··· 377 cl->deficit = cl->quantum; 378 } 379 380 + bstats_update(&cl->bstats, skb); 381 + qdisc_bstats_update(sch, skb); 382 383 sch->q.qlen++; 384 return err;
+1 -2
net/sched/sch_dsmark.c
··· 260 return err; 261 } 262 263 - sch->bstats.bytes += qdisc_pkt_len(skb); 264 - sch->bstats.packets++; 265 sch->q.qlen++; 266 267 return NET_XMIT_SUCCESS;
··· 260 return err; 261 } 262 263 + qdisc_bstats_update(sch, skb); 264 sch->q.qlen++; 265 266 return NET_XMIT_SUCCESS;
+2 -4
net/sched/sch_hfsc.c
··· 1599 if (cl->qdisc->q.qlen == 1) 1600 set_active(cl, qdisc_pkt_len(skb)); 1601 1602 - cl->bstats.packets++; 1603 - cl->bstats.bytes += qdisc_pkt_len(skb); 1604 - sch->bstats.packets++; 1605 - sch->bstats.bytes += qdisc_pkt_len(skb); 1606 sch->q.qlen++; 1607 1608 return NET_XMIT_SUCCESS;
··· 1599 if (cl->qdisc->q.qlen == 1) 1600 set_active(cl, qdisc_pkt_len(skb)); 1601 1602 + bstats_update(&cl->bstats, skb); 1603 + qdisc_bstats_update(sch, skb); 1604 sch->q.qlen++; 1605 1606 return NET_XMIT_SUCCESS;
+6 -11
net/sched/sch_htb.c
··· 569 } 570 return ret; 571 } else { 572 - cl->bstats.packets += 573 - skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; 574 - cl->bstats.bytes += qdisc_pkt_len(skb); 575 htb_activate(q, cl); 576 } 577 578 sch->q.qlen++; 579 - sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; 580 - sch->bstats.bytes += qdisc_pkt_len(skb); 581 return NET_XMIT_SUCCESS; 582 } 583 ··· 645 htb_add_to_wait_tree(q, cl, diff); 646 } 647 648 - /* update byte stats except for leaves which are already updated */ 649 - if (cl->level) { 650 - cl->bstats.bytes += bytes; 651 - cl->bstats.packets += skb_is_gso(skb)? 652 - skb_shinfo(skb)->gso_segs:1; 653 - } 654 cl = cl->parent; 655 } 656 }
··· 569 } 570 return ret; 571 } else { 572 + bstats_update(&cl->bstats, skb); 573 htb_activate(q, cl); 574 } 575 576 sch->q.qlen++; 577 + qdisc_bstats_update(sch, skb); 578 return NET_XMIT_SUCCESS; 579 } 580 ··· 648 htb_add_to_wait_tree(q, cl, diff); 649 } 650 651 + /* update basic stats except for leaves which are already updated */ 652 + if (cl->level) 653 + bstats_update(&cl->bstats, skb); 654 + 655 cl = cl->parent; 656 } 657 }
+1 -2
net/sched/sch_ingress.c
··· 63 64 result = tc_classify(skb, p->filter_list, &res); 65 66 - sch->bstats.packets++; 67 - sch->bstats.bytes += qdisc_pkt_len(skb); 68 switch (result) { 69 case TC_ACT_SHOT: 70 result = TC_ACT_SHOT;
··· 63 64 result = tc_classify(skb, p->filter_list, &res); 65 66 + qdisc_bstats_update(sch, skb); 67 switch (result) { 68 case TC_ACT_SHOT: 69 result = TC_ACT_SHOT;
+1 -2
net/sched/sch_multiq.c
··· 83 84 ret = qdisc_enqueue(skb, qdisc); 85 if (ret == NET_XMIT_SUCCESS) { 86 - sch->bstats.bytes += qdisc_pkt_len(skb); 87 - sch->bstats.packets++; 88 sch->q.qlen++; 89 return NET_XMIT_SUCCESS; 90 }
··· 83 84 ret = qdisc_enqueue(skb, qdisc); 85 if (ret == NET_XMIT_SUCCESS) { 86 + qdisc_bstats_update(sch, skb); 87 sch->q.qlen++; 88 return NET_XMIT_SUCCESS; 89 }
+2 -4
net/sched/sch_netem.c
··· 240 241 if (likely(ret == NET_XMIT_SUCCESS)) { 242 sch->q.qlen++; 243 - sch->bstats.bytes += qdisc_pkt_len(skb); 244 - sch->bstats.packets++; 245 } else if (net_xmit_drop_count(ret)) { 246 sch->qstats.drops++; 247 } ··· 476 __skb_queue_after(list, skb, nskb); 477 478 sch->qstats.backlog += qdisc_pkt_len(nskb); 479 - sch->bstats.bytes += qdisc_pkt_len(nskb); 480 - sch->bstats.packets++; 481 482 return NET_XMIT_SUCCESS; 483 }
··· 240 241 if (likely(ret == NET_XMIT_SUCCESS)) { 242 sch->q.qlen++; 243 + qdisc_bstats_update(sch, skb); 244 } else if (net_xmit_drop_count(ret)) { 245 sch->qstats.drops++; 246 } ··· 477 __skb_queue_after(list, skb, nskb); 478 479 sch->qstats.backlog += qdisc_pkt_len(nskb); 480 + qdisc_bstats_update(sch, nskb); 481 482 return NET_XMIT_SUCCESS; 483 }
+1 -2
net/sched/sch_prio.c
··· 84 85 ret = qdisc_enqueue(skb, qdisc); 86 if (ret == NET_XMIT_SUCCESS) { 87 - sch->bstats.bytes += qdisc_pkt_len(skb); 88 - sch->bstats.packets++; 89 sch->q.qlen++; 90 return NET_XMIT_SUCCESS; 91 }
··· 84 85 ret = qdisc_enqueue(skb, qdisc); 86 if (ret == NET_XMIT_SUCCESS) { 87 + qdisc_bstats_update(sch, skb); 88 sch->q.qlen++; 89 return NET_XMIT_SUCCESS; 90 }
+1 -2
net/sched/sch_red.c
··· 94 95 ret = qdisc_enqueue(skb, child); 96 if (likely(ret == NET_XMIT_SUCCESS)) { 97 - sch->bstats.bytes += qdisc_pkt_len(skb); 98 - sch->bstats.packets++; 99 sch->q.qlen++; 100 } else if (net_xmit_drop_count(ret)) { 101 q->stats.pdrop++;
··· 94 95 ret = qdisc_enqueue(skb, child); 96 if (likely(ret == NET_XMIT_SUCCESS)) { 97 + qdisc_bstats_update(sch, skb); 98 sch->q.qlen++; 99 } else if (net_xmit_drop_count(ret)) { 100 q->stats.pdrop++;
+1 -2
net/sched/sch_sfq.c
··· 403 slot->allot = q->scaled_quantum; 404 } 405 if (++sch->q.qlen <= q->limit) { 406 - sch->bstats.bytes += qdisc_pkt_len(skb); 407 - sch->bstats.packets++; 408 return NET_XMIT_SUCCESS; 409 } 410
··· 403 slot->allot = q->scaled_quantum; 404 } 405 if (++sch->q.qlen <= q->limit) { 406 + qdisc_bstats_update(sch, skb); 407 return NET_XMIT_SUCCESS; 408 } 409
+1 -2
net/sched/sch_tbf.c
··· 134 } 135 136 sch->q.qlen++; 137 - sch->bstats.bytes += qdisc_pkt_len(skb); 138 - sch->bstats.packets++; 139 return NET_XMIT_SUCCESS; 140 } 141
··· 134 } 135 136 sch->q.qlen++; 137 + qdisc_bstats_update(sch, skb); 138 return NET_XMIT_SUCCESS; 139 } 140
+1 -2
net/sched/sch_teql.c
··· 83 84 if (q->q.qlen < dev->tx_queue_len) { 85 __skb_queue_tail(&q->q, skb); 86 - sch->bstats.bytes += qdisc_pkt_len(skb); 87 - sch->bstats.packets++; 88 return NET_XMIT_SUCCESS; 89 } 90
··· 83 84 if (q->q.qlen < dev->tx_queue_len) { 85 __skb_queue_tail(&q->q, skb); 86 + qdisc_bstats_update(sch, skb); 87 return NET_XMIT_SUCCESS; 88 } 89
+4 -2
net/xfrm/xfrm_user.c
··· 26 #include <net/sock.h> 27 #include <net/xfrm.h> 28 #include <net/netlink.h> 29 #include <asm/uaccess.h> 30 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 31 #include <linux/in6.h> ··· 303 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 304 if (!algo) 305 return -ENOSYS; 306 - if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) 307 return -EINVAL; 308 *props = algo->desc.sadb_alg_id; 309 ··· 2189 2190 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || 2191 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && 2192 - (nlh->nlmsg_flags & NLM_F_DUMP)) { 2193 if (link->dump == NULL) 2194 return -EINVAL; 2195
··· 26 #include <net/sock.h> 27 #include <net/xfrm.h> 28 #include <net/netlink.h> 29 + #include <net/ah.h> 30 #include <asm/uaccess.h> 31 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 32 #include <linux/in6.h> ··· 302 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 303 if (!algo) 304 return -ENOSYS; 305 + if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN || 306 + ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) 307 return -EINVAL; 308 *props = algo->desc.sadb_alg_id; 309 ··· 2187 2188 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || 2189 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && 2190 + (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { 2191 if (link->dump == NULL) 2192 return -EINVAL; 2193