Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (56 commits)
route: Take the right src and dst addresses in ip_route_newports
ipv4: Fix nexthop caching wrt. scoping.
ipv4: Invalidate nexthop cache nh_saddr more correctly.
net: fix pch_gbe section mismatch warning
ipv4: fix fib metrics
mlx4_en: Removing HW info from ethtool -i report.
net_sched: fix THROTTLED/RUNNING race
drivers/net/a2065.c: Convert release_resource to release_region/release_mem_region
drivers/net/ariadne.c: Convert release_resource to release_region/release_mem_region
bonding: fix rx_handler locking
myri10ge: fix rmmod crash
mlx4_en: updated driver version to 1.5.4.1
mlx4_en: Using blue flame support
mlx4_core: reserve UARs for userspace consumers
mlx4_core: maintain available field in bitmap allocator
mlx4: Add blue flame support for kernel consumers
mlx4_en: Enabling new steering
mlx4: Add support for promiscuous mode in the new steering model.
mlx4: generalization of multicast steering.
mlx4_en: Reporting HW revision in ethtool -i
...

+1906 -364
+5 -6
drivers/infiniband/hw/mlx4/main.c
··· 625 626 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 627 !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 628 - MLX4_PROTOCOL_IB); 629 if (err) 630 return err; 631 ··· 636 return 0; 637 638 err_add: 639 - mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB); 640 return err; 641 } 642 ··· 666 struct mlx4_ib_gid_entry *ge; 667 668 err = mlx4_multicast_detach(mdev->dev, 669 - &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB); 670 if (err) 671 return err; 672 ··· 721 if (err) 722 goto out; 723 724 - dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); 725 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); 726 727 out: ··· 953 mlx4_foreach_ib_transport_port(port, ibdev->dev) { 954 oldnd = iboe->netdevs[port - 1]; 955 iboe->netdevs[port - 1] = 956 - mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port); 957 if (oldnd != iboe->netdevs[port - 1]) { 958 if (iboe->netdevs[port - 1]) 959 netdev_added(ibdev, port); ··· 1206 .add = mlx4_ib_add, 1207 .remove = mlx4_ib_remove, 1208 .event = mlx4_ib_event, 1209 - .protocol = MLX4_PROTOCOL_IB 1210 }; 1211 1212 static int __init mlx4_ib_init(void)
··· 625 626 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 627 !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 628 + MLX4_PROT_IB_IPV6); 629 if (err) 630 return err; 631 ··· 636 return 0; 637 638 err_add: 639 + mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); 640 return err; 641 } 642 ··· 666 struct mlx4_ib_gid_entry *ge; 667 668 err = mlx4_multicast_detach(mdev->dev, 669 + &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); 670 if (err) 671 return err; 672 ··· 721 if (err) 722 goto out; 723 724 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); 725 726 out: ··· 954 mlx4_foreach_ib_transport_port(port, ibdev->dev) { 955 oldnd = iboe->netdevs[port - 1]; 956 iboe->netdevs[port - 1] = 957 + mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); 958 if (oldnd != iboe->netdevs[port - 1]) { 959 if (iboe->netdevs[port - 1]) 960 netdev_added(ibdev, port); ··· 1207 .add = mlx4_ib_add, 1208 .remove = mlx4_ib_remove, 1209 .event = mlx4_ib_event, 1210 + .protocol = MLX4_PROT_IB_IPV6 1211 }; 1212 1213 static int __init mlx4_ib_init(void)
+5 -5
drivers/net/a2065.c
··· 711 return -EBUSY; 712 r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM"); 713 if (!r2) { 714 - release_resource(r1); 715 return -EBUSY; 716 } 717 718 dev = alloc_etherdev(sizeof(struct lance_private)); 719 if (dev == NULL) { 720 - release_resource(r1); 721 - release_resource(r2); 722 return -ENOMEM; 723 } 724 ··· 764 765 err = register_netdev(dev); 766 if (err) { 767 - release_resource(r1); 768 - release_resource(r2); 769 free_netdev(dev); 770 return err; 771 }
··· 711 return -EBUSY; 712 r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM"); 713 if (!r2) { 714 + release_mem_region(base_addr, sizeof(struct lance_regs)); 715 return -EBUSY; 716 } 717 718 dev = alloc_etherdev(sizeof(struct lance_private)); 719 if (dev == NULL) { 720 + release_mem_region(base_addr, sizeof(struct lance_regs)); 721 + release_mem_region(mem_start, A2065_RAM_SIZE); 722 return -ENOMEM; 723 } 724 ··· 764 765 err = register_netdev(dev); 766 if (err) { 767 + release_mem_region(base_addr, sizeof(struct lance_regs)); 768 + release_mem_region(mem_start, A2065_RAM_SIZE); 769 free_netdev(dev); 770 return err; 771 }
+5 -5
drivers/net/ariadne.c
··· 182 return -EBUSY; 183 r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM"); 184 if (!r2) { 185 - release_resource(r1); 186 return -EBUSY; 187 } 188 189 dev = alloc_etherdev(sizeof(struct ariadne_private)); 190 if (dev == NULL) { 191 - release_resource(r1); 192 - release_resource(r2); 193 return -ENOMEM; 194 } 195 ··· 213 214 err = register_netdev(dev); 215 if (err) { 216 - release_resource(r1); 217 - release_resource(r2); 218 free_netdev(dev); 219 return err; 220 }
··· 182 return -EBUSY; 183 r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM"); 184 if (!r2) { 185 + release_mem_region(base_addr, sizeof(struct Am79C960)); 186 return -EBUSY; 187 } 188 189 dev = alloc_etherdev(sizeof(struct ariadne_private)); 190 if (dev == NULL) { 191 + release_mem_region(base_addr, sizeof(struct Am79C960)); 192 + release_mem_region(mem_start, ARIADNE_RAM_SIZE); 193 return -ENOMEM; 194 } 195 ··· 213 214 err = register_netdev(dev); 215 if (err) { 216 + release_mem_region(base_addr, sizeof(struct Am79C960)); 217 + release_mem_region(mem_start, ARIADNE_RAM_SIZE); 218 free_netdev(dev); 219 return err; 220 }
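Both the a2065 and ariadne hunks apply the same rule: a region claimed with request_mem_region() must be undone with release_mem_region() on the same base and length, not with release_resource() on the returned pointer. A minimal kernel-style sketch of that error-path pattern follows; the EXAMPLE_* addresses and the example_probe name are placeholders, not taken from either driver.

#include <linux/ioport.h>
#include <linux/errno.h>

/* Hypothetical addresses and sizes, for illustration only. */
#define EXAMPLE_REGS_BASE	0x00e90000
#define EXAMPLE_REGS_SIZE	0x1000
#define EXAMPLE_RAM_BASE	0x00e98000
#define EXAMPLE_RAM_SIZE	0x8000

static int example_probe(void)
{
	struct resource *r1, *r2;

	r1 = request_mem_region(EXAMPLE_REGS_BASE, EXAMPLE_REGS_SIZE, "regs");
	if (!r1)
		return -EBUSY;

	r2 = request_mem_region(EXAMPLE_RAM_BASE, EXAMPLE_RAM_SIZE, "RAM");
	if (!r2) {
		/* undo with the matching base/size, not release_resource(r1) */
		release_mem_region(EXAMPLE_REGS_BASE, EXAMPLE_REGS_SIZE);
		return -EBUSY;
	}

	/* ... device setup; any later failure must release both regions,
	 * on success they stay claimed for the lifetime of the device ... */
	return 0;
}
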
+31 -25
drivers/net/bonding/bond_main.c
··· 1482 { 1483 struct sk_buff *skb = *pskb; 1484 struct slave *slave; 1485 - struct net_device *bond_dev; 1486 struct bonding *bond; 1487 - 1488 - slave = bond_slave_get_rcu(skb->dev); 1489 - bond_dev = ACCESS_ONCE(slave->dev->master); 1490 - if (unlikely(!bond_dev)) 1491 - return RX_HANDLER_PASS; 1492 1493 skb = skb_share_check(skb, GFP_ATOMIC); 1494 if (unlikely(!skb)) ··· 1490 1491 *pskb = skb; 1492 1493 - bond = netdev_priv(bond_dev); 1494 1495 if (bond->params.arp_interval) 1496 slave->dev->last_rx = jiffies; ··· 1500 return RX_HANDLER_EXACT; 1501 } 1502 1503 - skb->dev = bond_dev; 1504 1505 if (bond->params.mode == BOND_MODE_ALB && 1506 - bond_dev->priv_flags & IFF_BRIDGE_PORT && 1507 skb->pkt_type == PACKET_HOST) { 1508 1509 if (unlikely(skb_cow_head(skb, ··· 1511 kfree_skb(skb); 1512 return RX_HANDLER_CONSUMED; 1513 } 1514 - memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN); 1515 } 1516 1517 return RX_HANDLER_ANOTHER; ··· 1693 pr_debug("Error %d calling netdev_set_bond_master\n", res); 1694 goto err_restore_mac; 1695 } 1696 - res = netdev_rx_handler_register(slave_dev, bond_handle_frame, 1697 - new_slave); 1698 - if (res) { 1699 - pr_debug("Error %d calling netdev_rx_handler_register\n", res); 1700 - goto err_unset_master; 1701 - } 1702 1703 /* open the slave since the application closed it */ 1704 res = dev_open(slave_dev); 1705 if (res) { 1706 pr_debug("Opening slave %s failed\n", slave_dev->name); 1707 - goto err_unreg_rxhandler; 1708 } 1709 1710 new_slave->dev = slave_dev; 1711 slave_dev->priv_flags |= IFF_BONDING; 1712 ··· 1897 if (res) 1898 goto err_close; 1899 1900 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", 1901 bond_dev->name, slave_dev->name, 1902 bond_is_active_slave(new_slave) ? "n active" : " backup", ··· 1913 return 0; 1914 1915 /* Undo stages on error */ 1916 err_close: 1917 dev_close(slave_dev); 1918 - 1919 - err_unreg_rxhandler: 1920 - netdev_rx_handler_unregister(slave_dev); 1921 - synchronize_net(); 1922 1923 err_unset_master: 1924 netdev_set_bond_master(slave_dev, NULL); ··· 1983 unblock_netpoll_tx(); 1984 return -EINVAL; 1985 } 1986 1987 if (!bond->params.fail_over_mac) { 1988 if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && ··· 2108 netif_addr_unlock_bh(bond_dev); 2109 } 2110 2111 - netdev_rx_handler_unregister(slave_dev); 2112 - synchronize_net(); 2113 netdev_set_bond_master(slave_dev, NULL); 2114 2115 slave_disable_netpoll(slave); ··· 2188 */ 2189 write_unlock_bh(&bond->lock); 2190 2191 if (bond_is_lb(bond)) { 2192 /* must be called only after the slave 2193 * has been detached from the list ··· 2225 netif_addr_unlock_bh(bond_dev); 2226 } 2227 2228 - netdev_rx_handler_unregister(slave_dev); 2229 - synchronize_net(); 2230 netdev_set_bond_master(slave_dev, NULL); 2231 2232 slave_disable_netpoll(slave);
··· 1482 { 1483 struct sk_buff *skb = *pskb; 1484 struct slave *slave; 1485 struct bonding *bond; 1486 1487 skb = skb_share_check(skb, GFP_ATOMIC); 1488 if (unlikely(!skb)) ··· 1496 1497 *pskb = skb; 1498 1499 + slave = bond_slave_get_rcu(skb->dev); 1500 + bond = slave->bond; 1501 1502 if (bond->params.arp_interval) 1503 slave->dev->last_rx = jiffies; ··· 1505 return RX_HANDLER_EXACT; 1506 } 1507 1508 + skb->dev = bond->dev; 1509 1510 if (bond->params.mode == BOND_MODE_ALB && 1511 + bond->dev->priv_flags & IFF_BRIDGE_PORT && 1512 skb->pkt_type == PACKET_HOST) { 1513 1514 if (unlikely(skb_cow_head(skb, ··· 1516 kfree_skb(skb); 1517 return RX_HANDLER_CONSUMED; 1518 } 1519 + memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN); 1520 } 1521 1522 return RX_HANDLER_ANOTHER; ··· 1698 pr_debug("Error %d calling netdev_set_bond_master\n", res); 1699 goto err_restore_mac; 1700 } 1701 1702 /* open the slave since the application closed it */ 1703 res = dev_open(slave_dev); 1704 if (res) { 1705 pr_debug("Opening slave %s failed\n", slave_dev->name); 1706 + goto err_unset_master; 1707 } 1708 1709 + new_slave->bond = bond; 1710 new_slave->dev = slave_dev; 1711 slave_dev->priv_flags |= IFF_BONDING; 1712 ··· 1907 if (res) 1908 goto err_close; 1909 1910 + res = netdev_rx_handler_register(slave_dev, bond_handle_frame, 1911 + new_slave); 1912 + if (res) { 1913 + pr_debug("Error %d calling netdev_rx_handler_register\n", res); 1914 + goto err_dest_symlinks; 1915 + } 1916 + 1917 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", 1918 bond_dev->name, slave_dev->name, 1919 bond_is_active_slave(new_slave) ? "n active" : " backup", ··· 1916 return 0; 1917 1918 /* Undo stages on error */ 1919 + err_dest_symlinks: 1920 + bond_destroy_slave_symlinks(bond_dev, slave_dev); 1921 + 1922 err_close: 1923 dev_close(slave_dev); 1924 1925 err_unset_master: 1926 netdev_set_bond_master(slave_dev, NULL); ··· 1987 unblock_netpoll_tx(); 1988 return -EINVAL; 1989 } 1990 + 1991 + /* unregister rx_handler early so bond_handle_frame wouldn't be called 1992 + * for this slave anymore. 1993 + */ 1994 + netdev_rx_handler_unregister(slave_dev); 1995 + write_unlock_bh(&bond->lock); 1996 + synchronize_net(); 1997 + write_lock_bh(&bond->lock); 1998 1999 if (!bond->params.fail_over_mac) { 2000 if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) && ··· 2104 netif_addr_unlock_bh(bond_dev); 2105 } 2106 2107 netdev_set_bond_master(slave_dev, NULL); 2108 2109 slave_disable_netpoll(slave); ··· 2186 */ 2187 write_unlock_bh(&bond->lock); 2188 2189 + /* unregister rx_handler early so bond_handle_frame wouldn't 2190 + * be called for this slave anymore. 2191 + */ 2192 + netdev_rx_handler_unregister(slave_dev); 2193 + synchronize_net(); 2194 + 2195 if (bond_is_lb(bond)) { 2196 /* must be called only after the slave 2197 * has been detached from the list ··· 2217 netif_addr_unlock_bh(bond_dev); 2218 } 2219 2220 netdev_set_bond_master(slave_dev, NULL); 2221 2222 slave_disable_netpoll(slave);
+1
drivers/net/bonding/bonding.h
··· 187 struct net_device *dev; /* first - useful for panic debug */ 188 struct slave *next; 189 struct slave *prev; 190 int delay; 191 unsigned long jiffies; 192 unsigned long last_arp_rx;
··· 187 struct net_device *dev; /* first - useful for panic debug */ 188 struct slave *next; 189 struct slave *prev; 190 + struct bonding *bond; /* our master */ 191 int delay; 192 unsigned long jiffies; 193 unsigned long last_arp_rx;
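The bonding changes above all revolve around the rx_handler contract: the handler runs under RCU with the pointer passed at registration time, so it must be registered only once the slave (including the new slave->bond back-pointer) is fully set up, and unregistered followed by synchronize_net() before any of that data is torn down. A hedged sketch of that contract is below; my_priv, my_handle_frame, my_attach and my_detach are illustrative names, not bonding code.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	struct net_device *upper;	/* e.g. the aggregating device */
};

/* Runs in softirq context under rcu_read_lock() for every frame on the slave. */
static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct my_priv *priv = rcu_dereference(skb->dev->rx_handler_data);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;
	*pskb = skb;

	skb->dev = priv->upper;		/* steer the frame to the upper device */
	return RX_HANDLER_ANOTHER;	/* stack re-runs receive for skb->dev */
}

/* Called under rtnl_lock(); register last, once priv is fully initialised. */
static int my_attach(struct net_device *slave_dev, struct my_priv *priv)
{
	return netdev_rx_handler_register(slave_dev, my_handle_frame, priv);
}

/* Unregister first, then wait for in-flight handlers before freeing priv. */
static void my_detach(struct net_device *slave_dev)
{
	netdev_rx_handler_unregister(slave_dev);
	synchronize_net();
}
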
+8 -3
drivers/net/davinci_cpdma.c
··· 76 77 struct cpdma_desc_pool { 78 u32 phys; 79 void __iomem *iomap; /* ioremap map */ 80 void *cpumap; /* dma_alloc map */ 81 int desc_size, mem_size; ··· 138 * abstract out these details 139 */ 140 static struct cpdma_desc_pool * 141 - cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align) 142 { 143 int bitmap_size; 144 struct cpdma_desc_pool *pool; ··· 163 if (phys) { 164 pool->phys = phys; 165 pool->iomap = ioremap(phys, size); 166 } else { 167 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, 168 GFP_KERNEL); 169 pool->iomap = (void __force __iomem *)pool->cpumap; 170 } 171 172 if (pool->iomap) ··· 205 { 206 if (!desc) 207 return 0; 208 - return pool->phys + (__force dma_addr_t)desc - 209 (__force dma_addr_t)pool->iomap; 210 } 211 212 static inline struct cpdma_desc __iomem * 213 desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) 214 { 215 - return dma ? pool->iomap + dma - pool->phys : NULL; 216 } 217 218 static struct cpdma_desc __iomem * ··· 264 265 ctlr->pool = cpdma_desc_pool_create(ctlr->dev, 266 ctlr->params.desc_mem_phys, 267 ctlr->params.desc_mem_size, 268 ctlr->params.desc_align); 269 if (!ctlr->pool) {
··· 76 77 struct cpdma_desc_pool { 78 u32 phys; 79 + u32 hw_addr; 80 void __iomem *iomap; /* ioremap map */ 81 void *cpumap; /* dma_alloc map */ 82 int desc_size, mem_size; ··· 137 * abstract out these details 138 */ 139 static struct cpdma_desc_pool * 140 + cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, 141 + int size, int align) 142 { 143 int bitmap_size; 144 struct cpdma_desc_pool *pool; ··· 161 if (phys) { 162 pool->phys = phys; 163 pool->iomap = ioremap(phys, size); 164 + pool->hw_addr = hw_addr; 165 } else { 166 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, 167 GFP_KERNEL); 168 pool->iomap = (void __force __iomem *)pool->cpumap; 169 + pool->hw_addr = pool->phys; 170 } 171 172 if (pool->iomap) ··· 201 { 202 if (!desc) 203 return 0; 204 + return pool->hw_addr + (__force dma_addr_t)desc - 205 (__force dma_addr_t)pool->iomap; 206 } 207 208 static inline struct cpdma_desc __iomem * 209 desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) 210 { 211 + return dma ? pool->iomap + dma - pool->hw_addr : NULL; 212 } 213 214 static struct cpdma_desc __iomem * ··· 260 261 ctlr->pool = cpdma_desc_pool_create(ctlr->dev, 262 ctlr->params.desc_mem_phys, 263 + ctlr->params.desc_hw_addr, 264 ctlr->params.desc_mem_size, 265 ctlr->params.desc_align); 266 if (!ctlr->pool) {
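The new hw_addr field separates the CPU-side mapping of the descriptor pool from the address the CPDMA engine itself uses; desc_phys() and desc_from_phys() translate by offset between the two. A small userspace sketch of that arithmetic, with made-up addresses:

#include <stdint.h>
#include <stdio.h>

struct pool {
	uintptr_t iomap;	/* CPU-side (ioremap/dma_alloc) base */
	uint32_t  hw_addr;	/* same memory as seen by the DMA engine */
};

static uint32_t desc_phys(const struct pool *p, uintptr_t desc)
{
	return p->hw_addr + (uint32_t)(desc - p->iomap);
}

static uintptr_t desc_from_phys(const struct pool *p, uint32_t dma)
{
	return dma ? p->iomap + (dma - p->hw_addr) : 0;
}

int main(void)
{
	/* Hypothetical values: descriptor RAM mapped at 0x4000 for the CPU
	 * but addressed at 0x01e20000 by the DMA engine. */
	struct pool p = { .iomap = 0x4000, .hw_addr = 0x01e20000 };
	uintptr_t desc = p.iomap + 0x40;	/* third 32-byte descriptor */

	printf("hw addr: 0x%x\n", desc_phys(&p, desc));
	printf("round-trip ok: %d\n",
	       desc_from_phys(&p, desc_phys(&p, desc)) == desc);
	return 0;
}
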
+1
drivers/net/davinci_cpdma.h
··· 33 bool has_soft_reset; 34 int min_packet_size; 35 u32 desc_mem_phys; 36 int desc_mem_size; 37 int desc_align; 38
··· 33 bool has_soft_reset; 34 int min_packet_size; 35 u32 desc_mem_phys; 36 + u32 desc_hw_addr; 37 int desc_mem_size; 38 int desc_align; 39
+4 -1
drivers/net/davinci_emac.c
··· 1854 dma_params.rxcp = priv->emac_base + 0x660; 1855 dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS; 1856 dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE; 1857 - dma_params.desc_mem_phys = hw_ram_addr; 1858 dma_params.desc_mem_size = pdata->ctrl_ram_size; 1859 dma_params.desc_align = 16; 1860 1861 priv->dma = cpdma_ctlr_create(&dma_params); 1862 if (!priv->dma) {
··· 1854 dma_params.rxcp = priv->emac_base + 0x660; 1855 dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS; 1856 dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE; 1857 + dma_params.desc_hw_addr = hw_ram_addr; 1858 dma_params.desc_mem_size = pdata->ctrl_ram_size; 1859 dma_params.desc_align = 16; 1860 + 1861 + dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 : 1862 + (u32 __force)res->start + pdata->ctrl_ram_offset; 1863 1864 priv->dma = cpdma_ctlr_create(&dma_params); 1865 if (!priv->dma) {
+13
drivers/net/mlx4/alloc.c
··· 62 } else 63 obj = -1; 64 65 spin_unlock(&bitmap->lock); 66 67 return obj; ··· 104 } else 105 obj = -1; 106 107 spin_unlock(&bitmap->lock); 108 109 return obj; 110 } 111 112 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) ··· 126 bitmap->last = min(bitmap->last, obj); 127 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 128 & bitmap->mask; 129 spin_unlock(&bitmap->lock); 130 } 131 ··· 142 bitmap->max = num - reserved_top; 143 bitmap->mask = mask; 144 bitmap->reserved_top = reserved_top; 145 spin_lock_init(&bitmap->lock); 146 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * 147 sizeof (long), GFP_KERNEL);
··· 62 } else 63 obj = -1; 64 65 + if (obj != -1) 66 + --bitmap->avail; 67 + 68 spin_unlock(&bitmap->lock); 69 70 return obj; ··· 101 } else 102 obj = -1; 103 104 + if (obj != -1) 105 + bitmap->avail -= cnt; 106 + 107 spin_unlock(&bitmap->lock); 108 109 return obj; 110 + } 111 + 112 + u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap) 113 + { 114 + return bitmap->avail; 115 } 116 117 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) ··· 115 bitmap->last = min(bitmap->last, obj); 116 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 117 & bitmap->mask; 118 + bitmap->avail += cnt; 119 spin_unlock(&bitmap->lock); 120 } 121 ··· 130 bitmap->max = num - reserved_top; 131 bitmap->mask = mask; 132 bitmap->reserved_top = reserved_top; 133 + bitmap->avail = num - reserved_top - reserved_bot; 134 spin_lock_init(&bitmap->lock); 135 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * 136 sizeof (long), GFP_KERNEL);
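The alloc.c change is pure bookkeeping: decrement avail on every successful allocation, add the freed count back on free, and report the counter through mlx4_bitmap_avail(). A toy userspace model of that accounting (no masks, reserved ranges or locking):

#include <stdio.h>
#include <stdint.h>

struct bitmap {
	uint32_t max;
	uint32_t avail;
	unsigned long used;	/* toy storage: one word, max <= 64 */
};

static int bitmap_alloc(struct bitmap *b)
{
	uint32_t i;

	for (i = 0; i < b->max; i++) {
		if (!(b->used & (1UL << i))) {
			b->used |= 1UL << i;
			--b->avail;	/* mirrors: if (obj != -1) --bitmap->avail; */
			return (int)i;
		}
	}
	return -1;
}

static void bitmap_free(struct bitmap *b, int obj, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		b->used &= ~(1UL << (obj + i));
	b->avail += cnt;		/* mirrors: bitmap->avail += cnt; */
}

int main(void)
{
	struct bitmap b = { .max = 8, .avail = 8, .used = 0 };
	int a = bitmap_alloc(&b);
	int c = bitmap_alloc(&b);

	printf("avail after two allocs: %u\n", b.avail);	/* 6 */
	bitmap_free(&b, a, 1);
	bitmap_free(&b, c, 1);
	printf("avail after frees: %u\n", b.avail);		/* 8 */
	return 0;
}
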
+1 -1
drivers/net/mlx4/cq.c
··· 198 u64 mtt_addr; 199 int err; 200 201 - if (vector >= dev->caps.num_comp_vectors) 202 return -EINVAL; 203 204 cq->vector = vector;
··· 198 u64 mtt_addr; 199 int err; 200 201 + if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool) 202 return -EINVAL; 203 204 cq->vector = vector;
+31 -7
drivers/net/mlx4/en_cq.c
··· 51 int err; 52 53 cq->size = entries; 54 - if (mode == RX) { 55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe); 56 - cq->vector = ring % mdev->dev->caps.num_comp_vectors; 57 - } else { 58 cq->buf_size = sizeof(struct mlx4_cqe); 59 - cq->vector = 0; 60 - } 61 62 cq->ring = ring; 63 cq->is_tx = mode; ··· 77 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 78 { 79 struct mlx4_en_dev *mdev = priv->mdev; 80 - int err; 81 82 cq->dev = mdev->pndev[priv->port]; 83 cq->mcq.set_ci_db = cq->wqres.db.db; ··· 86 *cq->mcq.set_ci_db = 0; 87 *cq->mcq.arm_db = 0; 88 memset(cq->buf, 0, cq->buf_size); 89 90 if (!cq->is_tx) 91 cq->size = priv->rx_ring[cq->ring].actual_size; ··· 133 return 0; 134 } 135 136 - void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 137 { 138 struct mlx4_en_dev *mdev = priv->mdev; 139 140 mlx4_en_unmap_buffer(&cq->wqres.buf); 141 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 142 cq->buf_size = 0; 143 cq->buf = NULL; 144 }
··· 51 int err; 52 53 cq->size = entries; 54 + if (mode == RX) 55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe); 56 + else 57 cq->buf_size = sizeof(struct mlx4_cqe); 58 59 cq->ring = ring; 60 cq->is_tx = mode; ··· 80 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 81 { 82 struct mlx4_en_dev *mdev = priv->mdev; 83 + int err = 0; 84 + char name[25]; 85 86 cq->dev = mdev->pndev[priv->port]; 87 cq->mcq.set_ci_db = cq->wqres.db.db; ··· 88 *cq->mcq.set_ci_db = 0; 89 *cq->mcq.arm_db = 0; 90 memset(cq->buf, 0, cq->buf_size); 91 + 92 + if (cq->is_tx == RX) { 93 + if (mdev->dev->caps.comp_pool) { 94 + if (!cq->vector) { 95 + sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring); 96 + if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) { 97 + cq->vector = (cq->ring + 1 + priv->port) % 98 + mdev->dev->caps.num_comp_vectors; 99 + mlx4_warn(mdev, "Failed Assigning an EQ to " 100 + "%s_rx-%d ,Falling back to legacy EQ's\n", 101 + priv->dev->name, cq->ring); 102 + } 103 + } 104 + } else { 105 + cq->vector = (cq->ring + 1 + priv->port) % 106 + mdev->dev->caps.num_comp_vectors; 107 + } 108 + } else { 109 + if (!cq->vector || !mdev->dev->caps.comp_pool) { 110 + /*Fallback to legacy pool in case of error*/ 111 + cq->vector = 0; 112 + } 113 + } 114 115 if (!cq->is_tx) 116 cq->size = priv->rx_ring[cq->ring].actual_size; ··· 112 return 0; 113 } 114 115 + void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 116 + bool reserve_vectors) 117 { 118 struct mlx4_en_dev *mdev = priv->mdev; 119 120 mlx4_en_unmap_buffer(&cq->wqres.buf); 121 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 122 + if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors) 123 + mlx4_release_eq(priv->mdev->dev, cq->vector); 124 cq->buf_size = 0; 125 cq->buf = NULL; 126 }
+62 -4
drivers/net/mlx4/en_ethtool.c
··· 45 struct mlx4_en_priv *priv = netdev_priv(dev); 46 struct mlx4_en_dev *mdev = priv->mdev; 47 48 - sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id); 49 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); 50 sprintf(drvinfo->fw_version, "%d.%d.%d", 51 (u16) (mdev->dev->caps.fw_ver >> 32), ··· 131 static void mlx4_en_get_wol(struct net_device *netdev, 132 struct ethtool_wolinfo *wol) 133 { 134 - wol->supported = 0; 135 - wol->wolopts = 0; 136 } 137 138 static int mlx4_en_get_sset_count(struct net_device *dev, int sset) ··· 445 mlx4_en_stop_port(dev); 446 } 447 448 - mlx4_en_free_resources(priv); 449 450 priv->prof->tx_ring_size = tx_size; 451 priv->prof->rx_ring_size = rx_size; ··· 499 .get_ethtool_stats = mlx4_en_get_ethtool_stats, 500 .self_test = mlx4_en_self_test, 501 .get_wol = mlx4_en_get_wol, 502 .get_msglevel = mlx4_en_get_msglevel, 503 .set_msglevel = mlx4_en_set_msglevel, 504 .get_coalesce = mlx4_en_get_coalesce,
··· 45 struct mlx4_en_priv *priv = netdev_priv(dev); 46 struct mlx4_en_dev *mdev = priv->mdev; 47 48 + strncpy(drvinfo->driver, DRV_NAME, 32); 49 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32); 50 sprintf(drvinfo->fw_version, "%d.%d.%d", 51 (u16) (mdev->dev->caps.fw_ver >> 32), ··· 131 static void mlx4_en_get_wol(struct net_device *netdev, 132 struct ethtool_wolinfo *wol) 133 { 134 + struct mlx4_en_priv *priv = netdev_priv(netdev); 135 + int err = 0; 136 + u64 config = 0; 137 + 138 + if (!priv->mdev->dev->caps.wol) { 139 + wol->supported = 0; 140 + wol->wolopts = 0; 141 + return; 142 + } 143 + 144 + err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); 145 + if (err) { 146 + en_err(priv, "Failed to get WoL information\n"); 147 + return; 148 + } 149 + 150 + if (config & MLX4_EN_WOL_MAGIC) 151 + wol->supported = WAKE_MAGIC; 152 + else 153 + wol->supported = 0; 154 + 155 + if (config & MLX4_EN_WOL_ENABLED) 156 + wol->wolopts = WAKE_MAGIC; 157 + else 158 + wol->wolopts = 0; 159 + } 160 + 161 + static int mlx4_en_set_wol(struct net_device *netdev, 162 + struct ethtool_wolinfo *wol) 163 + { 164 + struct mlx4_en_priv *priv = netdev_priv(netdev); 165 + u64 config = 0; 166 + int err = 0; 167 + 168 + if (!priv->mdev->dev->caps.wol) 169 + return -EOPNOTSUPP; 170 + 171 + if (wol->supported & ~WAKE_MAGIC) 172 + return -EINVAL; 173 + 174 + err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); 175 + if (err) { 176 + en_err(priv, "Failed to get WoL info, unable to modify\n"); 177 + return err; 178 + } 179 + 180 + if (wol->wolopts & WAKE_MAGIC) { 181 + config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED | 182 + MLX4_EN_WOL_MAGIC; 183 + } else { 184 + config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC); 185 + config |= MLX4_EN_WOL_DO_MODIFY; 186 + } 187 + 188 + err = mlx4_wol_write(priv->mdev->dev, config, priv->port); 189 + if (err) 190 + en_err(priv, "Failed to set WoL information\n"); 191 + 192 + return err; 193 } 194 195 static int mlx4_en_get_sset_count(struct net_device *dev, int sset) ··· 388 mlx4_en_stop_port(dev); 389 } 390 391 + mlx4_en_free_resources(priv, true); 392 393 priv->prof->tx_ring_size = tx_size; 394 priv->prof->rx_ring_size = rx_size; ··· 442 .get_ethtool_stats = mlx4_en_get_ethtool_stats, 443 .self_test = mlx4_en_self_test, 444 .get_wol = mlx4_en_get_wol, 445 + .set_wol = mlx4_en_set_wol, 446 .get_msglevel = mlx4_en_get_msglevel, 447 .set_msglevel = mlx4_en_set_msglevel, 448 .get_coalesce = mlx4_en_get_coalesce,
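With get_wol and set_wol wired into the ethtool_ops, the new Wake-on-LAN state becomes visible through the standard ethtool ioctl. A small userspace sketch of querying it; the interface name "eth0" is only an example:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Read WoL support/state the same way "ethtool <dev>" does. */
int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&wol;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("supported=0x%x wolopts=0x%x (WAKE_MAGIC=0x%x)\n",
		       wol.supported, wol.wolopts, WAKE_MAGIC);
	else
		perror("SIOCETHTOOL");
	return 0;
}
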
+12 -10
drivers/net/mlx4/en_main.c
··· 241 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) 242 mdev->port_cnt++; 243 244 - /* If we did not receive an explicit number of Rx rings, default to 245 - * the number of completion vectors populated by the mlx4_core */ 246 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 247 - mlx4_info(mdev, "Using %d tx rings for port:%d\n", 248 - mdev->profile.prof[i].tx_ring_num, i); 249 - mdev->profile.prof[i].rx_ring_num = min_t(int, 250 - roundup_pow_of_two(dev->caps.num_comp_vectors), 251 - MAX_RX_RINGS); 252 - mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n", 253 - mdev->profile.prof[i].rx_ring_num, i); 254 } 255 256 /* Create our own workqueue for reset/multicast tasks ··· 296 .remove = mlx4_en_remove, 297 .event = mlx4_en_event, 298 .get_dev = mlx4_en_get_netdev, 299 - .protocol = MLX4_PROTOCOL_EN, 300 }; 301 302 static int __init mlx4_en_init(void)
··· 241 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) 242 mdev->port_cnt++; 243 244 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 245 + if (!dev->caps.comp_pool) { 246 + mdev->profile.prof[i].rx_ring_num = 247 + rounddown_pow_of_two(max_t(int, MIN_RX_RINGS, 248 + min_t(int, 249 + dev->caps.num_comp_vectors, 250 + MAX_RX_RINGS))); 251 + } else { 252 + mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two( 253 + min_t(int, dev->caps.comp_pool/ 254 + dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1)); 255 + } 256 } 257 258 /* Create our own workqueue for reset/multicast tasks ··· 294 .remove = mlx4_en_remove, 295 .event = mlx4_en_event, 296 .get_dev = mlx4_en_get_netdev, 297 + .protocol = MLX4_PROT_ETH, 298 }; 299 300 static int __init mlx4_en_init(void)
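The new per-port rx-ring default is plain integer arithmetic: without an EQ pool, clamp the number of completion vectors between MIN_RX_RINGS and MAX_RX_RINGS and round down to a power of two; with a pool, divide the pool across the ports (capped at MAX_MSIX_P_PORT - 1) and round down likewise. A standalone check of that math follows; the constant values are assumed for illustration, the real ones live in the mlx4_en headers.

#include <stdio.h>

#define MIN_RX_RINGS	4	/* illustrative */
#define MAX_RX_RINGS	16	/* illustrative */
#define MAX_MSIX_P_PORT	17	/* illustrative */

static int rounddown_pow_of_two(int n)
{
	int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

static int default_rx_rings(int num_comp_vectors, int comp_pool, int num_ports)
{
	if (!comp_pool)
		return rounddown_pow_of_two(
			max_int(MIN_RX_RINGS,
				min_int(num_comp_vectors, MAX_RX_RINGS)));
	return rounddown_pow_of_two(
		min_int(comp_pool / num_ports - 1, MAX_MSIX_P_PORT - 1));
}

int main(void)
{
	printf("legacy, 9 vectors   -> %d rings\n", default_rx_rings(9, 0, 2));
	printf("pool of 32, 2 ports -> %d rings\n", default_rx_rings(9, 32, 2));
	return 0;
}
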
+152 -47
drivers/net/mlx4/en_netdev.c
··· 156 mutex_lock(&mdev->state_lock); 157 if (priv->port_up) { 158 /* Remove old MAC and insert the new one */ 159 - mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 160 - err = mlx4_register_mac(mdev->dev, priv->port, 161 - priv->mac, &priv->mac_index); 162 if (err) 163 en_err(priv, "Failed changing HW MAC address\n"); 164 } else ··· 213 struct mlx4_en_dev *mdev = priv->mdev; 214 struct net_device *dev = priv->dev; 215 u64 mcast_addr = 0; 216 int err; 217 218 mutex_lock(&mdev->state_lock); ··· 239 priv->flags |= MLX4_EN_FLAG_PROMISC; 240 241 /* Enable promiscouos mode */ 242 - err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 243 - priv->base_qpn, 1); 244 if (err) 245 en_err(priv, "Failed enabling " 246 "promiscous mode\n"); ··· 256 en_err(priv, "Failed disabling " 257 "multicast filter\n"); 258 259 - /* Disable port VLAN filter */ 260 - err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); 261 - if (err) 262 - en_err(priv, "Failed disabling VLAN filter\n"); 263 } 264 goto out; 265 } ··· 285 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 286 287 /* Disable promiscouos mode */ 288 - err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 289 - priv->base_qpn, 0); 290 if (err) 291 en_err(priv, "Failed disabling promiscous mode\n"); 292 293 /* Enable port VLAN filter */ 294 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); ··· 315 0, MLX4_MCAST_DISABLE); 316 if (err) 317 en_err(priv, "Failed disabling multicast filter\n"); 318 } else { 319 int i; 320 321 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 322 0, MLX4_MCAST_DISABLE); 323 if (err) 324 en_err(priv, "Failed disabling multicast filter\n"); 325 326 /* Flush mcast filter and init it with broadcast address */ 327 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 328 1, MLX4_MCAST_CONFIG); ··· 359 for (i = 0; i < priv->mc_addrs_cnt; i++) { 360 mcast_addr = 361 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); 362 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 363 mcast_addr, 0, MLX4_MCAST_CONFIG); 364 } ··· 370 0, MLX4_MCAST_ENABLE); 371 if (err) 372 en_err(priv, "Failed enabling multicast filter\n"); 373 - 374 - mlx4_en_clear_list(dev); 375 } 376 out: 377 mutex_unlock(&mdev->state_lock); ··· 471 unsigned long avg_pkt_size; 472 unsigned long rx_packets; 473 unsigned long rx_bytes; 474 - unsigned long rx_byte_diff; 475 unsigned long tx_packets; 476 unsigned long tx_pkt_diff; 477 unsigned long rx_pkt_diff; ··· 494 rx_pkt_diff = ((unsigned long) (rx_packets - 495 priv->last_moder_packets)); 496 packets = max(tx_pkt_diff, rx_pkt_diff); 497 - rx_byte_diff = rx_bytes - priv->last_moder_bytes; 498 - rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1; 499 rate = packets * HZ / period; 500 avg_pkt_size = packets ? ((unsigned long) (rx_bytes - 501 priv->last_moder_bytes)) / packets : 0; 502 503 /* Apply auto-moderation only when packet rate exceeds a rate that 504 * it matters */ 505 - if (rate > MLX4_EN_RX_RATE_THRESH) { 506 /* If tx and rx packet rates are not balanced, assume that 507 * traffic is mainly BW bound and apply maximum moderation. 
508 * Otherwise, moderate according to packet rate */ 509 - if (2 * tx_pkt_diff > 3 * rx_pkt_diff && 510 - rx_pkt_diff / rx_byte_diff < 511 - MLX4_EN_SMALL_PKT_SIZE) 512 - moder_time = priv->rx_usecs_low; 513 - else if (2 * rx_pkt_diff > 3 * tx_pkt_diff) 514 moder_time = priv->rx_usecs_high; 515 - else { 516 if (rate < priv->pkt_rate_low) 517 moder_time = priv->rx_usecs_low; 518 else if (rate > priv->pkt_rate_high) ··· 519 priv->rx_usecs_low; 520 } 521 } else { 522 - /* When packet rate is low, use default moderation rather than 523 - * 0 to prevent interrupt storms if traffic suddenly increases */ 524 - moder_time = priv->rx_usecs; 525 } 526 527 en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", ··· 611 int err = 0; 612 int i; 613 int j; 614 615 if (priv->port_up) { 616 en_dbg(DRV, priv, "start port called while port already up\n"); ··· 651 ++rx_index; 652 } 653 654 err = mlx4_en_config_rss_steer(priv); 655 if (err) { 656 en_err(priv, "Failed configuring rss steering\n"); 657 - goto cq_err; 658 } 659 660 /* Configure tx cq's and rings */ 661 for (i = 0; i < priv->tx_ring_num; i++) { 662 /* Configure cq */ 663 cq = &priv->tx_cq[i]; 664 err = mlx4_en_activate_cq(priv, cq); 665 if (err) { 666 en_err(priv, "Failed allocating Tx CQ\n"); ··· 726 en_err(priv, "Failed setting default qp numbers\n"); 727 goto tx_err; 728 } 729 - /* Set port mac number */ 730 - en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); 731 - err = mlx4_register_mac(mdev->dev, priv->port, 732 - priv->mac, &priv->mac_index); 733 - if (err) { 734 - en_err(priv, "Failed setting port mac\n"); 735 - goto tx_err; 736 - } 737 - mdev->mac_removed[priv->port] = 0; 738 739 /* Init port */ 740 en_dbg(HW, priv, "Initializing port\n"); 741 err = mlx4_INIT_PORT(mdev->dev, priv->port); 742 if (err) { 743 en_err(priv, "Failed Initializing port\n"); 744 - goto mac_err; 745 } 746 747 /* Schedule multicast task to populate multicast list */ 748 queue_work(mdev->workqueue, &priv->mcast_task); ··· 749 netif_tx_start_all_queues(dev); 750 return 0; 751 752 - mac_err: 753 - mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 754 tx_err: 755 while (tx_index--) { 756 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); ··· 756 } 757 758 mlx4_en_release_rss_steer(priv); 759 cq_err: 760 while (rx_index--) 761 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); ··· 773 struct mlx4_en_priv *priv = netdev_priv(dev); 774 struct mlx4_en_dev *mdev = priv->mdev; 775 int i; 776 777 if (!priv->port_up) { 778 en_dbg(DRV, priv, "stop port called while port already down\n"); ··· 788 /* Set port as not active */ 789 priv->port_up = false; 790 791 /* Unregister Mac address for the port */ 792 - mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 793 mdev->mac_removed[priv->port] = 1; 794 795 /* Free TX Rings */ ··· 882 priv->rx_ring[i].packets = 0; 883 } 884 885 - mlx4_en_set_default_moderation(priv); 886 err = mlx4_en_start_port(dev); 887 if (err) 888 en_err(priv, "Failed starting port:%d\n", priv->port); ··· 908 return 0; 909 } 910 911 - void mlx4_en_free_resources(struct mlx4_en_priv *priv) 912 { 913 int i; 914 ··· 916 if (priv->tx_ring[i].tx_info) 917 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 918 if (priv->tx_cq[i].buf) 919 - mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 920 } 921 922 for (i = 0; i < priv->rx_ring_num; i++) { 923 if (priv->rx_ring[i].rx_info) 924 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); 925 if (priv->rx_cq[i].buf) 926 - mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 927 } 928 } 929 ··· 931 { 932 struct 
mlx4_en_port_profile *prof = priv->prof; 933 int i; 934 935 /* Create tx Rings */ 936 for (i = 0; i < priv->tx_ring_num; i++) { ··· 945 prof->tx_ring_size, i, TX)) 946 goto err; 947 948 - if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], 949 prof->tx_ring_size, TXBB_SIZE)) 950 goto err; 951 } ··· 965 966 err: 967 en_err(priv, "Failed to allocate NIC resources\n"); 968 return -ENOMEM; 969 } 970 ··· 993 mdev->pndev[priv->port] = NULL; 994 mutex_unlock(&mdev->state_lock); 995 996 - mlx4_en_free_resources(priv); 997 free_netdev(dev); 998 } 999 ··· 1020 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 1021 } else { 1022 mlx4_en_stop_port(dev); 1023 - mlx4_en_set_default_moderation(priv); 1024 err = mlx4_en_start_port(dev); 1025 if (err) { 1026 en_err(priv, "Failed restarting port:%d\n", ··· 1166 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 1167 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 1168 1169 priv->registered = 1; 1170 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1171 return 0; 1172
··· 156 mutex_lock(&mdev->state_lock); 157 if (priv->port_up) { 158 /* Remove old MAC and insert the new one */ 159 + err = mlx4_replace_mac(mdev->dev, priv->port, 160 + priv->base_qpn, priv->mac, 0); 161 if (err) 162 en_err(priv, "Failed changing HW MAC address\n"); 163 } else ··· 214 struct mlx4_en_dev *mdev = priv->mdev; 215 struct net_device *dev = priv->dev; 216 u64 mcast_addr = 0; 217 + u8 mc_list[16] = {0}; 218 int err; 219 220 mutex_lock(&mdev->state_lock); ··· 239 priv->flags |= MLX4_EN_FLAG_PROMISC; 240 241 /* Enable promiscouos mode */ 242 + if (!mdev->dev->caps.vep_uc_steering) 243 + err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 244 + priv->base_qpn, 1); 245 + else 246 + err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn, 247 + priv->port); 248 if (err) 249 en_err(priv, "Failed enabling " 250 "promiscous mode\n"); ··· 252 en_err(priv, "Failed disabling " 253 "multicast filter\n"); 254 255 + /* Add the default qp number as multicast promisc */ 256 + if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 257 + err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, 258 + priv->port); 259 + if (err) 260 + en_err(priv, "Failed entering multicast promisc mode\n"); 261 + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 262 + } 263 + 264 + if (priv->vlgrp) { 265 + /* Disable port VLAN filter */ 266 + err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); 267 + if (err) 268 + en_err(priv, "Failed disabling VLAN filter\n"); 269 + } 270 } 271 goto out; 272 } ··· 270 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 271 272 /* Disable promiscouos mode */ 273 + if (!mdev->dev->caps.vep_uc_steering) 274 + err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 275 + priv->base_qpn, 0); 276 + else 277 + err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 278 + priv->port); 279 if (err) 280 en_err(priv, "Failed disabling promiscous mode\n"); 281 + 282 + /* Disable Multicast promisc */ 283 + if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 284 + err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 285 + priv->port); 286 + if (err) 287 + en_err(priv, "Failed disabling multicast promiscous mode\n"); 288 + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 289 + } 290 291 /* Enable port VLAN filter */ 292 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); ··· 287 0, MLX4_MCAST_DISABLE); 288 if (err) 289 en_err(priv, "Failed disabling multicast filter\n"); 290 + 291 + /* Add the default qp number as multicast promisc */ 292 + if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 293 + err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, 294 + priv->port); 295 + if (err) 296 + en_err(priv, "Failed entering multicast promisc mode\n"); 297 + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 298 + } 299 } else { 300 int i; 301 + /* Disable Multicast promisc */ 302 + if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 303 + err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 304 + priv->port); 305 + if (err) 306 + en_err(priv, "Failed disabling multicast promiscous mode\n"); 307 + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 308 + } 309 310 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 311 0, MLX4_MCAST_DISABLE); 312 if (err) 313 en_err(priv, "Failed disabling multicast filter\n"); 314 315 + /* Detach our qp from all the multicast addresses */ 316 + for (i = 0; i < priv->mc_addrs_cnt; i++) { 317 + memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); 318 + mc_list[5] = priv->port; 319 + mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 320 + mc_list, MLX4_PROT_ETH); 321 + } 322 /* Flush 
mcast filter and init it with broadcast address */ 323 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 324 1, MLX4_MCAST_CONFIG); ··· 307 for (i = 0; i < priv->mc_addrs_cnt; i++) { 308 mcast_addr = 309 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); 310 + memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); 311 + mc_list[5] = priv->port; 312 + mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, 313 + mc_list, 0, MLX4_PROT_ETH); 314 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 315 mcast_addr, 0, MLX4_MCAST_CONFIG); 316 } ··· 314 0, MLX4_MCAST_ENABLE); 315 if (err) 316 en_err(priv, "Failed enabling multicast filter\n"); 317 } 318 out: 319 mutex_unlock(&mdev->state_lock); ··· 417 unsigned long avg_pkt_size; 418 unsigned long rx_packets; 419 unsigned long rx_bytes; 420 unsigned long tx_packets; 421 unsigned long tx_pkt_diff; 422 unsigned long rx_pkt_diff; ··· 441 rx_pkt_diff = ((unsigned long) (rx_packets - 442 priv->last_moder_packets)); 443 packets = max(tx_pkt_diff, rx_pkt_diff); 444 rate = packets * HZ / period; 445 avg_pkt_size = packets ? ((unsigned long) (rx_bytes - 446 priv->last_moder_bytes)) / packets : 0; 447 448 /* Apply auto-moderation only when packet rate exceeds a rate that 449 * it matters */ 450 + if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { 451 /* If tx and rx packet rates are not balanced, assume that 452 * traffic is mainly BW bound and apply maximum moderation. 453 * Otherwise, moderate according to packet rate */ 454 + if (2 * tx_pkt_diff > 3 * rx_pkt_diff || 455 + 2 * rx_pkt_diff > 3 * tx_pkt_diff) { 456 moder_time = priv->rx_usecs_high; 457 + } else { 458 if (rate < priv->pkt_rate_low) 459 moder_time = priv->rx_usecs_low; 460 else if (rate > priv->pkt_rate_high) ··· 471 priv->rx_usecs_low; 472 } 473 } else { 474 + moder_time = priv->rx_usecs_low; 475 } 476 477 en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", ··· 565 int err = 0; 566 int i; 567 int j; 568 + u8 mc_list[16] = {0}; 569 + char name[32]; 570 571 if (priv->port_up) { 572 en_dbg(DRV, priv, "start port called while port already up\n"); ··· 603 ++rx_index; 604 } 605 606 + /* Set port mac number */ 607 + en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); 608 + err = mlx4_register_mac(mdev->dev, priv->port, 609 + priv->mac, &priv->base_qpn, 0); 610 + if (err) { 611 + en_err(priv, "Failed setting port mac\n"); 612 + goto cq_err; 613 + } 614 + mdev->mac_removed[priv->port] = 0; 615 + 616 err = mlx4_en_config_rss_steer(priv); 617 if (err) { 618 en_err(priv, "Failed configuring rss steering\n"); 619 + goto mac_err; 620 } 621 622 + if (mdev->dev->caps.comp_pool && !priv->tx_vector) { 623 + sprintf(name , "%s-tx", priv->dev->name); 624 + if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) { 625 + mlx4_warn(mdev, "Failed Assigning an EQ to " 626 + "%s_tx ,Falling back to legacy " 627 + "EQ's\n", priv->dev->name); 628 + } 629 + } 630 /* Configure tx cq's and rings */ 631 for (i = 0; i < priv->tx_ring_num; i++) { 632 /* Configure cq */ 633 cq = &priv->tx_cq[i]; 634 + cq->vector = priv->tx_vector; 635 err = mlx4_en_activate_cq(priv, cq); 636 if (err) { 637 en_err(priv, "Failed allocating Tx CQ\n"); ··· 659 en_err(priv, "Failed setting default qp numbers\n"); 660 goto tx_err; 661 } 662 663 /* Init port */ 664 en_dbg(HW, priv, "Initializing port\n"); 665 err = mlx4_INIT_PORT(mdev->dev, priv->port); 666 if (err) { 667 en_err(priv, "Failed Initializing port\n"); 668 + goto tx_err; 669 } 670 + 671 + /* Attach rx QP to bradcast address */ 672 + 
memset(&mc_list[10], 0xff, ETH_ALEN); 673 + mc_list[5] = priv->port; 674 + if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 675 + 0, MLX4_PROT_ETH)) 676 + mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 677 678 /* Schedule multicast task to populate multicast list */ 679 queue_work(mdev->workqueue, &priv->mcast_task); ··· 684 netif_tx_start_all_queues(dev); 685 return 0; 686 687 tx_err: 688 while (tx_index--) { 689 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); ··· 693 } 694 695 mlx4_en_release_rss_steer(priv); 696 + mac_err: 697 + mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); 698 cq_err: 699 while (rx_index--) 700 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); ··· 708 struct mlx4_en_priv *priv = netdev_priv(dev); 709 struct mlx4_en_dev *mdev = priv->mdev; 710 int i; 711 + u8 mc_list[16] = {0}; 712 713 if (!priv->port_up) { 714 en_dbg(DRV, priv, "stop port called while port already down\n"); ··· 722 /* Set port as not active */ 723 priv->port_up = false; 724 725 + /* Detach All multicasts */ 726 + memset(&mc_list[10], 0xff, ETH_ALEN); 727 + mc_list[5] = priv->port; 728 + mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 729 + MLX4_PROT_ETH); 730 + for (i = 0; i < priv->mc_addrs_cnt; i++) { 731 + memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); 732 + mc_list[5] = priv->port; 733 + mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 734 + mc_list, MLX4_PROT_ETH); 735 + } 736 + mlx4_en_clear_list(dev); 737 + /* Flush multicast filter */ 738 + mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 739 + 740 /* Unregister Mac address for the port */ 741 + mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); 742 mdev->mac_removed[priv->port] = 1; 743 744 /* Free TX Rings */ ··· 801 priv->rx_ring[i].packets = 0; 802 } 803 804 err = mlx4_en_start_port(dev); 805 if (err) 806 en_err(priv, "Failed starting port:%d\n", priv->port); ··· 828 return 0; 829 } 830 831 + void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors) 832 { 833 int i; 834 ··· 836 if (priv->tx_ring[i].tx_info) 837 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 838 if (priv->tx_cq[i].buf) 839 + mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors); 840 } 841 842 for (i = 0; i < priv->rx_ring_num; i++) { 843 if (priv->rx_ring[i].rx_info) 844 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); 845 if (priv->rx_cq[i].buf) 846 + mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors); 847 } 848 } 849 ··· 851 { 852 struct mlx4_en_port_profile *prof = priv->prof; 853 int i; 854 + int base_tx_qpn, err; 855 + 856 + err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn); 857 + if (err) { 858 + en_err(priv, "failed reserving range for TX rings\n"); 859 + return err; 860 + } 861 862 /* Create tx Rings */ 863 for (i = 0; i < priv->tx_ring_num; i++) { ··· 858 prof->tx_ring_size, i, TX)) 859 goto err; 860 861 + if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i, 862 prof->tx_ring_size, TXBB_SIZE)) 863 goto err; 864 } ··· 878 879 err: 880 en_err(priv, "Failed to allocate NIC resources\n"); 881 + mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num); 882 return -ENOMEM; 883 } 884 ··· 905 mdev->pndev[priv->port] = NULL; 906 mutex_unlock(&mdev->state_lock); 907 908 + mlx4_en_free_resources(priv, false); 909 free_netdev(dev); 910 } 911 ··· 932 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 933 } else { 934 mlx4_en_stop_port(dev); 935 err = 
mlx4_en_start_port(dev); 936 if (err) { 937 en_err(priv, "Failed restarting port:%d\n", ··· 1079 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 1080 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 1081 1082 + /* Configure port */ 1083 + err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1084 + MLX4_EN_MIN_MTU, 1085 + 0, 0, 0, 0); 1086 + if (err) { 1087 + en_err(priv, "Failed setting port general configurations " 1088 + "for port %d, with error %d\n", priv->port, err); 1089 + goto out; 1090 + } 1091 + 1092 + /* Init port */ 1093 + en_warn(priv, "Initializing port\n"); 1094 + err = mlx4_INIT_PORT(mdev->dev, priv->port); 1095 + if (err) { 1096 + en_err(priv, "Failed Initializing port\n"); 1097 + goto out; 1098 + } 1099 priv->registered = 1; 1100 + mlx4_en_set_default_moderation(priv); 1101 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1102 return 0; 1103
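Throughout the en_netdev.c hunks, Ethernet multicast and broadcast attachments go through the same gid-style interface as IB: a 16-byte mc_list in which, as the diff shows, byte 5 carries the port number and bytes 10..15 carry the MAC (all 0xff for the broadcast attach). A small standalone helper mirroring that layout:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Build the 16-byte gid-style entry the new steering calls take:
 * byte 5 = port, bytes 10..15 = Ethernet MAC (mirrors the diff above). */
static void build_mc_list(uint8_t mc_list[16], const uint8_t mac[6], uint8_t port)
{
	memset(mc_list, 0, 16);
	mc_list[5] = port;
	memcpy(&mc_list[10], mac, 6);
}

int main(void)
{
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint8_t mc_list[16];
	int i;

	build_mc_list(mc_list, bcast, 1);
	for (i = 0; i < 16; i++)
		printf("%02x%s", mc_list[i], i == 15 ? "\n" : ":");
	return 0;
}
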
+10 -3
drivers/net/mlx4/en_port.c
··· 119 struct mlx4_set_port_rqp_calc_context *context; 120 int err; 121 u32 in_mod; 122 123 mailbox = mlx4_alloc_cmd_mailbox(dev); 124 if (IS_ERR(mailbox)) ··· 131 memset(context, 0, sizeof *context); 132 133 context->base_qpn = cpu_to_be32(base_qpn); 134 - context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn); 135 - context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn); 136 context->intra_no_vlan = 0; 137 context->no_vlan = MLX4_NO_VLAN_IDX; 138 context->intra_vlan_miss = 0; ··· 213 } 214 stats->tx_packets = 0; 215 stats->tx_bytes = 0; 216 - for (i = 0; i <= priv->tx_ring_num; i++) { 217 stats->tx_packets += priv->tx_ring[i].packets; 218 stats->tx_bytes += priv->tx_ring[i].bytes; 219 }
··· 119 struct mlx4_set_port_rqp_calc_context *context; 120 int err; 121 u32 in_mod; 122 + u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT; 123 + 124 + if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering) 125 + return 0; 126 127 mailbox = mlx4_alloc_cmd_mailbox(dev); 128 if (IS_ERR(mailbox)) ··· 127 memset(context, 0, sizeof *context); 128 129 context->base_qpn = cpu_to_be32(base_qpn); 130 + context->n_mac = 0x7; 131 + context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | 132 + base_qpn); 133 + context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | 134 + base_qpn); 135 context->intra_no_vlan = 0; 136 context->no_vlan = MLX4_NO_VLAN_IDX; 137 context->intra_vlan_miss = 0; ··· 206 } 207 stats->tx_packets = 0; 208 stats->tx_bytes = 0; 209 + for (i = 0; i < priv->tx_ring_num; i++) { 210 stats->tx_packets += priv->tx_ring[i].packets; 211 stats->tx_bytes += priv->tx_ring[i].bytes; 212 }
+14 -5
drivers/net/mlx4/en_port.h
··· 36 37 38 #define SET_PORT_GEN_ALL_VALID 0x7 39 - #define SET_PORT_PROMISC_EN_SHIFT 31 40 - #define SET_PORT_PROMISC_MODE_SHIFT 30 41 42 enum { 43 MLX4_CMD_SET_VLAN_FLTR = 0x47, 44 MLX4_CMD_SET_MCAST_FLTR = 0x48, 45 MLX4_CMD_DUMP_ETH_STATS = 0x49, 46 }; 47 48 struct mlx4_set_port_general_context { ··· 66 67 struct mlx4_set_port_rqp_calc_context { 68 __be32 base_qpn; 69 - __be32 flags; 70 - u8 reserved[3]; 71 u8 mac_miss; 72 u8 intra_no_vlan; 73 u8 no_vlan; 74 u8 intra_vlan_miss; 75 u8 vlan_miss; 76 - u8 reserved2[3]; 77 u8 no_vlan_prio; 78 __be32 promisc; 79 __be32 mcast;
··· 36 37 38 #define SET_PORT_GEN_ALL_VALID 0x7 39 + #define SET_PORT_PROMISC_SHIFT 31 40 + #define SET_PORT_MC_PROMISC_SHIFT 30 41 42 enum { 43 MLX4_CMD_SET_VLAN_FLTR = 0x47, 44 MLX4_CMD_SET_MCAST_FLTR = 0x48, 45 MLX4_CMD_DUMP_ETH_STATS = 0x49, 46 + }; 47 + 48 + enum { 49 + MCAST_DIRECT_ONLY = 0, 50 + MCAST_DIRECT = 1, 51 + MCAST_DEFAULT = 2 52 }; 53 54 struct mlx4_set_port_general_context { ··· 60 61 struct mlx4_set_port_rqp_calc_context { 62 __be32 base_qpn; 63 + u8 rererved; 64 + u8 n_mac; 65 + u8 n_vlan; 66 + u8 n_prio; 67 + u8 reserved2[3]; 68 u8 mac_miss; 69 u8 intra_no_vlan; 70 u8 no_vlan; 71 u8 intra_vlan_miss; 72 u8 vlan_miss; 73 + u8 reserved3[3]; 74 u8 no_vlan_prio; 75 __be32 promisc; 76 __be32 mcast;
+1 -10
drivers/net/mlx4/en_rx.c
··· 845 } 846 847 /* Configure RSS indirection qp */ 848 - err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn); 849 - if (err) { 850 - en_err(priv, "Failed to reserve range for RSS " 851 - "indirection qp\n"); 852 - goto rss_err; 853 - } 854 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); 855 if (err) { 856 en_err(priv, "Failed to allocate RSS indirection QP\n"); 857 - goto reserve_err; 858 } 859 rss_map->indir_qp.event = mlx4_en_sqp_event; 860 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, ··· 875 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); 876 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); 877 mlx4_qp_free(mdev->dev, &rss_map->indir_qp); 878 - reserve_err: 879 - mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1); 880 rss_err: 881 for (i = 0; i < good_qps; i++) { 882 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], ··· 896 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); 897 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); 898 mlx4_qp_free(mdev->dev, &rss_map->indir_qp); 899 - mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1); 900 901 for (i = 0; i < priv->rx_ring_num; i++) { 902 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
··· 845 } 846 847 /* Configure RSS indirection qp */ 848 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); 849 if (err) { 850 en_err(priv, "Failed to allocate RSS indirection QP\n"); 851 + goto rss_err; 852 } 853 rss_map->indir_qp.event = mlx4_en_sqp_event; 854 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, ··· 881 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); 882 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); 883 mlx4_qp_free(mdev->dev, &rss_map->indir_qp); 884 rss_err: 885 for (i = 0; i < good_qps; i++) { 886 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], ··· 904 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); 905 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); 906 mlx4_qp_free(mdev->dev, &rss_map->indir_qp); 907 908 for (i = 0; i < priv->rx_ring_num; i++) { 909 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+52 -20
drivers/net/mlx4/en_tx.c
··· 44 45 enum { 46 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ 47 }; 48 49 static int inline_thold __read_mostly = MAX_INLINE; ··· 53 MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); 54 55 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 56 - struct mlx4_en_tx_ring *ring, u32 size, 57 u16 stride) 58 { 59 struct mlx4_en_dev *mdev = priv->mdev; ··· 104 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 105 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 106 107 - err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); 108 - if (err) { 109 - en_err(priv, "Failed reserving qp for tx ring.\n"); 110 - goto err_map; 111 - } 112 - 113 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); 114 if (err) { 115 en_err(priv, "Failed allocating qp %d\n", ring->qpn); 116 - goto err_reserve; 117 } 118 ring->qp.event = mlx4_en_sqp_event; 119 120 return 0; 121 122 - err_reserve: 123 - mlx4_qp_release_range(mdev->dev, ring->qpn, 1); 124 err_map: 125 mlx4_en_unmap_buffer(&ring->wqres.buf); 126 err_hwq_res: ··· 142 struct mlx4_en_dev *mdev = priv->mdev; 143 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); 144 145 mlx4_qp_remove(mdev->dev, &ring->qp); 146 mlx4_qp_free(mdev->dev, &ring->qp); 147 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); ··· 176 177 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 178 ring->cqn, &ring->context); 179 180 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 181 &ring->qp, &ring->qp_state); ··· 598 return skb_tx_hash(dev, skb); 599 } 600 601 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) 602 { 603 struct mlx4_en_priv *priv = netdev_priv(dev); ··· 621 int desc_size; 622 int real_size; 623 dma_addr_t dma; 624 - u32 index; 625 __be32 op_own; 626 u16 vlan_tag = 0; 627 int i; 628 int lso_header_size; 629 void *fragptr; 630 631 if (!priv->port_up) 632 goto tx_drop; ··· 670 671 /* Packet is good - grab an index and transmit it */ 672 index = ring->prod & ring->size_mask; 673 674 /* See if we have enough space for whole descriptor TXBB for setting 675 * SW ownership on next descriptor; if not, use a bounce buffer. */ 676 if (likely(index + nr_txbb <= ring->size)) 677 tx_desc = ring->buf + index * TXBB_SIZE; 678 - else 679 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; 680 681 /* Save skb in tx_info ring */ 682 tx_info = &ring->tx_info[index]; ··· 784 ring->prod += nr_txbb; 785 786 /* If we used a bounce buffer then copy descriptor back into place */ 787 - if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf) 788 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); 789 790 /* Run destructor before passing skb to HW */ 791 if (likely(!skb_shared(skb))) 792 skb_orphan(skb); 793 794 - /* Ensure new descirptor hits memory 795 - * before setting ownership of this descriptor to HW */ 796 - wmb(); 797 - tx_desc->ctrl.owner_opcode = op_own; 798 799 - /* Ring doorbell! */ 800 - wmb(); 801 - writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); 802 803 /* Poll CQ here */ 804 mlx4_en_xmit_poll(priv, tx_ind);
··· 44 45 enum { 46 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ 47 + MAX_BF = 256, 48 }; 49 50 static int inline_thold __read_mostly = MAX_INLINE; ··· 52 MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); 53 54 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 55 + struct mlx4_en_tx_ring *ring, int qpn, u32 size, 56 u16 stride) 57 { 58 struct mlx4_en_dev *mdev = priv->mdev; ··· 103 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 104 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 105 106 + ring->qpn = qpn; 107 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); 108 if (err) { 109 en_err(priv, "Failed allocating qp %d\n", ring->qpn); 110 + goto err_map; 111 } 112 ring->qp.event = mlx4_en_sqp_event; 113 114 + err = mlx4_bf_alloc(mdev->dev, &ring->bf); 115 + if (err) { 116 + en_dbg(DRV, priv, "working without blueflame (%d)", err); 117 + ring->bf.uar = &mdev->priv_uar; 118 + ring->bf.uar->map = mdev->uar_map; 119 + ring->bf_enabled = false; 120 + } else 121 + ring->bf_enabled = true; 122 + 123 return 0; 124 125 err_map: 126 mlx4_en_unmap_buffer(&ring->wqres.buf); 127 err_hwq_res: ··· 139 struct mlx4_en_dev *mdev = priv->mdev; 140 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); 141 142 + if (ring->bf_enabled) 143 + mlx4_bf_free(mdev->dev, &ring->bf); 144 mlx4_qp_remove(mdev->dev, &ring->qp); 145 mlx4_qp_free(mdev->dev, &ring->qp); 146 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); ··· 171 172 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 173 ring->cqn, &ring->context); 174 + if (ring->bf_enabled) 175 + ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); 176 177 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 178 &ring->qp, &ring->qp_state); ··· 591 return skb_tx_hash(dev, skb); 592 } 593 594 + static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt) 595 + { 596 + __iowrite64_copy(dst, src, bytecnt / 8); 597 + } 598 + 599 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) 600 { 601 struct mlx4_en_priv *priv = netdev_priv(dev); ··· 609 int desc_size; 610 int real_size; 611 dma_addr_t dma; 612 + u32 index, bf_index; 613 __be32 op_own; 614 u16 vlan_tag = 0; 615 int i; 616 int lso_header_size; 617 void *fragptr; 618 + bool bounce = false; 619 620 if (!priv->port_up) 621 goto tx_drop; ··· 657 658 /* Packet is good - grab an index and transmit it */ 659 index = ring->prod & ring->size_mask; 660 + bf_index = ring->prod; 661 662 /* See if we have enough space for whole descriptor TXBB for setting 663 * SW ownership on next descriptor; if not, use a bounce buffer. 
*/ 664 if (likely(index + nr_txbb <= ring->size)) 665 tx_desc = ring->buf + index * TXBB_SIZE; 666 + else { 667 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; 668 + bounce = true; 669 + } 670 671 /* Save skb in tx_info ring */ 672 tx_info = &ring->tx_info[index]; ··· 768 ring->prod += nr_txbb; 769 770 /* If we used a bounce buffer then copy descriptor back into place */ 771 + if (bounce) 772 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); 773 774 /* Run destructor before passing skb to HW */ 775 if (likely(!skb_shared(skb))) 776 skb_orphan(skb); 777 778 + if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { 779 + *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; 780 + op_own |= htonl((bf_index & 0xffff) << 8); 781 + /* Ensure new descirptor hits memory 782 + * before setting ownership of this descriptor to HW */ 783 + wmb(); 784 + tx_desc->ctrl.owner_opcode = op_own; 785 786 + wmb(); 787 + 788 + mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl, 789 + desc_size); 790 + 791 + wmb(); 792 + 793 + ring->bf.offset ^= ring->bf.buf_size; 794 + } else { 795 + /* Ensure new descirptor hits memory 796 + * before setting ownership of this descriptor to HW */ 797 + wmb(); 798 + tx_desc->ctrl.owner_opcode = op_own; 799 + wmb(); 800 + writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); 801 + } 802 803 /* Poll CQ here */ 804 mlx4_en_xmit_poll(priv, tx_ind);
+101 -6
drivers/net/mlx4/eq.c
··· 42 #include "fw.h" 43 44 enum { 45 - MLX4_IRQNAME_SIZE = 64 46 }; 47 48 enum { ··· 317 * we need to map, take the difference of highest index and 318 * the lowest index we'll use and add 1. 319 */ 320 - return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - 321 - dev->caps.reserved_eqs / 4 + 1; 322 } 323 324 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) ··· 496 static void mlx4_free_irqs(struct mlx4_dev *dev) 497 { 498 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; 499 - int i; 500 501 if (eq_table->have_irq) 502 free_irq(dev->pdev->irq, dev); 503 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 504 if (eq_table->eq[i].have_irq) { 505 free_irq(eq_table->eq[i].irq, eq_table->eq + i); 506 eq_table->eq[i].have_irq = 0; 507 } 508 509 kfree(eq_table->irq_names); 510 } ··· 594 (priv->eq_table.inta_pin < 32 ? 4 : 0); 595 596 priv->eq_table.irq_names = 597 - kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), 598 GFP_KERNEL); 599 if (!priv->eq_table.irq_names) { 600 err = -ENOMEM; ··· 617 &priv->eq_table.eq[dev->caps.num_comp_vectors]); 618 if (err) 619 goto err_out_comp; 620 621 if (dev->flags & MLX4_FLAG_MSI_X) { 622 const char *eq_name; ··· 719 720 mlx4_free_irqs(dev); 721 722 - for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 723 mlx4_free_eq(dev, &priv->eq_table.eq[i]); 724 725 mlx4_unmap_clr_int(dev); ··· 776 return err; 777 } 778 EXPORT_SYMBOL(mlx4_test_interrupts);
··· 42 #include "fw.h" 43 44 enum { 45 + MLX4_IRQNAME_SIZE = 32 46 }; 47 48 enum { ··· 317 * we need to map, take the difference of highest index and 318 * the lowest index we'll use and add 1. 319 */ 320 + return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + 321 + dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; 322 } 323 324 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) ··· 496 static void mlx4_free_irqs(struct mlx4_dev *dev) 497 { 498 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; 499 + struct mlx4_priv *priv = mlx4_priv(dev); 500 + int i, vec; 501 502 if (eq_table->have_irq) 503 free_irq(dev->pdev->irq, dev); 504 + 505 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 506 if (eq_table->eq[i].have_irq) { 507 free_irq(eq_table->eq[i].irq, eq_table->eq + i); 508 eq_table->eq[i].have_irq = 0; 509 } 510 + 511 + for (i = 0; i < dev->caps.comp_pool; i++) { 512 + /* 513 + * Freeing the assigned irq's 514 + * all bits should be 0, but we need to validate 515 + */ 516 + if (priv->msix_ctl.pool_bm & 1ULL << i) { 517 + /* NO need protecting*/ 518 + vec = dev->caps.num_comp_vectors + 1 + i; 519 + free_irq(priv->eq_table.eq[vec].irq, 520 + &priv->eq_table.eq[vec]); 521 + } 522 + } 523 + 524 525 kfree(eq_table->irq_names); 526 } ··· 578 (priv->eq_table.inta_pin < 32 ? 4 : 0); 579 580 priv->eq_table.irq_names = 581 + kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + 582 + dev->caps.comp_pool), 583 GFP_KERNEL); 584 if (!priv->eq_table.irq_names) { 585 err = -ENOMEM; ··· 600 &priv->eq_table.eq[dev->caps.num_comp_vectors]); 601 if (err) 602 goto err_out_comp; 603 + 604 + /*if additional completion vectors poolsize is 0 this loop will not run*/ 605 + for (i = dev->caps.num_comp_vectors + 1; 606 + i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { 607 + 608 + err = mlx4_create_eq(dev, dev->caps.num_cqs - 609 + dev->caps.reserved_cqs + 610 + MLX4_NUM_SPARE_EQE, 611 + (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, 612 + &priv->eq_table.eq[i]); 613 + if (err) { 614 + --i; 615 + goto err_out_unmap; 616 + } 617 + } 618 + 619 620 if (dev->flags & MLX4_FLAG_MSI_X) { 621 const char *eq_name; ··· 686 687 mlx4_free_irqs(dev); 688 689 + for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) 690 mlx4_free_eq(dev, &priv->eq_table.eq[i]); 691 692 mlx4_unmap_clr_int(dev); ··· 743 return err; 744 } 745 EXPORT_SYMBOL(mlx4_test_interrupts); 746 + 747 + int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector) 748 + { 749 + 750 + struct mlx4_priv *priv = mlx4_priv(dev); 751 + int vec = 0, err = 0, i; 752 + 753 + spin_lock(&priv->msix_ctl.pool_lock); 754 + for (i = 0; !vec && i < dev->caps.comp_pool; i++) { 755 + if (~priv->msix_ctl.pool_bm & 1ULL << i) { 756 + priv->msix_ctl.pool_bm |= 1ULL << i; 757 + vec = dev->caps.num_comp_vectors + 1 + i; 758 + snprintf(priv->eq_table.irq_names + 759 + vec * MLX4_IRQNAME_SIZE, 760 + MLX4_IRQNAME_SIZE, "%s", name); 761 + err = request_irq(priv->eq_table.eq[vec].irq, 762 + mlx4_msi_x_interrupt, 0, 763 + &priv->eq_table.irq_names[vec<<5], 764 + priv->eq_table.eq + vec); 765 + if (err) { 766 + /*zero out bit by fliping it*/ 767 + priv->msix_ctl.pool_bm ^= 1 << i; 768 + vec = 0; 769 + continue; 770 + /*we dont want to break here*/ 771 + } 772 + eq_set_ci(&priv->eq_table.eq[vec], 1); 773 + } 774 + } 775 + spin_unlock(&priv->msix_ctl.pool_lock); 776 + 777 + if (vec) { 778 + *vector = vec; 779 + } else { 780 + *vector = 0; 781 + err = (i == dev->caps.comp_pool) ? 
-ENOSPC : err; 782 + } 783 + return err; 784 + } 785 + EXPORT_SYMBOL(mlx4_assign_eq); 786 + 787 + void mlx4_release_eq(struct mlx4_dev *dev, int vec) 788 + { 789 + struct mlx4_priv *priv = mlx4_priv(dev); 790 + /* bitmap index */ 791 + int i = vec - dev->caps.num_comp_vectors - 1; 792 + 793 + if (likely(i >= 0)) { 794 + /* sanity check, making sure we're not trying to free IRQs 795 + * belonging to a legacy EQ */ 796 + spin_lock(&priv->msix_ctl.pool_lock); 797 + if (priv->msix_ctl.pool_bm & 1ULL << i) { 798 + free_irq(priv->eq_table.eq[vec].irq, 799 + &priv->eq_table.eq[vec]); 800 + priv->msix_ctl.pool_bm &= ~(1ULL << i); 801 + } 802 + spin_unlock(&priv->msix_ctl.pool_lock); 803 + } 804 + 805 + } 806 + EXPORT_SYMBOL(mlx4_release_eq); 807 +
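mlx4_assign_eq() and mlx4_release_eq() added above let a consumer carve a dedicated completion vector out of the new comp_pool, with ownership tracked in the msix_ctl.pool_bm bitmap under pool_lock; when the pool is empty the caller is expected to fall back to the shared legacy vectors. A hedged sketch of such a caller follows (kernel context assumed) — the my_drv helpers and the fallback policy are illustrative, not part of the diff.

        /* Sketch of a consumer of the new EQ pool. */
        static int my_drv_setup_rx_vector(struct mlx4_dev *dev, int ring, int *vector)
        {
                char name[32];          /* MLX4_IRQNAME_SIZE is 32 after this change */
                int err;

                snprintf(name, sizeof(name), "my_drv-rx-%d", ring);
                err = mlx4_assign_eq(dev, name, vector);
                if (err) {
                        /* -ENOSPC: pool exhausted or comp_pool == 0;
                         * fall back to a shared legacy completion vector. */
                        *vector = ring % dev->caps.num_comp_vectors;
                }
                return 0;
        }

        static void my_drv_release_rx_vector(struct mlx4_dev *dev, int vector)
        {
                /* mlx4_release_eq() ignores vectors in the legacy range,
                 * so releasing unconditionally would also be safe. */
                if (vector > dev->caps.num_comp_vectors)
                        mlx4_release_eq(dev, vector);
        }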
+25
drivers/net/mlx4/fw.c
··· 274 dev_cap->stat_rate_support = stat_rate; 275 MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET); 276 dev_cap->udp_rss = field & 0x1; 277 MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET); 278 dev_cap->loopback_support = field & 0x1; 279 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 280 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 281 dev_cap->reserved_uars = field >> 4; ··· 740 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) 741 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) 742 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 743 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 744 #define INIT_HCA_TPT_OFFSET 0x0f0 745 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) ··· 801 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 802 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 803 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 804 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 805 806 /* TPT attributes */ ··· 914 /* Input modifier of 0x1f means "finish as soon as possible." */ 915 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); 916 }
··· 274 dev_cap->stat_rate_support = stat_rate; 275 MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET); 276 dev_cap->udp_rss = field & 0x1; 277 + dev_cap->vep_uc_steering = field & 0x2; 278 + dev_cap->vep_mc_steering = field & 0x4; 279 MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET); 280 dev_cap->loopback_support = field & 0x1; 281 + dev_cap->wol = field & 0x40; 282 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 283 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 284 dev_cap->reserved_uars = field >> 4; ··· 737 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) 738 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) 739 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 740 + #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) 741 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 742 #define INIT_HCA_TPT_OFFSET 0x0f0 743 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) ··· 797 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 798 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 799 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 800 + if (dev->caps.vep_mc_steering) 801 + MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET); 802 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 803 804 /* TPT attributes */ ··· 908 /* Input modifier of 0x1f means "finish as soon as possible." */ 909 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); 910 } 911 + 912 + #define MLX4_WOL_SETUP_MODE (5 << 28) 913 + int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) 914 + { 915 + u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 916 + 917 + return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, 918 + MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A); 919 + } 920 + EXPORT_SYMBOL_GPL(mlx4_wol_read); 921 + 922 + int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) 923 + { 924 + u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 925 + 926 + return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, 927 + MLX4_CMD_TIME_CLASS_A); 928 + } 929 + EXPORT_SYMBOL_GPL(mlx4_wol_write);
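mlx4_wol_read() and mlx4_wol_write() above wrap the MOD_STAT_CFG command with MLX4_WOL_SETUP_MODE and the port folded into the input modifier (op modifier 0x3 to query, 0x1 to set). Below is a minimal sketch of the read-modify-write an Ethernet driver might do to turn on magic-packet wake-up; the MLX4_EN_WOL_* bits are the ones added to mlx4_en.h further down, and the helper name is made up for illustration.

        /* Sketch only: enable magic-packet WoL on one port via the new wrappers. */
        static int my_drv_enable_wol_magic(struct mlx4_dev *dev, int port)
        {
                u64 config;
                int err;

                err = mlx4_wol_read(dev, &config, port);
                if (err)
                        return err;

                config |= MLX4_EN_WOL_MAGIC | MLX4_EN_WOL_ENABLED |
                          MLX4_EN_WOL_DO_MODIFY;

                return mlx4_wol_write(dev, config, port);
        }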
+3
drivers/net/mlx4/fw.h
··· 80 u16 stat_rate_support; 81 int udp_rss; 82 int loopback_support; 83 u32 flags; 84 int reserved_uars; 85 int uar_size;
··· 80 u16 stat_rate_support; 81 int udp_rss; 82 int loopback_support; 83 + int vep_uc_steering; 84 + int vep_mc_steering; 85 + int wol; 86 u32 flags; 87 int reserved_uars; 88 int uar_size;
+115 -4
drivers/net/mlx4/main.c
··· 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 43 #include <linux/mlx4/device.h> 44 #include <linux/mlx4/doorbell.h> ··· 228 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 229 dev->caps.udp_rss = dev_cap->udp_rss; 230 dev->caps.loopback_support = dev_cap->loopback_support; 231 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 232 233 dev->caps.log_num_macs = log_num_mac; ··· 722 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 723 } 724 725 static void mlx4_close_hca(struct mlx4_dev *dev) 726 { 727 mlx4_CLOSE_HCA(dev, 0); 728 mlx4_free_icms(dev); 729 mlx4_UNMAP_FA(dev); ··· 799 goto err_stop_fw; 800 } 801 802 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 803 804 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); ··· 832 mlx4_free_icms(dev); 833 834 err_stop_fw: 835 mlx4_UNMAP_FA(dev); 836 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 837 ··· 1000 { 1001 struct mlx4_priv *priv = mlx4_priv(dev); 1002 struct msix_entry *entries; 1003 - int nreq; 1004 int err; 1005 int i; 1006 1007 if (msi_x) { 1008 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 1009 - num_possible_cpus() + 1); 1010 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1011 if (!entries) 1012 goto no_msi; ··· 1031 goto no_msi; 1032 } 1033 1034 - dev->caps.num_comp_vectors = nreq - 1; 1035 for (i = 0; i < nreq; ++i) 1036 priv->eq_table.eq[i].irq = entries[i].vector; 1037 ··· 1051 1052 no_msi: 1053 dev->caps.num_comp_vectors = 1; 1054 1055 for (i = 0; i < 2; ++i) 1056 priv->eq_table.eq[i].irq = dev->pdev->irq; ··· 1089 return; 1090 1091 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1092 } 1093 1094 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ··· 1225 INIT_LIST_HEAD(&priv->pgdir_list); 1226 mutex_init(&priv->pgdir_mutex); 1227 1228 /* 1229 * Now reset the HCA before we touch the PCI capabilities or 1230 * attempt a firmware command, since a boot ROM may have left ··· 1254 if (err) 1255 goto err_close; 1256 1257 mlx4_enable_msi_x(dev); 1258 1259 err = mlx4_setup_hca(dev); 1260 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { ··· 1271 } 1272 1273 if (err) 1274 - goto err_free_eq; 1275 1276 for (port = 1; port <= dev->caps.num_ports; port++) { 1277 err = mlx4_init_port_info(dev, port); ··· 1303 mlx4_cleanup_mr_table(dev); 1304 mlx4_cleanup_pd_table(dev); 1305 mlx4_cleanup_uar_table(dev); 1306 1307 err_free_eq: 1308 mlx4_free_eq_table(dev); ··· 1366 iounmap(priv->kar); 1367 mlx4_uar_free(dev, &priv->driver_uar); 1368 mlx4_cleanup_uar_table(dev); 1369 mlx4_free_eq_table(dev); 1370 mlx4_close_hca(dev); 1371 mlx4_cmd_cleanup(dev);
··· 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 + #include <linux/io-mapping.h> 43 44 #include <linux/mlx4/device.h> 45 #include <linux/mlx4/doorbell.h> ··· 227 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 228 dev->caps.udp_rss = dev_cap->udp_rss; 229 dev->caps.loopback_support = dev_cap->loopback_support; 230 + dev->caps.vep_uc_steering = dev_cap->vep_uc_steering; 231 + dev->caps.vep_mc_steering = dev_cap->vep_mc_steering; 232 + dev->caps.wol = dev_cap->wol; 233 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 234 235 dev->caps.log_num_macs = log_num_mac; ··· 718 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 719 } 720 721 + static int map_bf_area(struct mlx4_dev *dev) 722 + { 723 + struct mlx4_priv *priv = mlx4_priv(dev); 724 + resource_size_t bf_start; 725 + resource_size_t bf_len; 726 + int err = 0; 727 + 728 + bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT); 729 + bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT); 730 + priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 731 + if (!priv->bf_mapping) 732 + err = -ENOMEM; 733 + 734 + return err; 735 + } 736 + 737 + static void unmap_bf_area(struct mlx4_dev *dev) 738 + { 739 + if (mlx4_priv(dev)->bf_mapping) 740 + io_mapping_free(mlx4_priv(dev)->bf_mapping); 741 + } 742 + 743 static void mlx4_close_hca(struct mlx4_dev *dev) 744 { 745 + unmap_bf_area(dev); 746 mlx4_CLOSE_HCA(dev, 0); 747 mlx4_free_icms(dev); 748 mlx4_UNMAP_FA(dev); ··· 772 goto err_stop_fw; 773 } 774 775 + if (map_bf_area(dev)) 776 + mlx4_dbg(dev, "Failed to map blue flame area\n"); 777 + 778 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 779 780 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); ··· 802 mlx4_free_icms(dev); 803 804 err_stop_fw: 805 + unmap_bf_area(dev); 806 mlx4_UNMAP_FA(dev); 807 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 808 ··· 969 { 970 struct mlx4_priv *priv = mlx4_priv(dev); 971 struct msix_entry *entries; 972 + int nreq = min_t(int, dev->caps.num_ports * 973 + min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) 974 + + MSIX_LEGACY_SZ, MAX_MSIX); 975 int err; 976 int i; 977 978 if (msi_x) { 979 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 980 + nreq); 981 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 982 if (!entries) 983 goto no_msi; ··· 998 goto no_msi; 999 } 1000 1001 + if (nreq < 1002 + MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) { 1003 + /*Working in legacy mode , all EQ's shared*/ 1004 + dev->caps.comp_pool = 0; 1005 + dev->caps.num_comp_vectors = nreq - 1; 1006 + } else { 1007 + dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; 1008 + dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; 1009 + } 1010 for (i = 0; i < nreq; ++i) 1011 priv->eq_table.eq[i].irq = entries[i].vector; 1012 ··· 1010 1011 no_msi: 1012 dev->caps.num_comp_vectors = 1; 1013 + dev->caps.comp_pool = 0; 1014 1015 for (i = 0; i < 2; ++i) 1016 priv->eq_table.eq[i].irq = dev->pdev->irq; ··· 1047 return; 1048 1049 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1050 + } 1051 + 1052 + static int mlx4_init_steering(struct mlx4_dev *dev) 1053 + { 1054 + struct mlx4_priv *priv = mlx4_priv(dev); 1055 + int num_entries = dev->caps.num_ports; 1056 + int i, j; 1057 + 1058 + priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 1059 + if (!priv->steer) 1060 + return -ENOMEM; 1061 + 1062 + for (i = 0; i < num_entries; i++) { 1063 + for (j = 0; j < MLX4_NUM_STEERS; j++) { 1064 + 
INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 1065 + INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 1066 + } 1067 + INIT_LIST_HEAD(&priv->steer[i].high_prios); 1068 + } 1069 + return 0; 1070 + } 1071 + 1072 + static void mlx4_clear_steering(struct mlx4_dev *dev) 1073 + { 1074 + struct mlx4_priv *priv = mlx4_priv(dev); 1075 + struct mlx4_steer_index *entry, *tmp_entry; 1076 + struct mlx4_promisc_qp *pqp, *tmp_pqp; 1077 + int num_entries = dev->caps.num_ports; 1078 + int i, j; 1079 + 1080 + for (i = 0; i < num_entries; i++) { 1081 + for (j = 0; j < MLX4_NUM_STEERS; j++) { 1082 + list_for_each_entry_safe(pqp, tmp_pqp, 1083 + &priv->steer[i].promisc_qps[j], 1084 + list) { 1085 + list_del(&pqp->list); 1086 + kfree(pqp); 1087 + } 1088 + list_for_each_entry_safe(entry, tmp_entry, 1089 + &priv->steer[i].steer_entries[j], 1090 + list) { 1091 + list_del(&entry->list); 1092 + list_for_each_entry_safe(pqp, tmp_pqp, 1093 + &entry->duplicates, 1094 + list) { 1095 + list_del(&pqp->list); 1096 + kfree(pqp); 1097 + } 1098 + kfree(entry); 1099 + } 1100 + } 1101 + } 1102 + kfree(priv->steer); 1103 } 1104 1105 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ··· 1130 INIT_LIST_HEAD(&priv->pgdir_list); 1131 mutex_init(&priv->pgdir_mutex); 1132 1133 + pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id); 1134 + 1135 + INIT_LIST_HEAD(&priv->bf_list); 1136 + mutex_init(&priv->bf_mutex); 1137 + 1138 /* 1139 * Now reset the HCA before we touch the PCI capabilities or 1140 * attempt a firmware command, since a boot ROM may have left ··· 1154 if (err) 1155 goto err_close; 1156 1157 + priv->msix_ctl.pool_bm = 0; 1158 + spin_lock_init(&priv->msix_ctl.pool_lock); 1159 + 1160 mlx4_enable_msi_x(dev); 1161 + 1162 + err = mlx4_init_steering(dev); 1163 + if (err) 1164 + goto err_free_eq; 1165 1166 err = mlx4_setup_hca(dev); 1167 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { ··· 1164 } 1165 1166 if (err) 1167 + goto err_steer; 1168 1169 for (port = 1; port <= dev->caps.num_ports; port++) { 1170 err = mlx4_init_port_info(dev, port); ··· 1196 mlx4_cleanup_mr_table(dev); 1197 mlx4_cleanup_pd_table(dev); 1198 mlx4_cleanup_uar_table(dev); 1199 + 1200 + err_steer: 1201 + mlx4_clear_steering(dev); 1202 1203 err_free_eq: 1204 mlx4_free_eq_table(dev); ··· 1256 iounmap(priv->kar); 1257 mlx4_uar_free(dev, &priv->driver_uar); 1258 mlx4_cleanup_uar_table(dev); 1259 + mlx4_clear_steering(dev); 1260 mlx4_free_eq_table(dev); 1261 mlx4_close_hca(dev); 1262 mlx4_cmd_cleanup(dev);
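With this change mlx4_enable_msi_x() requests up to num_ports * min(num_online_cpus() + 1, MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ vectors (capped by MAX_MSIX and by the EQs the firmware exposes) and then splits whatever it was granted: if the grant covers at least MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT vectors, MSIX_LEGACY_SZ - 1 of them become the shared completion vectors and the remainder forms comp_pool; otherwise the driver stays in fully shared legacy mode with an empty pool. The stand-alone model below only exercises that arithmetic; the numeric values of the MSIX_* constants are assumptions for illustration, since they are defined in headers outside this hunk.

        #include <stdio.h>

        /* Assumed values for illustration only. */
        #define MAX_MSIX_P_PORT 8
        #define MAX_MSIX        64
        #define MSIX_LEGACY_SZ  4
        #define MIN_MSIX_P_PORT 2

        static int min_int(int a, int b) { return a < b ? a : b; }

        static void split_vectors(int num_ports, int cpus, int granted)
        {
                int num_comp_vectors, comp_pool;

                if (granted < MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT) {
                        comp_pool = 0;                  /* legacy mode, everything shared */
                        num_comp_vectors = granted - 1; /* one vector stays for async events */
                } else {
                        comp_pool = granted - MSIX_LEGACY_SZ;
                        num_comp_vectors = MSIX_LEGACY_SZ - 1;
                }
                printf("ports=%d cpus=%d granted=%d -> shared=%d pool=%d\n",
                       num_ports, cpus, granted, num_comp_vectors, comp_pool);
        }

        int main(void)
        {
                int ports = 2, cpus = 8;
                int nreq = min_int(ports * min_int(cpus + 1, MAX_MSIX_P_PORT) +
                                   MSIX_LEGACY_SZ, MAX_MSIX);

                split_vectors(ports, cpus, nreq);  /* full grant */
                split_vectors(ports, cpus, 3);     /* pci_enable_msix() trimmed the request */
                return 0;
        }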
+602 -44
drivers/net/mlx4/mcg.c
··· 32 */ 33 34 #include <linux/string.h> 35 36 #include <linux/mlx4/cmd.h> 37 ··· 41 #define MGM_QPN_MASK 0x00FFFFFF 42 #define MGM_BLCK_LB_BIT 30 43 44 - struct mlx4_mgm { 45 - __be32 next_gid_index; 46 - __be32 members_count; 47 - u32 reserved[2]; 48 - u8 gid[16]; 49 - __be32 qp[MLX4_QP_PER_MGM]; 50 - }; 51 - 52 static const u8 zero_gid[16]; /* automatically initialized to 0 */ 53 54 - static int mlx4_READ_MCG(struct mlx4_dev *dev, int index, 55 - struct mlx4_cmd_mailbox *mailbox) 56 { 57 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 58 MLX4_CMD_TIME_CLASS_A); 59 } 60 61 - static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index, 62 - struct mlx4_cmd_mailbox *mailbox) 63 { 64 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 65 MLX4_CMD_TIME_CLASS_A); 66 } 67 68 - static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 69 - u16 *hash) 70 { 71 u64 imm; 72 int err; 73 74 - err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH, 75 - MLX4_CMD_TIME_CLASS_A); 76 77 if (!err) 78 *hash = imm; 79 80 return err; 81 } 82 ··· 548 * If no AMGM exists for given gid, *index = -1, *prev = index of last 549 * entry in hash chain and *mgm holds end of hash chain. 550 */ 551 - static int find_mgm(struct mlx4_dev *dev, 552 - u8 *gid, enum mlx4_protocol protocol, 553 - struct mlx4_cmd_mailbox *mgm_mailbox, 554 - u16 *hash, int *prev, int *index) 555 { 556 struct mlx4_cmd_mailbox *mailbox; 557 struct mlx4_mgm *mgm = mgm_mailbox->buf; 558 u8 *mgid; 559 int err; 560 561 mailbox = mlx4_alloc_cmd_mailbox(dev); 562 if (IS_ERR(mailbox)) ··· 567 568 memcpy(mgid, gid, 16); 569 570 - err = mlx4_MGID_HASH(dev, mailbox, hash); 571 mlx4_free_cmd_mailbox(dev, mailbox); 572 if (err) 573 return err; ··· 579 *prev = -1; 580 581 do { 582 - err = mlx4_READ_MCG(dev, *index, mgm_mailbox); 583 if (err) 584 return err; 585 586 - if (!memcmp(mgm->gid, zero_gid, 16)) { 587 if (*index != *hash) { 588 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 589 err = -EINVAL; ··· 592 } 593 594 if (!memcmp(mgm->gid, gid, 16) && 595 - be32_to_cpu(mgm->members_count) >> 30 == protocol) 596 return err; 597 598 *prev = *index; ··· 603 return err; 604 } 605 606 - int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 607 - int block_mcast_loopback, enum mlx4_protocol protocol) 608 { 609 struct mlx4_priv *priv = mlx4_priv(dev); 610 struct mlx4_cmd_mailbox *mailbox; ··· 616 int link = 0; 617 int i; 618 int err; 619 620 mailbox = mlx4_alloc_cmd_mailbox(dev); 621 if (IS_ERR(mailbox)) ··· 625 mgm = mailbox->buf; 626 627 mutex_lock(&priv->mcg_table.mutex); 628 - 629 - err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); 630 if (err) 631 goto out; 632 633 if (index != -1) { 634 - if (!memcmp(mgm->gid, zero_gid, 16)) 635 memcpy(mgm->gid, gid, 16); 636 } else { 637 link = 1; 638 ··· 670 else 671 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 672 673 - mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30); 674 675 - err = mlx4_WRITE_MCG(dev, index, mailbox); 676 if (err) 677 goto out; 678 679 if (!link) 680 goto out; 681 682 - err = mlx4_READ_MCG(dev, prev, mailbox); 683 if (err) 684 goto out; 685 686 mgm->next_gid_index = cpu_to_be32(index << 6); 687 688 - err = mlx4_WRITE_MCG(dev, prev, mailbox); 689 if (err) 690 goto out; 691 692 out: 693 if (err && link && index != -1) { 694 if (index < dev->caps.num_mgms) 695 mlx4_warn(dev, "Got AMGM index %d < %d", ··· 711 mlx4_free_cmd_mailbox(dev, mailbox); 712 return 
err; 713 } 714 - EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 715 716 - int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 717 - enum mlx4_protocol protocol) 718 { 719 struct mlx4_priv *priv = mlx4_priv(dev); 720 struct mlx4_cmd_mailbox *mailbox; ··· 723 int prev, index; 724 int i, loc; 725 int err; 726 727 mailbox = mlx4_alloc_cmd_mailbox(dev); 728 if (IS_ERR(mailbox)) ··· 733 734 mutex_lock(&priv->mcg_table.mutex); 735 736 - err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); 737 if (err) 738 goto out; 739 ··· 743 err = -EINVAL; 744 goto out; 745 } 746 747 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 748 for (loc = -1, i = 0; i < members_count; ++i) ··· 761 } 762 763 764 - mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30); 765 mgm->qp[loc] = mgm->qp[i - 1]; 766 mgm->qp[i - 1] = 0; 767 768 - if (i != 1) { 769 - err = mlx4_WRITE_MCG(dev, index, mailbox); 770 goto out; 771 } 772 773 if (prev == -1) { 774 /* Remove entry from MGM */ 775 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 776 if (amgm_index) { 777 - err = mlx4_READ_MCG(dev, amgm_index, mailbox); 778 if (err) 779 goto out; 780 } else 781 memset(mgm->gid, 0, 16); 782 783 - err = mlx4_WRITE_MCG(dev, index, mailbox); 784 if (err) 785 goto out; 786 ··· 800 } else { 801 /* Remove entry from AMGM */ 802 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 803 - err = mlx4_READ_MCG(dev, prev, mailbox); 804 if (err) 805 goto out; 806 807 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 808 809 - err = mlx4_WRITE_MCG(dev, prev, mailbox); 810 if (err) 811 goto out; 812 ··· 824 mlx4_free_cmd_mailbox(dev, mailbox); 825 return err; 826 } 827 EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 828 829 int mlx4_init_mcg_table(struct mlx4_dev *dev) 830 {
··· 32 */ 33 34 #include <linux/string.h> 35 + #include <linux/etherdevice.h> 36 37 #include <linux/mlx4/cmd.h> 38 ··· 40 #define MGM_QPN_MASK 0x00FFFFFF 41 #define MGM_BLCK_LB_BIT 30 42 43 static const u8 zero_gid[16]; /* automatically initialized to 0 */ 44 45 + static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 46 + struct mlx4_cmd_mailbox *mailbox) 47 { 48 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 49 MLX4_CMD_TIME_CLASS_A); 50 } 51 52 + static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, 53 + struct mlx4_cmd_mailbox *mailbox) 54 { 55 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 56 MLX4_CMD_TIME_CLASS_A); 57 } 58 59 + static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer, 60 + struct mlx4_cmd_mailbox *mailbox) 61 + { 62 + u32 in_mod; 63 + 64 + in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1; 65 + return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, 66 + MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A); 67 + } 68 + 69 + static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 70 + u16 *hash, u8 op_mod) 71 { 72 u64 imm; 73 int err; 74 75 + err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, 76 + MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A); 77 78 if (!err) 79 *hash = imm; 80 81 + return err; 82 + } 83 + 84 + static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num, 85 + enum mlx4_steer_type steer, 86 + u32 qpn) 87 + { 88 + struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num]; 89 + struct mlx4_promisc_qp *pqp; 90 + 91 + list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 92 + if (pqp->qpn == qpn) 93 + return pqp; 94 + } 95 + /* not found */ 96 + return NULL; 97 + } 98 + 99 + /* 100 + * Add new entry to steering data structure. 101 + * All promisc QPs should be added as well 102 + */ 103 + static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, 104 + enum mlx4_steer_type steer, 105 + unsigned int index, u32 qpn) 106 + { 107 + struct mlx4_steer *s_steer; 108 + struct mlx4_cmd_mailbox *mailbox; 109 + struct mlx4_mgm *mgm; 110 + u32 members_count; 111 + struct mlx4_steer_index *new_entry; 112 + struct mlx4_promisc_qp *pqp; 113 + struct mlx4_promisc_qp *dqp; 114 + u32 prot; 115 + int err; 116 + u8 pf_num; 117 + 118 + pf_num = (dev->caps.num_ports == 1) ? 
vep_num : (vep_num << 1) | (port - 1); 119 + s_steer = &mlx4_priv(dev)->steer[pf_num]; 120 + new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); 121 + if (!new_entry) 122 + return -ENOMEM; 123 + 124 + INIT_LIST_HEAD(&new_entry->duplicates); 125 + new_entry->index = index; 126 + list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); 127 + 128 + /* If the given qpn is also a promisc qp, 129 + * it should be inserted to duplicates list 130 + */ 131 + pqp = get_promisc_qp(dev, pf_num, steer, qpn); 132 + if (pqp) { 133 + dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 134 + if (!dqp) { 135 + err = -ENOMEM; 136 + goto out_alloc; 137 + } 138 + dqp->qpn = qpn; 139 + list_add_tail(&dqp->list, &new_entry->duplicates); 140 + } 141 + 142 + /* if no promisc qps for this vep, we are done */ 143 + if (list_empty(&s_steer->promisc_qps[steer])) 144 + return 0; 145 + 146 + /* now need to add all the promisc qps to the new 147 + * steering entry, as they should also receive the packets 148 + * destined to this address */ 149 + mailbox = mlx4_alloc_cmd_mailbox(dev); 150 + if (IS_ERR(mailbox)) { 151 + err = -ENOMEM; 152 + goto out_alloc; 153 + } 154 + mgm = mailbox->buf; 155 + 156 + err = mlx4_READ_ENTRY(dev, index, mailbox); 157 + if (err) 158 + goto out_mailbox; 159 + 160 + members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 161 + prot = be32_to_cpu(mgm->members_count) >> 30; 162 + list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 163 + /* don't add already existing qpn */ 164 + if (pqp->qpn == qpn) 165 + continue; 166 + if (members_count == MLX4_QP_PER_MGM) { 167 + /* out of space */ 168 + err = -ENOMEM; 169 + goto out_mailbox; 170 + } 171 + 172 + /* add the qpn */ 173 + mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); 174 + } 175 + /* update the qps count and update the entry with all the promisc qps*/ 176 + mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 177 + err = mlx4_WRITE_ENTRY(dev, index, mailbox); 178 + 179 + out_mailbox: 180 + mlx4_free_cmd_mailbox(dev, mailbox); 181 + if (!err) 182 + return 0; 183 + out_alloc: 184 + if (dqp) { 185 + list_del(&dqp->list); 186 + kfree(&dqp); 187 + } 188 + list_del(&new_entry->list); 189 + kfree(new_entry); 190 + return err; 191 + } 192 + 193 + /* update the data structures with existing steering entry */ 194 + static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, 195 + enum mlx4_steer_type steer, 196 + unsigned int index, u32 qpn) 197 + { 198 + struct mlx4_steer *s_steer; 199 + struct mlx4_steer_index *tmp_entry, *entry = NULL; 200 + struct mlx4_promisc_qp *pqp; 201 + struct mlx4_promisc_qp *dqp; 202 + u8 pf_num; 203 + 204 + pf_num = (dev->caps.num_ports == 1) ? 
vep_num : (vep_num << 1) | (port - 1); 205 + s_steer = &mlx4_priv(dev)->steer[pf_num]; 206 + 207 + pqp = get_promisc_qp(dev, pf_num, steer, qpn); 208 + if (!pqp) 209 + return 0; /* nothing to do */ 210 + 211 + list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 212 + if (tmp_entry->index == index) { 213 + entry = tmp_entry; 214 + break; 215 + } 216 + } 217 + if (unlikely(!entry)) { 218 + mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); 219 + return -EINVAL; 220 + } 221 + 222 + /* the given qpn is listed as a promisc qpn 223 + * we need to add it as a duplicate to this entry 224 + * for future refernce */ 225 + list_for_each_entry(dqp, &entry->duplicates, list) { 226 + if (qpn == dqp->qpn) 227 + return 0; /* qp is already duplicated */ 228 + } 229 + 230 + /* add the qp as a duplicate on this index */ 231 + dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 232 + if (!dqp) 233 + return -ENOMEM; 234 + dqp->qpn = qpn; 235 + list_add_tail(&dqp->list, &entry->duplicates); 236 + 237 + return 0; 238 + } 239 + 240 + /* Check whether a qpn is a duplicate on steering entry 241 + * If so, it should not be removed from mgm */ 242 + static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, 243 + enum mlx4_steer_type steer, 244 + unsigned int index, u32 qpn) 245 + { 246 + struct mlx4_steer *s_steer; 247 + struct mlx4_steer_index *tmp_entry, *entry = NULL; 248 + struct mlx4_promisc_qp *dqp, *tmp_dqp; 249 + u8 pf_num; 250 + 251 + pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); 252 + s_steer = &mlx4_priv(dev)->steer[pf_num]; 253 + 254 + /* if qp is not promisc, it cannot be duplicated */ 255 + if (!get_promisc_qp(dev, pf_num, steer, qpn)) 256 + return false; 257 + 258 + /* The qp is promisc qp so it is a duplicate on this index 259 + * Find the index entry, and remove the duplicate */ 260 + list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 261 + if (tmp_entry->index == index) { 262 + entry = tmp_entry; 263 + break; 264 + } 265 + } 266 + if (unlikely(!entry)) { 267 + mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); 268 + return false; 269 + } 270 + list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { 271 + if (dqp->qpn == qpn) { 272 + list_del(&dqp->list); 273 + kfree(dqp); 274 + } 275 + } 276 + return true; 277 + } 278 + 279 + /* I a steering entry contains only promisc QPs, it can be removed. */ 280 + static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port, 281 + enum mlx4_steer_type steer, 282 + unsigned int index, u32 tqpn) 283 + { 284 + struct mlx4_steer *s_steer; 285 + struct mlx4_cmd_mailbox *mailbox; 286 + struct mlx4_mgm *mgm; 287 + struct mlx4_steer_index *entry = NULL, *tmp_entry; 288 + u32 qpn; 289 + u32 members_count; 290 + bool ret = false; 291 + int i; 292 + u8 pf_num; 293 + 294 + pf_num = (dev->caps.num_ports == 1) ? 
vep_num : (vep_num << 1) | (port - 1); 295 + s_steer = &mlx4_priv(dev)->steer[pf_num]; 296 + 297 + mailbox = mlx4_alloc_cmd_mailbox(dev); 298 + if (IS_ERR(mailbox)) 299 + return false; 300 + mgm = mailbox->buf; 301 + 302 + if (mlx4_READ_ENTRY(dev, index, mailbox)) 303 + goto out; 304 + members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 305 + for (i = 0; i < members_count; i++) { 306 + qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; 307 + if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) { 308 + /* the qp is not promisc, the entry can't be removed */ 309 + goto out; 310 + } 311 + } 312 + /* All the qps currently registered for this entry are promiscuous, 313 + * Checking for duplicates */ 314 + ret = true; 315 + list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { 316 + if (entry->index == index) { 317 + if (list_empty(&entry->duplicates)) { 318 + list_del(&entry->list); 319 + kfree(entry); 320 + } else { 321 + /* This entry contains duplicates so it shouldn't be removed */ 322 + ret = false; 323 + goto out; 324 + } 325 + } 326 + } 327 + 328 + out: 329 + mlx4_free_cmd_mailbox(dev, mailbox); 330 + return ret; 331 + } 332 + 333 + static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, 334 + enum mlx4_steer_type steer, u32 qpn) 335 + { 336 + struct mlx4_steer *s_steer; 337 + struct mlx4_cmd_mailbox *mailbox; 338 + struct mlx4_mgm *mgm; 339 + struct mlx4_steer_index *entry; 340 + struct mlx4_promisc_qp *pqp; 341 + struct mlx4_promisc_qp *dqp; 342 + u32 members_count; 343 + u32 prot; 344 + int i; 345 + bool found; 346 + int last_index; 347 + int err; 348 + u8 pf_num; 349 + struct mlx4_priv *priv = mlx4_priv(dev); 350 + pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); 351 + s_steer = &mlx4_priv(dev)->steer[pf_num]; 352 + 353 + mutex_lock(&priv->mcg_table.mutex); 354 + 355 + if (get_promisc_qp(dev, pf_num, steer, qpn)) { 356 + err = 0; /* Noting to do, already exists */ 357 + goto out_mutex; 358 + } 359 + 360 + pqp = kmalloc(sizeof *pqp, GFP_KERNEL); 361 + if (!pqp) { 362 + err = -ENOMEM; 363 + goto out_mutex; 364 + } 365 + pqp->qpn = qpn; 366 + 367 + mailbox = mlx4_alloc_cmd_mailbox(dev); 368 + if (IS_ERR(mailbox)) { 369 + err = -ENOMEM; 370 + goto out_alloc; 371 + } 372 + mgm = mailbox->buf; 373 + 374 + /* the promisc qp needs to be added for each one of the steering 375 + * entries, if it already exists, needs to be added as a duplicate 376 + * for this entry */ 377 + list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 378 + err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 379 + if (err) 380 + goto out_mailbox; 381 + 382 + members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 383 + prot = be32_to_cpu(mgm->members_count) >> 30; 384 + found = false; 385 + for (i = 0; i < members_count; i++) { 386 + if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 387 + /* Entry already exists, add to duplicates */ 388 + dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 389 + if (!dqp) 390 + goto out_mailbox; 391 + dqp->qpn = qpn; 392 + list_add_tail(&dqp->list, &entry->duplicates); 393 + found = true; 394 + } 395 + } 396 + if (!found) { 397 + /* Need to add the qpn to mgm */ 398 + if (members_count == MLX4_QP_PER_MGM) { 399 + /* entry is full */ 400 + err = -ENOMEM; 401 + goto out_mailbox; 402 + } 403 + mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); 404 + mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 405 + err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 406 + if (err) 407 
+ goto out_mailbox; 408 + } 409 + last_index = entry->index; 410 + } 411 + 412 + /* add the new qpn to list of promisc qps */ 413 + list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 414 + /* now need to add all the promisc qps to default entry */ 415 + memset(mgm, 0, sizeof *mgm); 416 + members_count = 0; 417 + list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 418 + mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 419 + mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 420 + 421 + err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); 422 + if (err) 423 + goto out_list; 424 + 425 + mlx4_free_cmd_mailbox(dev, mailbox); 426 + mutex_unlock(&priv->mcg_table.mutex); 427 + return 0; 428 + 429 + out_list: 430 + list_del(&pqp->list); 431 + out_mailbox: 432 + mlx4_free_cmd_mailbox(dev, mailbox); 433 + out_alloc: 434 + kfree(pqp); 435 + out_mutex: 436 + mutex_unlock(&priv->mcg_table.mutex); 437 + return err; 438 + } 439 + 440 + static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port, 441 + enum mlx4_steer_type steer, u32 qpn) 442 + { 443 + struct mlx4_priv *priv = mlx4_priv(dev); 444 + struct mlx4_steer *s_steer; 445 + struct mlx4_cmd_mailbox *mailbox; 446 + struct mlx4_mgm *mgm; 447 + struct mlx4_steer_index *entry; 448 + struct mlx4_promisc_qp *pqp; 449 + struct mlx4_promisc_qp *dqp; 450 + u32 members_count; 451 + bool found; 452 + bool back_to_list = false; 453 + int loc, i; 454 + int err; 455 + u8 pf_num; 456 + 457 + pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1); 458 + s_steer = &mlx4_priv(dev)->steer[pf_num]; 459 + mutex_lock(&priv->mcg_table.mutex); 460 + 461 + pqp = get_promisc_qp(dev, pf_num, steer, qpn); 462 + if (unlikely(!pqp)) { 463 + mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); 464 + /* nothing to do */ 465 + err = 0; 466 + goto out_mutex; 467 + } 468 + 469 + /*remove from list of promisc qps */ 470 + list_del(&pqp->list); 471 + kfree(pqp); 472 + 473 + /* set the default entry not to include the removed one */ 474 + mailbox = mlx4_alloc_cmd_mailbox(dev); 475 + if (IS_ERR(mailbox)) { 476 + err = -ENOMEM; 477 + back_to_list = true; 478 + goto out_list; 479 + } 480 + mgm = mailbox->buf; 481 + members_count = 0; 482 + list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 483 + mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 484 + mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 485 + 486 + err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox); 487 + if (err) 488 + goto out_mailbox; 489 + 490 + /* remove the qp from all the steering entries*/ 491 + list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 492 + found = false; 493 + list_for_each_entry(dqp, &entry->duplicates, list) { 494 + if (dqp->qpn == qpn) { 495 + found = true; 496 + break; 497 + } 498 + } 499 + if (found) { 500 + /* a duplicate, no need to change the mgm, 501 + * only update the duplicates list */ 502 + list_del(&dqp->list); 503 + kfree(dqp); 504 + } else { 505 + err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 506 + if (err) 507 + goto out_mailbox; 508 + members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 509 + for (loc = -1, i = 0; i < members_count; ++i) 510 + if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) 511 + loc = i; 512 + 513 + mgm->members_count = cpu_to_be32(--members_count | 514 + (MLX4_PROT_ETH << 30)); 515 + mgm->qp[loc] = mgm->qp[i - 1]; 516 + mgm->qp[i - 1] = 0; 517 + 518 + err = mlx4_WRITE_ENTRY(dev, entry->index, 
mailbox); 519 + if (err) 520 + goto out_mailbox; 521 + } 522 + 523 + } 524 + 525 + out_mailbox: 526 + mlx4_free_cmd_mailbox(dev, mailbox); 527 + out_list: 528 + if (back_to_list) 529 + list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 530 + out_mutex: 531 + mutex_unlock(&priv->mcg_table.mutex); 532 return err; 533 } 534 ··· 94 * If no AMGM exists for given gid, *index = -1, *prev = index of last 95 * entry in hash chain and *mgm holds end of hash chain. 96 */ 97 + static int find_entry(struct mlx4_dev *dev, u8 port, 98 + u8 *gid, enum mlx4_protocol prot, 99 + enum mlx4_steer_type steer, 100 + struct mlx4_cmd_mailbox *mgm_mailbox, 101 + u16 *hash, int *prev, int *index) 102 { 103 struct mlx4_cmd_mailbox *mailbox; 104 struct mlx4_mgm *mgm = mgm_mailbox->buf; 105 u8 *mgid; 106 int err; 107 + u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0; 108 109 mailbox = mlx4_alloc_cmd_mailbox(dev); 110 if (IS_ERR(mailbox)) ··· 111 112 memcpy(mgid, gid, 16); 113 114 + err = mlx4_GID_HASH(dev, mailbox, hash, op_mod); 115 mlx4_free_cmd_mailbox(dev, mailbox); 116 if (err) 117 return err; ··· 123 *prev = -1; 124 125 do { 126 + err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); 127 if (err) 128 return err; 129 130 + if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 131 if (*index != *hash) { 132 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 133 err = -EINVAL; ··· 136 } 137 138 if (!memcmp(mgm->gid, gid, 16) && 139 + be32_to_cpu(mgm->members_count) >> 30 == prot) 140 return err; 141 142 *prev = *index; ··· 147 return err; 148 } 149 150 + int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 151 + int block_mcast_loopback, enum mlx4_protocol prot, 152 + enum mlx4_steer_type steer) 153 { 154 struct mlx4_priv *priv = mlx4_priv(dev); 155 struct mlx4_cmd_mailbox *mailbox; ··· 159 int link = 0; 160 int i; 161 int err; 162 + u8 port = gid[5]; 163 + u8 new_entry = 0; 164 165 mailbox = mlx4_alloc_cmd_mailbox(dev); 166 if (IS_ERR(mailbox)) ··· 166 mgm = mailbox->buf; 167 168 mutex_lock(&priv->mcg_table.mutex); 169 + err = find_entry(dev, port, gid, prot, steer, 170 + mailbox, &hash, &prev, &index); 171 if (err) 172 goto out; 173 174 if (index != -1) { 175 + if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 176 + new_entry = 1; 177 memcpy(mgm->gid, gid, 16); 178 + } 179 } else { 180 link = 1; 181 ··· 209 else 210 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 211 212 + mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); 213 214 + err = mlx4_WRITE_ENTRY(dev, index, mailbox); 215 if (err) 216 goto out; 217 218 if (!link) 219 goto out; 220 221 + err = mlx4_READ_ENTRY(dev, prev, mailbox); 222 if (err) 223 goto out; 224 225 mgm->next_gid_index = cpu_to_be32(index << 6); 226 227 + err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 228 if (err) 229 goto out; 230 231 out: 232 + if (prot == MLX4_PROT_ETH) { 233 + /* manage the steering entry for promisc mode */ 234 + if (new_entry) 235 + new_steering_entry(dev, 0, port, steer, index, qp->qpn); 236 + else 237 + existing_steering_entry(dev, 0, port, steer, 238 + index, qp->qpn); 239 + } 240 if (err && link && index != -1) { 241 if (index < dev->caps.num_mgms) 242 mlx4_warn(dev, "Got AMGM index %d < %d", ··· 242 mlx4_free_cmd_mailbox(dev, mailbox); 243 return err; 244 } 245 246 + int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 247 + enum mlx4_protocol prot, enum mlx4_steer_type steer) 248 { 249 struct mlx4_priv *priv = mlx4_priv(dev); 250 struct mlx4_cmd_mailbox 
*mailbox; ··· 255 int prev, index; 256 int i, loc; 257 int err; 258 + u8 port = gid[5]; 259 + bool removed_entry = false; 260 261 mailbox = mlx4_alloc_cmd_mailbox(dev); 262 if (IS_ERR(mailbox)) ··· 263 264 mutex_lock(&priv->mcg_table.mutex); 265 266 + err = find_entry(dev, port, gid, prot, steer, 267 + mailbox, &hash, &prev, &index); 268 if (err) 269 goto out; 270 ··· 272 err = -EINVAL; 273 goto out; 274 } 275 + 276 + /* if this pq is also a promisc qp, it shouldn't be removed */ 277 + if (prot == MLX4_PROT_ETH && 278 + check_duplicate_entry(dev, 0, port, steer, index, qp->qpn)) 279 + goto out; 280 281 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 282 for (loc = -1, i = 0; i < members_count; ++i) ··· 285 } 286 287 288 + mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); 289 mgm->qp[loc] = mgm->qp[i - 1]; 290 mgm->qp[i - 1] = 0; 291 292 + if (prot == MLX4_PROT_ETH) 293 + removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn); 294 + if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) { 295 + err = mlx4_WRITE_ENTRY(dev, index, mailbox); 296 goto out; 297 } 298 + 299 + /* We are going to delete the entry, members count should be 0 */ 300 + mgm->members_count = cpu_to_be32((u32) prot << 30); 301 302 if (prev == -1) { 303 /* Remove entry from MGM */ 304 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 305 if (amgm_index) { 306 + err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); 307 if (err) 308 goto out; 309 } else 310 memset(mgm->gid, 0, 16); 311 312 + err = mlx4_WRITE_ENTRY(dev, index, mailbox); 313 if (err) 314 goto out; 315 ··· 319 } else { 320 /* Remove entry from AMGM */ 321 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 322 + err = mlx4_READ_ENTRY(dev, prev, mailbox); 323 if (err) 324 goto out; 325 326 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 327 328 + err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 329 if (err) 330 goto out; 331 ··· 343 mlx4_free_cmd_mailbox(dev, mailbox); 344 return err; 345 } 346 + 347 + 348 + int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 349 + int block_mcast_loopback, enum mlx4_protocol prot) 350 + { 351 + enum mlx4_steer_type steer; 352 + 353 + steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; 354 + 355 + if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering) 356 + return 0; 357 + 358 + if (prot == MLX4_PROT_ETH) 359 + gid[7] |= (steer << 1); 360 + 361 + return mlx4_qp_attach_common(dev, qp, gid, 362 + block_mcast_loopback, prot, 363 + steer); 364 + } 365 + EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 366 + 367 + int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 368 + enum mlx4_protocol prot) 369 + { 370 + enum mlx4_steer_type steer; 371 + 372 + steer = (is_valid_ether_addr(&gid[10])) ? 
MLX4_UC_STEER : MLX4_MC_STEER; 373 + 374 + if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering) 375 + return 0; 376 + 377 + if (prot == MLX4_PROT_ETH) { 378 + gid[7] |= (steer << 1); 379 + } 380 + 381 + return mlx4_qp_detach_common(dev, qp, gid, prot, steer); 382 + } 383 EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 384 + 385 + 386 + int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 387 + { 388 + if (!dev->caps.vep_mc_steering) 389 + return 0; 390 + 391 + 392 + return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); 393 + } 394 + EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); 395 + 396 + int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 397 + { 398 + if (!dev->caps.vep_mc_steering) 399 + return 0; 400 + 401 + 402 + return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn); 403 + } 404 + EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); 405 + 406 + int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 407 + { 408 + if (!dev->caps.vep_mc_steering) 409 + return 0; 410 + 411 + 412 + return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); 413 + } 414 + EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); 415 + 416 + int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 417 + { 418 + if (!dev->caps.vep_mc_steering) 419 + return 0; 420 + 421 + return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn); 422 + } 423 + EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); 424 425 int mlx4_init_mcg_table(struct mlx4_dev *dev) 426 {
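For MLX4_PROT_ETH attaches, the code above now picks MLX4_UC_STEER when the bytes at gid[10..15] form a valid unicast MAC and MLX4_MC_STEER otherwise, reads the port from gid[5], and folds the steering type into the GID (gid[7] |= steer << 1) before walking the MGM hash. The small user-space model below shows that encoding; the MAC-in-GID layout matches what the code reads back, but the numeric values of the steer enumerators and the gid assembly helper are illustrative only.

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        /* Illustrative values; the real enum mlx4_steer_type lives elsewhere. */
        enum { UC_STEER = 0, MC_STEER = 1 };

        /* Same test is_valid_ether_addr() performs: not multicast, not all-zero. */
        static bool valid_ether_addr(const uint8_t *mac)
        {
                static const uint8_t zero[6];
                return !(mac[0] & 1) && memcmp(mac, zero, 6) != 0;
        }

        int main(void)
        {
                uint8_t gid[16] = { 0 };
                uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
                int port = 1;
                int steer;

                gid[5] = port;            /* find_entry() reads the port from gid[5] */
                memcpy(&gid[10], mac, 6); /* MAC sits in the low 6 bytes of the GID */

                steer = valid_ether_addr(&gid[10]) ? UC_STEER : MC_STEER;
                gid[7] |= steer << 1;     /* steering type folded into the GID */

                printf("steer=%s gid[7]=0x%02x\n",
                       steer == UC_STEER ? "UC" : "MC", gid[7]);
                return 0;
        }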
+50
drivers/net/mlx4/mlx4.h
··· 105 u32 max; 106 u32 reserved_top; 107 u32 mask; 108 spinlock_t lock; 109 unsigned long *table; 110 }; ··· 163 u8 catas_bar; 164 }; 165 166 struct mlx4_cmd { 167 struct pci_pool *pool; 168 void __iomem *hcr; ··· 287 int max; 288 }; 289 290 struct mlx4_port_info { 291 struct mlx4_dev *dev; 292 int port; ··· 298 struct device_attribute port_attr; 299 enum mlx4_port_type tmp_type; 300 struct mlx4_mac_table mac_table; 301 struct mlx4_vlan_table vlan_table; 302 }; 303 304 struct mlx4_sense { ··· 308 u8 do_sense_port[MLX4_MAX_PORTS + 1]; 309 u8 sense_allowed[MLX4_MAX_PORTS + 1]; 310 struct delayed_work sense_poll; 311 }; 312 313 struct mlx4_priv { ··· 352 struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; 353 struct mlx4_sense sense; 354 struct mutex port_mutex; 355 }; 356 357 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) ··· 372 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 373 u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); 374 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); 375 int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 376 u32 reserved_bot, u32 resetrved_top); 377 void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); ··· 448 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); 449 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); 450 451 #endif /* MLX4_H */
··· 105 u32 max; 106 u32 reserved_top; 107 u32 mask; 108 + u32 avail; 109 spinlock_t lock; 110 unsigned long *table; 111 }; ··· 162 u8 catas_bar; 163 }; 164 165 + #define MGM_QPN_MASK 0x00FFFFFF 166 + #define MGM_BLCK_LB_BIT 30 167 + 168 + struct mlx4_promisc_qp { 169 + struct list_head list; 170 + u32 qpn; 171 + }; 172 + 173 + struct mlx4_steer_index { 174 + struct list_head list; 175 + unsigned int index; 176 + struct list_head duplicates; 177 + }; 178 + 179 + struct mlx4_mgm { 180 + __be32 next_gid_index; 181 + __be32 members_count; 182 + u32 reserved[2]; 183 + u8 gid[16]; 184 + __be32 qp[MLX4_QP_PER_MGM]; 185 + }; 186 struct mlx4_cmd { 187 struct pci_pool *pool; 188 void __iomem *hcr; ··· 265 int max; 266 }; 267 268 + struct mlx4_mac_entry { 269 + u64 mac; 270 + }; 271 + 272 struct mlx4_port_info { 273 struct mlx4_dev *dev; 274 int port; ··· 272 struct device_attribute port_attr; 273 enum mlx4_port_type tmp_type; 274 struct mlx4_mac_table mac_table; 275 + struct radix_tree_root mac_tree; 276 struct mlx4_vlan_table vlan_table; 277 + int base_qpn; 278 }; 279 280 struct mlx4_sense { ··· 280 u8 do_sense_port[MLX4_MAX_PORTS + 1]; 281 u8 sense_allowed[MLX4_MAX_PORTS + 1]; 282 struct delayed_work sense_poll; 283 + }; 284 + 285 + struct mlx4_msix_ctl { 286 + u64 pool_bm; 287 + spinlock_t pool_lock; 288 + }; 289 + 290 + struct mlx4_steer { 291 + struct list_head promisc_qps[MLX4_NUM_STEERS]; 292 + struct list_head steer_entries[MLX4_NUM_STEERS]; 293 + struct list_head high_prios; 294 }; 295 296 struct mlx4_priv { ··· 313 struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; 314 struct mlx4_sense sense; 315 struct mutex port_mutex; 316 + struct mlx4_msix_ctl msix_ctl; 317 + struct mlx4_steer *steer; 318 + struct list_head bf_list; 319 + struct mutex bf_mutex; 320 + struct io_mapping *bf_mapping; 321 }; 322 323 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) ··· 328 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 329 u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); 330 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); 331 + u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap); 332 int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 333 u32 reserved_bot, u32 resetrved_top); 334 void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); ··· 403 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); 404 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); 405 406 + int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 407 + enum mlx4_protocol prot, enum mlx4_steer_type steer); 408 + int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 409 + int block_mcast_loopback, enum mlx4_protocol prot, 410 + enum mlx4_steer_type steer); 411 #endif /* MLX4_H */
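struct mlx4_mgm moves into mlx4.h above (with MGM_QPN_MASK and MGM_BLCK_LB_BIT now visible there as well) so steering code outside mcg.c can parse MGM entries; members_count packs the protocol into its top two bits and the actual QP count into the low 24, which is why mcg.c keeps masking with 0xffffff and shifting by 30. A tiny stand-alone check of that packing, for illustration:

        #include <stdint.h>
        #include <stdio.h>

        #define MGM_QPN_MASK 0x00FFFFFF  /* low 24 bits of each qp[] slot */

        /* members_count layout used throughout mcg.c: count | prot << 30 */
        static uint32_t pack_members(uint32_t count, uint32_t prot)
        {
                return (count & 0xffffff) | (prot << 30);
        }

        int main(void)
        {
                uint32_t mc = pack_members(5, 1);  /* 5 members, protocol value 1 */

                printf("count=%u prot=%u\n", mc & 0xffffff, mc >> 30);
                return 0;
        }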
+22 -5
drivers/net/mlx4/mlx4_en.h
··· 49 #include "en_port.h" 50 51 #define DRV_NAME "mlx4_en" 52 - #define DRV_VERSION "1.5.1.6" 53 - #define DRV_RELDATE "August 2010" 54 55 #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 56 ··· 62 #define MLX4_EN_PAGE_SHIFT 12 63 #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) 64 #define MAX_RX_RINGS 16 65 #define TXBB_SIZE 64 66 #define HEADROOM (2048 / TXBB_SIZE + 1) 67 #define STAMP_STRIDE 64 ··· 125 #define MLX4_EN_RX_SIZE_THRESH 1024 126 #define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) 127 #define MLX4_EN_SAMPLE_INTERVAL 0 128 129 #define MLX4_EN_AUTO_CONF 0xffff 130 ··· 216 217 #define MLX4_EN_USE_SRQ 0x01000000 218 219 struct mlx4_en_rx_alloc { 220 struct page *page; 221 u16 offset; ··· 248 unsigned long bytes; 249 unsigned long packets; 250 spinlock_t comp_lock; 251 }; 252 253 struct mlx4_en_rx_desc { ··· 460 struct mlx4_en_rss_map rss_map; 461 u32 flags; 462 #define MLX4_EN_FLAG_PROMISC 0x1 463 u32 tx_ring_num; 464 u32 rx_ring_num; 465 u32 rx_skb_size; ··· 469 u16 log_rx_info; 470 471 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; 472 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; 473 struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; 474 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; ··· 485 int mc_addrs_cnt; 486 struct mlx4_en_stat_out_mbox hw_stats; 487 int vids[128]; 488 }; 489 490 ··· 502 int mlx4_en_start_port(struct net_device *dev); 503 void mlx4_en_stop_port(struct net_device *dev); 504 505 - void mlx4_en_free_resources(struct mlx4_en_priv *priv); 506 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); 507 508 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 509 int entries, int ring, enum cq_type mode); 510 - void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 511 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 512 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 513 int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); ··· 520 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 521 522 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, 523 - u32 size, u16 stride); 524 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 525 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 526 struct mlx4_en_tx_ring *ring,
··· 49 #include "en_port.h" 50 51 #define DRV_NAME "mlx4_en" 52 + #define DRV_VERSION "1.5.4.1" 53 + #define DRV_RELDATE "March 2011" 54 55 #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 56 ··· 62 #define MLX4_EN_PAGE_SHIFT 12 63 #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) 64 #define MAX_RX_RINGS 16 65 + #define MIN_RX_RINGS 4 66 #define TXBB_SIZE 64 67 #define HEADROOM (2048 / TXBB_SIZE + 1) 68 #define STAMP_STRIDE 64 ··· 124 #define MLX4_EN_RX_SIZE_THRESH 1024 125 #define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) 126 #define MLX4_EN_SAMPLE_INTERVAL 0 127 + #define MLX4_EN_AVG_PKT_SMALL 256 128 129 #define MLX4_EN_AUTO_CONF 0xffff 130 ··· 214 215 #define MLX4_EN_USE_SRQ 0x01000000 216 217 + #define MLX4_EN_CX3_LOW_ID 0x1000 218 + #define MLX4_EN_CX3_HIGH_ID 0x1005 219 + 220 struct mlx4_en_rx_alloc { 221 struct page *page; 222 u16 offset; ··· 243 unsigned long bytes; 244 unsigned long packets; 245 spinlock_t comp_lock; 246 + struct mlx4_bf bf; 247 + bool bf_enabled; 248 }; 249 250 struct mlx4_en_rx_desc { ··· 453 struct mlx4_en_rss_map rss_map; 454 u32 flags; 455 #define MLX4_EN_FLAG_PROMISC 0x1 456 + #define MLX4_EN_FLAG_MC_PROMISC 0x2 457 u32 tx_ring_num; 458 u32 rx_ring_num; 459 u32 rx_skb_size; ··· 461 u16 log_rx_info; 462 463 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; 464 + int tx_vector; 465 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; 466 struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; 467 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; ··· 476 int mc_addrs_cnt; 477 struct mlx4_en_stat_out_mbox hw_stats; 478 int vids[128]; 479 + bool wol; 480 + }; 481 + 482 + enum mlx4_en_wol { 483 + MLX4_EN_WOL_MAGIC = (1ULL << 61), 484 + MLX4_EN_WOL_ENABLED = (1ULL << 62), 485 + MLX4_EN_WOL_DO_MODIFY = (1ULL << 63), 486 }; 487 488 ··· 486 int mlx4_en_start_port(struct net_device *dev); 487 void mlx4_en_stop_port(struct net_device *dev); 488 489 + void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors); 490 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); 491 492 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 493 int entries, int ring, enum cq_type mode); 494 + void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 495 + bool reserve_vectors); 496 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 497 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 498 int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); ··· 503 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 504 505 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, 506 + int qpn, u32 size, u16 stride); 507 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 508 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 509 struct mlx4_en_tx_ring *ring,
+102
drivers/net/mlx4/pd.c
··· 32 */ 33 34 #include <linux/errno.h> 35 36 #include <asm/page.h> 37 38 #include "mlx4.h" 39 #include "icm.h" 40 41 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) 42 { ··· 82 return -ENOMEM; 83 84 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; 85 86 return 0; 87 } ··· 93 mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index); 94 } 95 EXPORT_SYMBOL_GPL(mlx4_uar_free); 96 97 int mlx4_init_uar_table(struct mlx4_dev *dev) 98 {
··· 32 */ 33 34 #include <linux/errno.h> 35 + #include <linux/io-mapping.h> 36 37 #include <asm/page.h> 38 39 #include "mlx4.h" 40 #include "icm.h" 41 + 42 + enum { 43 + MLX4_NUM_RESERVED_UARS = 8 44 + }; 45 46 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) 47 { ··· 77 return -ENOMEM; 78 79 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; 80 + uar->map = NULL; 81 82 return 0; 83 } ··· 87 mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index); 88 } 89 EXPORT_SYMBOL_GPL(mlx4_uar_free); 90 + 91 + int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf) 92 + { 93 + struct mlx4_priv *priv = mlx4_priv(dev); 94 + struct mlx4_uar *uar; 95 + int err = 0; 96 + int idx; 97 + 98 + if (!priv->bf_mapping) 99 + return -ENOMEM; 100 + 101 + mutex_lock(&priv->bf_mutex); 102 + if (!list_empty(&priv->bf_list)) 103 + uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list); 104 + else { 105 + if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) { 106 + err = -ENOMEM; 107 + goto out; 108 + } 109 + uar = kmalloc(sizeof *uar, GFP_KERNEL); 110 + if (!uar) { 111 + err = -ENOMEM; 112 + goto out; 113 + } 114 + err = mlx4_uar_alloc(dev, uar); 115 + if (err) 116 + goto free_kmalloc; 117 + 118 + uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE); 119 + if (!uar->map) { 120 + err = -ENOMEM; 121 + goto free_uar; 122 + } 123 + 124 + uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); 125 + if (!uar->bf_map) { 126 + err = -ENOMEM; 127 + goto unamp_uar; 128 + } 129 + uar->free_bf_bmap = 0; 130 + list_add(&uar->bf_list, &priv->bf_list); 131 + } 132 + 133 + bf->uar = uar; 134 + idx = ffz(uar->free_bf_bmap); 135 + uar->free_bf_bmap |= 1 << idx; 136 + bf->uar = uar; 137 + bf->offset = 0; 138 + bf->buf_size = dev->caps.bf_reg_size / 2; 139 + bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size; 140 + if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1) 141 + list_del_init(&uar->bf_list); 142 + 143 + goto out; 144 + 145 + unamp_uar: 146 + bf->uar = NULL; 147 + iounmap(uar->map); 148 + 149 + free_uar: 150 + mlx4_uar_free(dev, uar); 151 + 152 + free_kmalloc: 153 + kfree(uar); 154 + 155 + out: 156 + mutex_unlock(&priv->bf_mutex); 157 + return err; 158 + } 159 + EXPORT_SYMBOL_GPL(mlx4_bf_alloc); 160 + 161 + void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf) 162 + { 163 + struct mlx4_priv *priv = mlx4_priv(dev); 164 + int idx; 165 + 166 + if (!bf->uar || !bf->uar->bf_map) 167 + return; 168 + 169 + mutex_lock(&priv->bf_mutex); 170 + idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size; 171 + bf->uar->free_bf_bmap &= ~(1 << idx); 172 + if (!bf->uar->free_bf_bmap) { 173 + if (!list_empty(&bf->uar->bf_list)) 174 + list_del(&bf->uar->bf_list); 175 + 176 + io_mapping_unmap(bf->uar->bf_map); 177 + iounmap(bf->uar->map); 178 + mlx4_uar_free(dev, bf->uar); 179 + kfree(bf->uar); 180 + } else if (list_empty(&bf->uar->bf_list)) 181 + list_add(&bf->uar->bf_list, &priv->bf_list); 182 + 183 + mutex_unlock(&priv->bf_mutex); 184 + } 185 + EXPORT_SYMBOL_GPL(mlx4_bf_free); 186 187 int mlx4_init_uar_table(struct mlx4_dev *dev) 188 {
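mlx4_bf_alloc()/mlx4_bf_free() above carve blue flame registers out of write-combining-mapped UAR pages, keeping a partially used UAR on priv->bf_list until all of its registers are handed out. A sketch of how a kernel consumer might opt in, assuming it falls back to the ordinary UAR doorbell when no register is available (names are illustrative; the bf/bf_enabled pair added to the EN TX ring earlier follows the same pattern):

/*
 * Sketch of an assumed consumer, not part of the patch: try to grab a
 * blue flame register for a send ring and fall back to plain doorbells.
 * 'struct my_tx_ring' and the helpers are illustrative names only.
 */
#include <linux/mlx4/device.h>

struct my_tx_ring {
	struct mlx4_bf bf;
	bool bf_enabled;
};

static void my_tx_ring_get_bf(struct mlx4_dev *mdev, struct my_tx_ring *ring)
{
	if (!mlx4_bf_alloc(mdev, &ring->bf)) {
		/* small descriptors can be copied straight to ring->bf.reg,
		 * up to ring->bf.buf_size bytes per post */
		ring->bf_enabled = true;
	} else {
		ring->bf_enabled = false;	/* no BF pages: use the UAR doorbell */
	}
}

static void my_tx_ring_put_bf(struct mlx4_dev *mdev, struct my_tx_ring *ring)
{
	if (ring->bf_enabled)
		mlx4_bf_free(mdev, &ring->bf);
}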
+151 -14
drivers/net/mlx4/port.c
··· 90 return err; 91 } 92 93 - int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) 94 { 95 - struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; 96 int i, err = 0; 97 int free = -1; 98 99 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); 100 mutex_lock(&table->mutex); 101 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { ··· 173 174 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { 175 /* MAC already registered, increase refernce count */ 176 - *index = i; 177 ++table->refs[i]; 178 goto out; 179 } ··· 203 goto out; 204 } 205 206 - *index = free; 207 ++table->total; 208 out: 209 mutex_unlock(&table->mutex); ··· 212 } 213 EXPORT_SYMBOL_GPL(mlx4_register_mac); 214 215 - void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index) 216 { 217 - struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; 218 219 mutex_lock(&table->mutex); 220 - if (!table->refs[index]) { 221 - mlx4_warn(dev, "No MAC entry for index %d\n", index); 222 goto out; 223 - } 224 - if (--table->refs[index]) { 225 - mlx4_warn(dev, "Have more references for index %d," 226 - "no need to modify MAC table\n", index); 227 - goto out; 228 - } 229 table->entries[index] = 0; 230 mlx4_set_port_mac_table(dev, port, table->entries); 231 --table->total; ··· 266 } 267 EXPORT_SYMBOL_GPL(mlx4_unregister_mac); 268 269 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, 270 __be32 *entries) 271 {
··· 90 return err; 91 } 92 93 + static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, 94 + u64 mac, int *qpn, u8 reserve) 95 { 96 + struct mlx4_qp qp; 97 + u8 gid[16] = {0}; 98 + int err; 99 + 100 + if (reserve) { 101 + err = mlx4_qp_reserve_range(dev, 1, 1, qpn); 102 + if (err) { 103 + mlx4_err(dev, "Failed to reserve qp for mac registration\n"); 104 + return err; 105 + } 106 + } 107 + qp.qpn = *qpn; 108 + 109 + mac &= 0xffffffffffffULL; 110 + mac = cpu_to_be64(mac << 16); 111 + memcpy(&gid[10], &mac, ETH_ALEN); 112 + gid[5] = port; 113 + gid[7] = MLX4_UC_STEER << 1; 114 + 115 + err = mlx4_qp_attach_common(dev, &qp, gid, 0, 116 + MLX4_PROT_ETH, MLX4_UC_STEER); 117 + if (err && reserve) 118 + mlx4_qp_release_range(dev, *qpn, 1); 119 + 120 + return err; 121 + } 122 + 123 + static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, 124 + u64 mac, int qpn, u8 free) 125 + { 126 + struct mlx4_qp qp; 127 + u8 gid[16] = {0}; 128 + 129 + qp.qpn = qpn; 130 + mac &= 0xffffffffffffULL; 131 + mac = cpu_to_be64(mac << 16); 132 + memcpy(&gid[10], &mac, ETH_ALEN); 133 + gid[5] = port; 134 + gid[7] = MLX4_UC_STEER << 1; 135 + 136 + mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER); 137 + if (free) 138 + mlx4_qp_release_range(dev, qpn, 1); 139 + } 140 + 141 + int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap) 142 + { 143 + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 144 + struct mlx4_mac_table *table = &info->mac_table; 145 + struct mlx4_mac_entry *entry; 146 int i, err = 0; 147 int free = -1; 148 149 + if (dev->caps.vep_uc_steering) { 150 + err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); 151 + if (!err) { 152 + entry = kmalloc(sizeof *entry, GFP_KERNEL); 153 + if (!entry) { 154 + mlx4_uc_steer_release(dev, port, mac, *qpn, 1); 155 + return -ENOMEM; 156 + } 157 + entry->mac = mac; 158 + err = radix_tree_insert(&info->mac_tree, *qpn, entry); 159 + if (err) { 160 + mlx4_uc_steer_release(dev, port, mac, *qpn, 1); 161 + return err; 162 + } 163 + } else 164 + return err; 165 + } 166 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); 167 mutex_lock(&table->mutex); 168 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { ··· 106 107 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { 108 /* MAC already registered, increase refernce count */ 109 ++table->refs[i]; 110 goto out; 111 } ··· 137 goto out; 138 } 139 140 + if (!dev->caps.vep_uc_steering) 141 + *qpn = info->base_qpn + free; 142 ++table->total; 143 out: 144 mutex_unlock(&table->mutex); ··· 145 } 146 EXPORT_SYMBOL_GPL(mlx4_register_mac); 147 148 + static int validate_index(struct mlx4_dev *dev, 149 + struct mlx4_mac_table *table, int index) 150 { 151 + int err = 0; 152 + 153 + if (index < 0 || index >= table->max || !table->entries[index]) { 154 + mlx4_warn(dev, "No valid Mac entry for the given index\n"); 155 + err = -EINVAL; 156 + } 157 + return err; 158 + } 159 + 160 + static int find_index(struct mlx4_dev *dev, 161 + struct mlx4_mac_table *table, u64 mac) 162 + { 163 + int i; 164 + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { 165 + if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) 166 + return i; 167 + } 168 + /* Mac not found */ 169 + return -EINVAL; 170 + } 171 + 172 + void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn) 173 + { 174 + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 175 + struct mlx4_mac_table *table = &info->mac_table; 176 + int index = qpn - info->base_qpn; 177 + struct mlx4_mac_entry *entry; 178 + 179 + if 
(dev->caps.vep_uc_steering) { 180 + entry = radix_tree_lookup(&info->mac_tree, qpn); 181 + if (entry) { 182 + mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1); 183 + radix_tree_delete(&info->mac_tree, qpn); 184 + index = find_index(dev, table, entry->mac); 185 + kfree(entry); 186 + } 187 + } 188 189 mutex_lock(&table->mutex); 190 + 191 + if (validate_index(dev, table, index)) 192 goto out; 193 + 194 table->entries[index] = 0; 195 mlx4_set_port_mac_table(dev, port, table->entries); 196 --table->total; ··· 167 } 168 EXPORT_SYMBOL_GPL(mlx4_unregister_mac); 169 170 + int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap) 171 + { 172 + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 173 + struct mlx4_mac_table *table = &info->mac_table; 174 + int index = qpn - info->base_qpn; 175 + struct mlx4_mac_entry *entry; 176 + int err; 177 + 178 + if (dev->caps.vep_uc_steering) { 179 + entry = radix_tree_lookup(&info->mac_tree, qpn); 180 + if (!entry) 181 + return -EINVAL; 182 + index = find_index(dev, table, entry->mac); 183 + mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0); 184 + entry->mac = new_mac; 185 + err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0); 186 + if (err || index < 0) 187 + return err; 188 + } 189 + 190 + mutex_lock(&table->mutex); 191 + 192 + err = validate_index(dev, table, index); 193 + if (err) 194 + goto out; 195 + 196 + table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); 197 + 198 + err = mlx4_set_port_mac_table(dev, port, table->entries); 199 + if (unlikely(err)) { 200 + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac); 201 + table->entries[index] = 0; 202 + } 203 + out: 204 + mutex_unlock(&table->mutex); 205 + return err; 206 + } 207 + EXPORT_SYMBOL_GPL(mlx4_replace_mac); 208 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, 209 __be32 *entries) 210 {
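With VEP unicast steering the MAC table index is no longer the caller's handle: mlx4_register_mac() reserves a QP, attaches a unicast steering entry and returns the QP number, which mlx4_replace_mac() and mlx4_unregister_mac() then key on. An illustrative calling sequence, not taken from the patch (function and variable names are invented):

/*
 * Illustrative only: register a MAC, later swap it for a new one and
 * finally release it, always keyed by the QP number returned at
 * registration time.
 */
#include <linux/mlx4/device.h>

static int my_port_mac_cycle(struct mlx4_dev *dev, u8 port, u64 mac, u64 new_mac)
{
	int qpn;
	int err;

	err = mlx4_register_mac(dev, port, mac, &qpn, 0);
	if (err)
		return err;

	/* e.g. on a MAC address change requested by the stack */
	err = mlx4_replace_mac(dev, port, qpn, new_mac, 0);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	mlx4_unregister_mac(dev, port, qpn);
	return err;
}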
+1 -3
drivers/net/mlx4/profile.c
··· 107 profile[MLX4_RES_AUXC].num = request->num_qp; 108 profile[MLX4_RES_SRQ].num = request->num_srq; 109 profile[MLX4_RES_CQ].num = request->num_cq; 110 - profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, 111 - dev_cap->reserved_eqs + 112 - num_possible_cpus() + 1); 113 profile[MLX4_RES_DMPT].num = request->num_mpt; 114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 115 profile[MLX4_RES_MTT].num = request->num_mtt;
··· 107 profile[MLX4_RES_AUXC].num = request->num_qp; 108 profile[MLX4_RES_SRQ].num = request->num_srq; 109 profile[MLX4_RES_CQ].num = request->num_cq; 110 + profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); 111 profile[MLX4_RES_DMPT].num = request->num_mpt; 112 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 113 profile[MLX4_RES_MTT].num = request->num_mtt;
+1
drivers/net/myri10ge/myri10ge.c
··· 3645 dma_free_coherent(&pdev->dev, bytes, 3646 ss->fw_stats, ss->fw_stats_bus); 3647 ss->fw_stats = NULL; 3648 } 3649 } 3650 kfree(mgp->ss);
··· 3645 dma_free_coherent(&pdev->dev, bytes, 3646 ss->fw_stats, ss->fw_stats_bus); 3647 ss->fw_stats = NULL; 3648 + netif_napi_del(&ss->napi); 3649 } 3650 } 3651 kfree(mgp->ss);
+3 -3
drivers/net/pch_gbe/pch_gbe_main.c
··· 2441 .resume = pch_gbe_io_resume 2442 }; 2443 2444 - static struct pci_driver pch_gbe_pcidev = { 2445 .name = KBUILD_MODNAME, 2446 .id_table = pch_gbe_pcidev_id, 2447 .probe = pch_gbe_probe, ··· 2458 { 2459 int ret; 2460 2461 - ret = pci_register_driver(&pch_gbe_pcidev); 2462 if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { 2463 if (copybreak == 0) { 2464 pr_info("copybreak disabled\n"); ··· 2472 2473 static void __exit pch_gbe_exit_module(void) 2474 { 2475 - pci_unregister_driver(&pch_gbe_pcidev); 2476 } 2477 2478 module_init(pch_gbe_init_module);
··· 2441 .resume = pch_gbe_io_resume 2442 }; 2443 2444 + static struct pci_driver pch_gbe_driver = { 2445 .name = KBUILD_MODNAME, 2446 .id_table = pch_gbe_pcidev_id, 2447 .probe = pch_gbe_probe, ··· 2458 { 2459 int ret; 2460 2461 + ret = pci_register_driver(&pch_gbe_driver); 2462 if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { 2463 if (copybreak == 0) { 2464 pr_info("copybreak disabled\n"); ··· 2472 2473 static void __exit pch_gbe_exit_module(void) 2474 { 2475 + pci_unregister_driver(&pch_gbe_driver); 2476 } 2477 2478 module_init(pch_gbe_init_module);
+16 -2
drivers/net/sfc/efx.c
··· 1054 { 1055 struct pci_dev *pci_dev = efx->pci_dev; 1056 dma_addr_t dma_mask = efx->type->max_dma_mask; 1057 int rc; 1058 1059 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); ··· 1105 rc = -EIO; 1106 goto fail3; 1107 } 1108 - efx->membase = ioremap_wc(efx->membase_phys, 1109 - efx->type->mem_map_size); 1110 if (!efx->membase) { 1111 netif_err(efx, probe, efx->net_dev, 1112 "could not map memory BAR at %llx+%x\n",
··· 1054 { 1055 struct pci_dev *pci_dev = efx->pci_dev; 1056 dma_addr_t dma_mask = efx->type->max_dma_mask; 1057 + bool use_wc; 1058 int rc; 1059 1060 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); ··· 1104 rc = -EIO; 1105 goto fail3; 1106 } 1107 + 1108 + /* bug22643: If SR-IOV is enabled then tx push over a write combined 1109 + * mapping is unsafe. We need to disable write combining in this case. 1110 + * MSI is unsupported when SR-IOV is enabled, and the firmware will 1111 + * have removed the MSI capability. So write combining is safe if 1112 + * there is an MSI capability. 1113 + */ 1114 + use_wc = (!EFX_WORKAROUND_22643(efx) || 1115 + pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); 1116 + if (use_wc) 1117 + efx->membase = ioremap_wc(efx->membase_phys, 1118 + efx->type->mem_map_size); 1119 + else 1120 + efx->membase = ioremap_nocache(efx->membase_phys, 1121 + efx->type->mem_map_size); 1122 if (!efx->membase) { 1123 netif_err(efx, probe, efx->net_dev, 1124 "could not map memory BAR at %llx+%x\n",
+2
drivers/net/sfc/workarounds.h
··· 38 #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 39 /* Legacy interrupt storm when interrupt fifo fills */ 40 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 41 42 /* Spurious parity errors in TSORT buffers */ 43 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
··· 38 #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 39 /* Legacy interrupt storm when interrupt fifo fills */ 40 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 41 + /* Write combining and sriov=enabled are incompatible */ 42 + #define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA 43 44 /* Spurious parity errors in TSORT buffers */ 45 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
+10 -7
drivers/net/usb/smsc95xx.c
··· 49 50 struct smsc95xx_priv { 51 u32 mac_cr; 52 spinlock_t mac_cr_lock; 53 bool use_tx_csum; 54 bool use_rx_csum; ··· 372 { 373 struct usbnet *dev = netdev_priv(netdev); 374 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 375 - u32 hash_hi = 0; 376 - u32 hash_lo = 0; 377 unsigned long flags; 378 379 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 380 ··· 397 u32 bitnum = smsc95xx_hash(ha->addr); 398 u32 mask = 0x01 << (bitnum & 0x1F); 399 if (bitnum & 0x20) 400 - hash_hi |= mask; 401 else 402 - hash_lo |= mask; 403 } 404 405 netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n", 406 - hash_hi, hash_lo); 407 } else { 408 netif_dbg(dev, drv, dev->net, "receive own packets only\n"); 409 pdata->mac_cr &= ··· 413 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 414 415 /* Initiate async writes, as we can't wait for completion here */ 416 - smsc95xx_write_reg_async(dev, HASHH, &hash_hi); 417 - smsc95xx_write_reg_async(dev, HASHL, &hash_lo); 418 smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); 419 } 420
··· 49 50 struct smsc95xx_priv { 51 u32 mac_cr; 52 + u32 hash_hi; 53 + u32 hash_lo; 54 spinlock_t mac_cr_lock; 55 bool use_tx_csum; 56 bool use_rx_csum; ··· 370 { 371 struct usbnet *dev = netdev_priv(netdev); 372 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 373 unsigned long flags; 374 + 375 + pdata->hash_hi = 0; 376 + pdata->hash_lo = 0; 377 378 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 379 ··· 394 u32 bitnum = smsc95xx_hash(ha->addr); 395 u32 mask = 0x01 << (bitnum & 0x1F); 396 if (bitnum & 0x20) 397 + pdata->hash_hi |= mask; 398 else 399 + pdata->hash_lo |= mask; 400 } 401 402 netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n", 403 + pdata->hash_hi, pdata->hash_lo); 404 } else { 405 netif_dbg(dev, drv, dev->net, "receive own packets only\n"); 406 pdata->mac_cr &= ··· 410 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 411 412 /* Initiate async writes, as we can't wait for completion here */ 413 + smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi); 414 + smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo); 415 smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); 416 } 417
+2
drivers/net/wireless/ath/ath9k/main.c
··· 2160 if (!ath_drain_all_txq(sc, false)) 2161 ath_reset(sc, false); 2162 2163 out: 2164 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); 2165 mutex_unlock(&sc->mutex);
··· 2160 if (!ath_drain_all_txq(sc, false)) 2161 ath_reset(sc, false); 2162 2163 + ieee80211_wake_queues(hw); 2164 + 2165 out: 2166 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); 2167 mutex_unlock(&sc->mutex);
+1 -1
drivers/net/wireless/ath/ath9k/rc.c
··· 1328 1329 hdr = (struct ieee80211_hdr *)skb->data; 1330 fc = hdr->frame_control; 1331 - for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 1332 struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; 1333 if (!rate->count) 1334 break;
··· 1328 1329 hdr = (struct ieee80211_hdr *)skb->data; 1330 fc = hdr->frame_control; 1331 + for (i = 0; i < sc->hw->max_rates; i++) { 1332 struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; 1333 if (!rate->count) 1334 break;
+2 -2
drivers/net/wireless/ath/ath9k/xmit.c
··· 1725 u8 tidno; 1726 1727 spin_lock_bh(&txctl->txq->axq_lock); 1728 - 1729 - if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) { 1730 tidno = ieee80211_get_qos_ctl(hdr)[0] & 1731 IEEE80211_QOS_CTL_TID_MASK; 1732 tid = ATH_AN_2_TID(txctl->an, tidno);
··· 1725 u8 tidno; 1726 1727 spin_lock_bh(&txctl->txq->axq_lock); 1728 + if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && 1729 + ieee80211_is_data_qos(hdr->frame_control)) { 1730 tidno = ieee80211_get_qos_ctl(hdr)[0] & 1731 IEEE80211_QOS_CTL_TID_MASK; 1732 tid = ATH_AN_2_TID(txctl->an, tidno);
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
··· 2265 int ret; 2266 2267 ret = wait_event_timeout(priv->_agn.notif_waitq, 2268 - &wait_entry->triggered, 2269 timeout); 2270 2271 spin_lock_bh(&priv->_agn.notif_wait_lock);
··· 2265 int ret; 2266 2267 ret = wait_event_timeout(priv->_agn.notif_waitq, 2268 + wait_entry->triggered, 2269 timeout); 2270 2271 spin_lock_bh(&priv->_agn.notif_wait_lock);
+5 -2
drivers/net/wireless/iwlwifi/iwl-agn.c
··· 3009 3010 mutex_lock(&priv->mutex); 3011 3012 - if (!priv->_agn.offchan_tx_skb) 3013 - return -EINVAL; 3014 3015 priv->_agn.offchan_tx_skb = NULL; 3016 3017 ret = iwl_scan_cancel_timeout(priv, 200); 3018 if (ret) 3019 ret = -EIO; 3020 mutex_unlock(&priv->mutex); 3021 3022 return ret;
··· 3009 3010 mutex_lock(&priv->mutex); 3011 3012 + if (!priv->_agn.offchan_tx_skb) { 3013 + ret = -EINVAL; 3014 + goto unlock; 3015 + } 3016 3017 priv->_agn.offchan_tx_skb = NULL; 3018 3019 ret = iwl_scan_cancel_timeout(priv, 200); 3020 if (ret) 3021 ret = -EIO; 3022 + unlock: 3023 mutex_unlock(&priv->mutex); 3024 3025 return ret;
+3
drivers/net/wireless/orinoco/cfg.c
··· 153 priv->scan_request = request; 154 155 err = orinoco_hw_trigger_scan(priv, request->ssids); 156 157 return err; 158 }
··· 153 priv->scan_request = request; 154 155 err = orinoco_hw_trigger_scan(priv, request->ssids); 156 + /* On error we aren't processing the request */ 157 + if (err) 158 + priv->scan_request = NULL; 159 160 return err; 161 }
+1 -1
drivers/net/wireless/orinoco/main.c
··· 1376 1377 spin_lock_irqsave(&priv->scan_lock, flags); 1378 list_for_each_entry_safe(sd, temp, &priv->scan_list, list) { 1379 - spin_unlock_irqrestore(&priv->scan_lock, flags); 1380 1381 buf = sd->buf; 1382 len = sd->len; 1383 type = sd->type; 1384 1385 list_del(&sd->list); 1386 kfree(sd); 1387 1388 if (len > 0) {
··· 1376 1377 spin_lock_irqsave(&priv->scan_lock, flags); 1378 list_for_each_entry_safe(sd, temp, &priv->scan_list, list) { 1379 1380 buf = sd->buf; 1381 len = sd->len; 1382 type = sd->type; 1383 1384 list_del(&sd->list); 1385 + spin_unlock_irqrestore(&priv->scan_lock, flags); 1386 kfree(sd); 1387 1388 if (len > 0) {
+6 -1
drivers/net/wireless/rt2x00/rt2800usb.c
··· 719 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, 720 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, 721 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, 722 /* AzureWave */ 723 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, 724 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, ··· 914 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, 915 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, 916 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, 917 - { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, 918 /* AzureWave */ 919 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, 920 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, ··· 937 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, 938 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, 939 { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) }, 940 /* Encore */ 941 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, 942 /* Gemtek */ ··· 963 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 964 { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) }, 965 /* Planex */ 966 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, 967 /* Qcom */ 968 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, ··· 975 /* Sweex */ 976 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, 977 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, 978 /* Zyxel */ 979 { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, 980 #endif
··· 719 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, 720 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, 721 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, 722 + { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, 723 /* AzureWave */ 724 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, 725 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, ··· 913 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, 914 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, 915 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, 916 /* AzureWave */ 917 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, 918 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, ··· 937 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, 938 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, 939 { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) }, 940 + /* Edimax */ 941 + { USB_DEVICE(0x7392, 0x4085), USB_DEVICE_DATA(&rt2800usb_ops) }, 942 /* Encore */ 943 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, 944 /* Gemtek */ ··· 961 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 962 { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) }, 963 /* Planex */ 964 + { USB_DEVICE(0x2019, 0x5201), USB_DEVICE_DATA(&rt2800usb_ops) }, 965 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, 966 /* Qcom */ 967 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, ··· 972 /* Sweex */ 973 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, 974 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, 975 + /* Toshiba */ 976 + { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) }, 977 /* Zyxel */ 978 { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, 979 #endif
+15 -16
drivers/net/wireless/rtlwifi/efuse.c
··· 410 411 if (!efuse_shadow_update_chk(hw)) { 412 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 413 - memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 414 - (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 415 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 416 417 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ··· 446 447 if (word_en != 0x0F) { 448 u8 tmpdata[8]; 449 - memcpy((void *)tmpdata, 450 - (void *)(&rtlefuse-> 451 - efuse_map[EFUSE_MODIFY_MAP][base]), 8); 452 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, 453 ("U-efuse\n"), tmpdata, 8); 454 ··· 465 efuse_power_switch(hw, true, false); 466 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 467 468 - memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 469 - (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 470 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 471 472 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n")); ··· 479 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 480 481 if (rtlefuse->autoload_failflag == true) { 482 - memset((void *)(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]), 128, 483 - 0xFF); 484 } else 485 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 486 487 - memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 488 - (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 489 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 490 491 } ··· 693 if (offset > 15) 694 return false; 695 696 - memset((void *)data, PGPKT_DATA_SIZE * sizeof(u8), 0xff); 697 - memset((void *)tmpdata, PGPKT_DATA_SIZE * sizeof(u8), 0xff); 698 699 while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) { 700 if (readstate & PG_STATE_HEADER) { ··· 861 862 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); 863 864 - memset((void *)originaldata, 8 * sizeof(u8), 0xff); 865 866 if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) { 867 badworden = efuse_word_enable_data_write(hw, ··· 916 target_pkt.offset = offset; 917 target_pkt.word_en = word_en; 918 919 - memset((void *)target_pkt.data, 8 * sizeof(u8), 0xFF); 920 921 efuse_word_enable_data_read(word_en, data, target_pkt.data); 922 target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en); ··· 1021 u8 badworden = 0x0F; 1022 u8 tmpdata[8]; 1023 1024 - memset((void *)tmpdata, PGPKT_DATA_SIZE, 0xff); 1025 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 1026 ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr)); 1027
··· 410 411 if (!efuse_shadow_update_chk(hw)) { 412 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 413 + memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 414 + &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 415 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 416 417 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ··· 446 447 if (word_en != 0x0F) { 448 u8 tmpdata[8]; 449 + memcpy(tmpdata, 450 + &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base], 451 + 8); 452 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, 453 ("U-efuse\n"), tmpdata, 8); 454 ··· 465 efuse_power_switch(hw, true, false); 466 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 467 468 + memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 469 + &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 470 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 471 472 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n")); ··· 479 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 480 481 if (rtlefuse->autoload_failflag == true) { 482 + memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF, 128); 483 } else 484 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 485 486 + memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 487 + &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 488 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); 489 490 } ··· 694 if (offset > 15) 695 return false; 696 697 + memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8)); 698 + memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8)); 699 700 while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) { 701 if (readstate & PG_STATE_HEADER) { ··· 862 863 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); 864 865 + memset(originaldata, 0xff, 8 * sizeof(u8)); 866 867 if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) { 868 badworden = efuse_word_enable_data_write(hw, ··· 917 target_pkt.offset = offset; 918 target_pkt.word_en = word_en; 919 920 + memset(target_pkt.data, 0xFF, 8 * sizeof(u8)); 921 922 efuse_word_enable_data_read(word_en, data, target_pkt.data); 923 target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en); ··· 1022 u8 badworden = 0x0F; 1023 u8 tmpdata[8]; 1024 1025 + memset(tmpdata, 0xff, PGPKT_DATA_SIZE); 1026 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 1027 ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr)); 1028
+1
drivers/net/wireless/zd1211rw/zd_usb.c
··· 60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, 61 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, 62 { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 }, 63 { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, 64 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 65 /* ZD1211B */
··· 60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, 61 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, 62 { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 }, 63 + { USB_DEVICE(0x157e, 0x3207), .driver_info = DEVICE_ZD1211 }, 64 { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, 65 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 66 /* ZD1211B */
+1
include/linux/davinci_emac.h
··· 36 37 u8 rmii_en; 38 u8 version; 39 void (*interrupt_enable) (void); 40 void (*interrupt_disable) (void); 41 };
··· 36 37 u8 rmii_en; 38 u8 version; 39 + bool no_bd_ram; 40 void (*interrupt_enable) (void); 41 void (*interrupt_disable) (void); 42 };
+3
include/linux/ethtool.h
··· 648 649 #include <linux/rculist.h> 650 651 struct ethtool_rx_ntuple_flow_spec_container { 652 struct ethtool_rx_ntuple_flow_spec fs; 653 struct list_head list;
··· 648 649 #include <linux/rculist.h> 650 651 + /* needed by dev_disable_lro() */ 652 + extern int __ethtool_set_flags(struct net_device *dev, u32 flags); 653 + 654 struct ethtool_rx_ntuple_flow_spec_container { 655 struct ethtool_rx_ntuple_flow_spec fs; 656 struct list_head list;
+46 -5
include/linux/mlx4/device.h
··· 39 40 #include <asm/atomic.h> 41 42 enum { 43 MLX4_FLAG_MSI_X = 1 << 0, 44 MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, ··· 150 }; 151 152 enum mlx4_protocol { 153 - MLX4_PROTOCOL_IB, 154 - MLX4_PROTOCOL_EN, 155 }; 156 157 enum { ··· 178 MLX4_NO_VLAN_IDX = 0, 179 MLX4_VLAN_MISS_IDX, 180 MLX4_VLAN_REGULAR 181 }; 182 183 enum { ··· 236 int num_eqs; 237 int reserved_eqs; 238 int num_comp_vectors; 239 int num_mpts; 240 int num_mtt_segs; 241 int mtts_per_seg; ··· 259 u16 stat_rate_support; 260 int udp_rss; 261 int loopback_support; 262 u8 port_width_cap[MLX4_MAX_PORTS + 1]; 263 int max_gso_sz; 264 int reserved_qps_cnt[MLX4_NUM_QP_REGION]; ··· 351 struct mlx4_uar { 352 unsigned long pfn; 353 int index; 354 }; 355 356 struct mlx4_cq { ··· 443 unsigned long flags; 444 struct mlx4_caps caps; 445 struct radix_tree_root qp_table_tree; 446 - u32 rev_id; 447 char board_id[MLX4_BOARD_ID_LEN]; 448 }; 449 ··· 489 490 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); 491 void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); 492 493 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, 494 struct mlx4_mtt *mtt); ··· 538 int block_mcast_loopback, enum mlx4_protocol protocol); 539 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 540 enum mlx4_protocol protocol); 541 542 - int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index); 543 - void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index); 544 545 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 546 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); ··· 562 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); 563 int mlx4_SYNC_TPT(struct mlx4_dev *dev); 564 int mlx4_test_interrupts(struct mlx4_dev *dev); 565 566 #endif /* MLX4_DEVICE_H */
··· 39 40 #include <asm/atomic.h> 41 42 + #define MAX_MSIX_P_PORT 17 43 + #define MAX_MSIX 64 44 + #define MSIX_LEGACY_SZ 4 45 + #define MIN_MSIX_P_PORT 5 46 + 47 enum { 48 MLX4_FLAG_MSI_X = 1 << 0, 49 MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, ··· 145 }; 146 147 enum mlx4_protocol { 148 + MLX4_PROT_IB_IPV6 = 0, 149 + MLX4_PROT_ETH, 150 + MLX4_PROT_IB_IPV4, 151 + MLX4_PROT_FCOE 152 }; 153 154 enum { ··· 171 MLX4_NO_VLAN_IDX = 0, 172 MLX4_VLAN_MISS_IDX, 173 MLX4_VLAN_REGULAR 174 + }; 175 + 176 + enum mlx4_steer_type { 177 + MLX4_MC_STEER = 0, 178 + MLX4_UC_STEER, 179 + MLX4_NUM_STEERS 180 }; 181 182 enum { ··· 223 int num_eqs; 224 int reserved_eqs; 225 int num_comp_vectors; 226 + int comp_pool; 227 int num_mpts; 228 int num_mtt_segs; 229 int mtts_per_seg; ··· 245 u16 stat_rate_support; 246 int udp_rss; 247 int loopback_support; 248 + int vep_uc_steering; 249 + int vep_mc_steering; 250 + int wol; 251 u8 port_width_cap[MLX4_MAX_PORTS + 1]; 252 int max_gso_sz; 253 int reserved_qps_cnt[MLX4_NUM_QP_REGION]; ··· 334 struct mlx4_uar { 335 unsigned long pfn; 336 int index; 337 + struct list_head bf_list; 338 + unsigned free_bf_bmap; 339 + void __iomem *map; 340 + void __iomem *bf_map; 341 + }; 342 + 343 + struct mlx4_bf { 344 + unsigned long offset; 345 + int buf_size; 346 + struct mlx4_uar *uar; 347 + void __iomem *reg; 348 }; 349 350 struct mlx4_cq { ··· 415 unsigned long flags; 416 struct mlx4_caps caps; 417 struct radix_tree_root qp_table_tree; 418 + u8 rev_id; 419 char board_id[MLX4_BOARD_ID_LEN]; 420 }; 421 ··· 461 462 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); 463 void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); 464 + int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf); 465 + void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf); 466 467 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, 468 struct mlx4_mtt *mtt); ··· 508 int block_mcast_loopback, enum mlx4_protocol protocol); 509 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 510 enum mlx4_protocol protocol); 511 + int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); 512 + int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); 513 + int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); 514 + int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); 515 + int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); 516 517 + int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap); 518 + void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn); 519 + int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap); 520 521 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 522 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); ··· 526 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); 527 int mlx4_SYNC_TPT(struct mlx4_dev *dev); 528 int mlx4_test_interrupts(struct mlx4_dev *dev); 529 + int mlx4_assign_eq(struct mlx4_dev *dev, char* name , int* vector); 530 + void mlx4_release_eq(struct mlx4_dev *dev, int vec); 531 + 532 + int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); 533 + int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); 534 535 #endif /* MLX4_DEVICE_H */
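Besides the new protocol enum and blue flame hooks, the header exports mlx4_assign_eq()/mlx4_release_eq() so consumers can request dedicated completion vectors from the new comp_pool, beyond the MSIX_LEGACY_SZ shared ones. A hedged sketch of assumed per-ring usage (helper names and the fallback policy are hypothetical):

/*
 * Assumed usage sketch, not from the patch: ask for a dedicated MSI-X
 * completion vector for one ring; when the pool is exhausted the caller
 * keeps using the shared legacy vectors.
 */
#include <linux/kernel.h>
#include <linux/mlx4/device.h>

static void my_ring_pick_vector(struct mlx4_dev *dev, int ring_idx,
				int *vector, bool *dedicated)
{
	char name[32];

	snprintf(name, sizeof(name), "my-eth-%d", ring_idx);
	if (!mlx4_assign_eq(dev, name, vector)) {
		*dedicated = true;
		return;
	}

	*dedicated = false;
	*vector = ring_idx % dev->caps.num_comp_vectors;	/* shared fallback */
}

static void my_ring_drop_vector(struct mlx4_dev *dev, int vector, bool dedicated)
{
	if (dedicated)
		mlx4_release_eq(dev, vector);
}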
+1
include/linux/mlx4/qp.h
··· 303 304 enum { 305 MLX4_INLINE_ALIGN = 64, 306 }; 307 308 struct mlx4_wqe_inline_seg {
··· 303 304 enum { 305 MLX4_INLINE_ALIGN = 64, 306 + MLX4_INLINE_SEG = 1 << 31, 307 }; 308 309 struct mlx4_wqe_inline_seg {
+2 -1
include/net/cfg80211.h
··· 486 * @plink_state: mesh peer link state 487 * @signal: signal strength of last received packet in dBm 488 * @signal_avg: signal strength average in dBm 489 - * @txrate: current unicast bitrate to this station 490 * @rx_packets: packets received from this station 491 * @tx_packets: packets transmitted to this station 492 * @tx_retries: cumulative retry counts
··· 486 * @plink_state: mesh peer link state 487 * @signal: signal strength of last received packet in dBm 488 * @signal_avg: signal strength average in dBm 489 + * @txrate: current unicast bitrate from this station 490 + * @rxrate: current unicast bitrate to this station 491 * @rx_packets: packets received from this station 492 * @tx_packets: packets transmitted to this station 493 * @tx_retries: cumulative retry counts
+1 -1
include/net/ip6_route.h
··· 70 extern void ip6_route_input(struct sk_buff *skb); 71 72 extern struct dst_entry * ip6_route_output(struct net *net, 73 - struct sock *sk, 74 struct flowi6 *fl6); 75 76 extern int ip6_route_init(void);
··· 70 extern void ip6_route_input(struct sk_buff *skb); 71 72 extern struct dst_entry * ip6_route_output(struct net *net, 73 + const struct sock *sk, 74 struct flowi6 *fl6); 75 76 extern int ip6_route_init(void);
+13 -5
include/net/ip_fib.h
··· 51 struct fib_info *nh_parent; 52 unsigned nh_flags; 53 unsigned char nh_scope; 54 - unsigned char nh_cfg_scope; 55 #ifdef CONFIG_IP_ROUTE_MULTIPATH 56 int nh_weight; 57 int nh_power; ··· 61 int nh_oif; 62 __be32 nh_gw; 63 __be32 nh_saddr; 64 }; 65 66 /* ··· 74 struct net *fib_net; 75 int fib_treeref; 76 atomic_t fib_clntref; 77 - int fib_dead; 78 unsigned fib_flags; 79 - int fib_protocol; 80 __be32 fib_prefsrc; 81 u32 fib_priority; 82 u32 *fib_metrics; ··· 142 143 #endif /* CONFIG_IP_ROUTE_MULTIPATH */ 144 145 - #define FIB_RES_SADDR(res) (FIB_RES_NH(res).nh_saddr) 146 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw) 147 #define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev) 148 #define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif) 149 150 - #define FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : FIB_RES_SADDR(res)) 151 152 struct fib_table { 153 struct hlist_node tb_hlist;
··· 51 struct fib_info *nh_parent; 52 unsigned nh_flags; 53 unsigned char nh_scope; 54 #ifdef CONFIG_IP_ROUTE_MULTIPATH 55 int nh_weight; 56 int nh_power; ··· 62 int nh_oif; 63 __be32 nh_gw; 64 __be32 nh_saddr; 65 + int nh_saddr_genid; 66 }; 67 68 /* ··· 74 struct net *fib_net; 75 int fib_treeref; 76 atomic_t fib_clntref; 77 unsigned fib_flags; 78 + unsigned char fib_dead; 79 + unsigned char fib_protocol; 80 + unsigned char fib_scope; 81 __be32 fib_prefsrc; 82 u32 fib_priority; 83 u32 *fib_metrics; ··· 141 142 #endif /* CONFIG_IP_ROUTE_MULTIPATH */ 143 144 + extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); 145 + 146 + #define FIB_RES_SADDR(net, res) \ 147 + ((FIB_RES_NH(res).nh_saddr_genid == \ 148 + atomic_read(&(net)->ipv4.dev_addr_genid)) ? \ 149 + FIB_RES_NH(res).nh_saddr : \ 150 + fib_info_update_nh_saddr((net), &FIB_RES_NH(res))) 151 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw) 152 #define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev) 153 #define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif) 154 155 + #define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \ 156 + FIB_RES_SADDR(net, res)) 157 158 struct fib_table { 159 struct hlist_node tb_hlist;
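The nexthop source address is now cached together with a generation id taken from net->ipv4.dev_addr_genid, so FIB_RES_SADDR() recomputes it lazily via fib_info_update_nh_saddr() only after an address change, replacing the old eager per-device refresh. A consumer-side sketch under those assumptions ('net' and 'res' are taken to come from a prior fib_lookup(); the helper name is invented):

/*
 * Consumer-side sketch only: the macro returns the route's fib_prefsrc when
 * one is configured, otherwise the cached nexthop source address, refreshing
 * it transparently if the per-netns address generation changed.
 */
#include <net/ip_fib.h>

static __be32 my_pick_saddr(struct net *net, struct fib_result *res)
{
	return FIB_RES_PREFSRC(net, *res);
}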
+1 -1
include/net/mac80211.h
··· 1160 * @napi_weight: weight used for NAPI polling. You must specify an 1161 * appropriate value here if a napi_poll operation is provided 1162 * by your driver. 1163 - 1164 * @max_rx_aggregation_subframes: maximum buffer size (number of 1165 * sub-frames) to be used for A-MPDU block ack receiver 1166 * aggregation.
··· 1160 * @napi_weight: weight used for NAPI polling. You must specify an 1161 * appropriate value here if a napi_poll operation is provided 1162 * by your driver. 1163 + * 1164 * @max_rx_aggregation_subframes: maximum buffer size (number of 1165 * sub-frames) to be used for A-MPDU block ack receiver 1166 * aggregation.
+1
include/net/netns/ipv4.h
··· 55 int current_rt_cache_rebuild_count; 56 57 atomic_t rt_genid; 58 59 #ifdef CONFIG_IP_MROUTE 60 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
··· 55 int current_rt_cache_rebuild_count; 56 57 atomic_t rt_genid; 58 + atomic_t dev_addr_genid; 59 60 #ifdef CONFIG_IP_MROUTE 61 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+3 -2
include/net/route.h
··· 207 208 struct in_ifaddr; 209 extern void fib_add_ifaddr(struct in_ifaddr *); 210 211 static inline void ip_rt_put(struct rtable * rt) 212 { ··· 270 struct flowi4 fl4 = { 271 .flowi4_oif = rt->rt_oif, 272 .flowi4_mark = rt->rt_mark, 273 - .daddr = rt->rt_key_dst, 274 - .saddr = rt->rt_key_src, 275 .flowi4_tos = rt->rt_tos, 276 .flowi4_proto = protocol, 277 .fl4_sport = sport,
··· 207 208 struct in_ifaddr; 209 extern void fib_add_ifaddr(struct in_ifaddr *); 210 + extern void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *); 211 212 static inline void ip_rt_put(struct rtable * rt) 213 { ··· 269 struct flowi4 fl4 = { 270 .flowi4_oif = rt->rt_oif, 271 .flowi4_mark = rt->rt_mark, 272 + .daddr = rt->rt_dst, 273 + .saddr = rt->rt_src, 274 .flowi4_tos = rt->rt_tos, 275 .flowi4_proto = protocol, 276 .fl4_sport = sport,
+4 -4
include/net/sch_generic.h
··· 25 enum qdisc_state_t { 26 __QDISC_STATE_SCHED, 27 __QDISC_STATE_DEACTIVATED, 28 }; 29 30 /* ··· 33 */ 34 enum qdisc___state_t { 35 __QDISC___STATE_RUNNING = 1, 36 - __QDISC___STATE_THROTTLED = 2, 37 }; 38 39 struct qdisc_size_table { ··· 106 107 static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) 108 { 109 - return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false; 110 } 111 112 static inline void qdisc_throttled(struct Qdisc *qdisc) 113 { 114 - qdisc->__state |= __QDISC___STATE_THROTTLED; 115 } 116 117 static inline void qdisc_unthrottled(struct Qdisc *qdisc) 118 { 119 - qdisc->__state &= ~__QDISC___STATE_THROTTLED; 120 } 121 122 struct Qdisc_class_ops {
··· 25 enum qdisc_state_t { 26 __QDISC_STATE_SCHED, 27 __QDISC_STATE_DEACTIVATED, 28 + __QDISC_STATE_THROTTLED, 29 }; 30 31 /* ··· 32 */ 33 enum qdisc___state_t { 34 __QDISC___STATE_RUNNING = 1, 35 }; 36 37 struct qdisc_size_table { ··· 106 107 static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) 108 { 109 + return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false; 110 } 111 112 static inline void qdisc_throttled(struct Qdisc *qdisc) 113 { 114 + set_bit(__QDISC_STATE_THROTTLED, &qdisc->state); 115 } 116 117 static inline void qdisc_unthrottled(struct Qdisc *qdisc) 118 { 119 + clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state); 120 } 121 122 struct Qdisc_class_ops {
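Moving THROTTLED out of the non-atomic __state word into the qdisc->state bitmap means the throttle helpers now use set_bit()/clear_bit() and can no longer race with the RUNNING-state update done on the dequeue path. An illustrative sketch of how a shaping qdisc would use the helpers (not from the patch; real qdiscs follow roughly this shape):

/*
 * Illustrative only: defer dequeue while throttled and let the qdisc
 * watchdog clear the bit when it fires; both sides now use atomic bitops
 * on qdisc->state.
 */
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

static struct sk_buff *my_dequeue(struct Qdisc *sch)
{
	if (qdisc_is_throttled(sch))
		return NULL;		/* wait for the watchdog */

	/* ... the real dequeue work would go here ... */
	return NULL;
}

static void my_defer(struct Qdisc *sch, struct qdisc_watchdog *wd,
		     psched_time_t expires)
{
	qdisc_throttled(sch);			/* atomic set_bit() */
	qdisc_watchdog_schedule(wd, expires);	/* watchdog clears the bit later */
}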
+1 -1
net/bridge/br_multicast.c
··· 445 ip6h->payload_len = htons(8 + sizeof(*mldq)); 446 ip6h->nexthdr = IPPROTO_HOPOPTS; 447 ip6h->hop_limit = 1; 448 ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 449 &ip6h->saddr); 450 - ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 451 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 452 453 hopopt = (u8 *)(ip6h + 1);
··· 445 ip6h->payload_len = htons(8 + sizeof(*mldq)); 446 ip6h->nexthdr = IPPROTO_HOPOPTS; 447 ip6h->hop_limit = 1; 448 + ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 449 ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 450 &ip6h->saddr); 451 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 452 453 hopopt = (u8 *)(ip6h + 1);
+11 -8
net/core/dev.c
··· 1353 */ 1354 void dev_disable_lro(struct net_device *dev) 1355 { 1356 - if (dev->ethtool_ops && dev->ethtool_ops->get_flags && 1357 - dev->ethtool_ops->set_flags) { 1358 - u32 flags = dev->ethtool_ops->get_flags(dev); 1359 - if (flags & ETH_FLAG_LRO) { 1360 - flags &= ~ETH_FLAG_LRO; 1361 - dev->ethtool_ops->set_flags(dev, flags); 1362 - } 1363 - } 1364 WARN_ON(dev->features & NETIF_F_LRO); 1365 } 1366 EXPORT_SYMBOL(dev_disable_lro);
··· 1353 */ 1354 void dev_disable_lro(struct net_device *dev) 1355 { 1356 + u32 flags; 1357 + 1358 + if (dev->ethtool_ops && dev->ethtool_ops->get_flags) 1359 + flags = dev->ethtool_ops->get_flags(dev); 1360 + else 1361 + flags = ethtool_op_get_flags(dev); 1362 + 1363 + if (!(flags & ETH_FLAG_LRO)) 1364 + return; 1365 + 1366 + __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO); 1367 WARN_ON(dev->features & NETIF_F_LRO); 1368 } 1369 EXPORT_SYMBOL(dev_disable_lro);
+1 -1
net/core/ethtool.c
··· 513 } 514 } 515 516 - static int __ethtool_set_flags(struct net_device *dev, u32 data) 517 { 518 u32 changed; 519
··· 513 } 514 } 515 516 + int __ethtool_set_flags(struct net_device *dev, u32 data) 517 { 518 u32 changed; 519
+29 -1
net/ipv4/devinet.c
··· 64 #include <net/rtnetlink.h> 65 #include <net/net_namespace.h> 66 67 static struct ipv4_devconf ipv4_devconf = { 68 .data = { 69 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1, ··· 152 result = dev; 153 break; 154 } 155 } 156 if (result && devref) 157 dev_hold(result); ··· 361 } 362 } 363 364 /* 2. Unlink it */ 365 366 *ifap = ifa1->ifa_next; ··· 391 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1); 392 393 if (promote) { 394 395 if (prev_prom) { 396 prev_prom->ifa_next = promote->ifa_next; ··· 403 rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid); 404 blocking_notifier_call_chain(&inetaddr_chain, 405 NETDEV_UP, promote); 406 - for (ifa = promote->ifa_next; ifa; ifa = ifa->ifa_next) { 407 if (ifa1->ifa_mask != ifa->ifa_mask || 408 !inet_ifa_match(ifa1->ifa_address, ifa)) 409 continue;
··· 64 #include <net/rtnetlink.h> 65 #include <net/net_namespace.h> 66 67 + #include "fib_lookup.h" 68 + 69 static struct ipv4_devconf ipv4_devconf = { 70 .data = { 71 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1, ··· 150 result = dev; 151 break; 152 } 153 + } 154 + if (!result) { 155 + struct flowi4 fl4 = { .daddr = addr }; 156 + struct fib_result res = { 0 }; 157 + struct fib_table *local; 158 + 159 + /* Fallback to FIB local table so that communication 160 + * over loopback subnets work. 161 + */ 162 + local = fib_get_table(net, RT_TABLE_LOCAL); 163 + if (local && 164 + !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) && 165 + res.type == RTN_LOCAL) 166 + result = FIB_RES_DEV(res); 167 } 168 if (result && devref) 169 dev_hold(result); ··· 345 } 346 } 347 348 + /* On promotion all secondaries from subnet are changing 349 + * the primary IP, we must remove all their routes silently 350 + * and later to add them back with new prefsrc. Do this 351 + * while all addresses are on the device list. 352 + */ 353 + for (ifa = promote; ifa; ifa = ifa->ifa_next) { 354 + if (ifa1->ifa_mask == ifa->ifa_mask && 355 + inet_ifa_match(ifa1->ifa_address, ifa)) 356 + fib_del_ifaddr(ifa, ifa1); 357 + } 358 + 359 /* 2. Unlink it */ 360 361 *ifap = ifa1->ifa_next; ··· 364 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1); 365 366 if (promote) { 367 + struct in_ifaddr *next_sec = promote->ifa_next; 368 369 if (prev_prom) { 370 prev_prom->ifa_next = promote->ifa_next; ··· 375 rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid); 376 blocking_notifier_call_chain(&inetaddr_chain, 377 NETDEV_UP, promote); 378 + for (ifa = next_sec; ifa; ifa = ifa->ifa_next) { 379 if (ifa1->ifa_mask != ifa->ifa_mask || 380 !inet_ifa_match(ifa1->ifa_address, ifa)) 381 continue;
+95 -17
net/ipv4/fib_frontend.c
··· 228 if (res.type != RTN_LOCAL || !accept_local) 229 goto e_inval; 230 } 231 - *spec_dst = FIB_RES_PREFSRC(res); 232 fib_combine_itag(itag, &res); 233 dev_match = false; 234 ··· 258 ret = 0; 259 if (fib_lookup(net, &fl4, &res) == 0) { 260 if (res.type == RTN_UNICAST) { 261 - *spec_dst = FIB_RES_PREFSRC(res); 262 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; 263 } 264 } ··· 722 } 723 } 724 725 - static void fib_del_ifaddr(struct in_ifaddr *ifa) 726 { 727 struct in_device *in_dev = ifa->ifa_dev; 728 struct net_device *dev = in_dev->dev; 729 struct in_ifaddr *ifa1; 730 - struct in_ifaddr *prim = ifa; 731 __be32 brd = ifa->ifa_address | ~ifa->ifa_mask; 732 __be32 any = ifa->ifa_address & ifa->ifa_mask; 733 #define LOCAL_OK 1 ··· 740 #define BRD0_OK 4 741 #define BRD1_OK 8 742 unsigned ok = 0; 743 744 - if (!(ifa->ifa_flags & IFA_F_SECONDARY)) 745 - fib_magic(RTM_DELROUTE, 746 - dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, 747 - any, ifa->ifa_prefixlen, prim); 748 - else { 749 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); 750 if (prim == NULL) { 751 printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n"); 752 return; 753 } 754 } 755 756 /* Deletion is more complicated than add. ··· 769 */ 770 771 for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) { 772 if (ifa->ifa_local == ifa1->ifa_local) 773 ok |= LOCAL_OK; 774 if (ifa->ifa_broadcast == ifa1->ifa_broadcast) ··· 820 ok |= BRD1_OK; 821 if (any == ifa1->ifa_broadcast) 822 ok |= BRD0_OK; 823 } 824 825 if (!(ok & BRD_OK)) 826 fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim); 827 - if (!(ok & BRD1_OK)) 828 - fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim); 829 - if (!(ok & BRD0_OK)) 830 - fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim); 831 if (!(ok & LOCAL_OK)) { 832 fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim); 833 834 /* Check, that this local address finally disappeared. */ 835 - if (inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) { 836 /* And the last, but not the least thing. 837 * We must flush stray FIB entries. 838 * ··· 960 { 961 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 962 struct net_device *dev = ifa->ifa_dev->dev; 963 964 switch (event) { 965 case NETDEV_UP: ··· 968 #ifdef CONFIG_IP_ROUTE_MULTIPATH 969 fib_sync_up(dev); 970 #endif 971 - fib_update_nh_saddrs(dev); 972 rt_cache_flush(dev_net(dev), -1); 973 break; 974 case NETDEV_DOWN: 975 - fib_del_ifaddr(ifa); 976 - fib_update_nh_saddrs(dev); 977 if (ifa->ifa_dev->ifa_list == NULL) { 978 /* Last address was deleted from this interface. 979 * Disable IP. ··· 991 { 992 struct net_device *dev = ptr; 993 struct in_device *in_dev = __in_dev_get_rtnl(dev); 994 995 if (event == NETDEV_UNREGISTER) { 996 fib_disable_ip(dev, 2, -1); ··· 1009 #ifdef CONFIG_IP_ROUTE_MULTIPATH 1010 fib_sync_up(dev); 1011 #endif 1012 rt_cache_flush(dev_net(dev), -1); 1013 break; 1014 case NETDEV_DOWN:
··· 228 if (res.type != RTN_LOCAL || !accept_local) 229 goto e_inval; 230 } 231 + *spec_dst = FIB_RES_PREFSRC(net, res); 232 fib_combine_itag(itag, &res); 233 dev_match = false; 234 ··· 258 ret = 0; 259 if (fib_lookup(net, &fl4, &res) == 0) { 260 if (res.type == RTN_UNICAST) { 261 + *spec_dst = FIB_RES_PREFSRC(net, res); 262 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; 263 } 264 } ··· 722 } 723 } 724 725 + /* Delete primary or secondary address. 726 + * Optionally, on secondary address promotion consider the addresses 727 + * from subnet iprim as deleted, even if they are in device list. 728 + * In this case the secondary ifa can be in device list. 729 + */ 730 + void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim) 731 { 732 struct in_device *in_dev = ifa->ifa_dev; 733 struct net_device *dev = in_dev->dev; 734 struct in_ifaddr *ifa1; 735 + struct in_ifaddr *prim = ifa, *prim1 = NULL; 736 __be32 brd = ifa->ifa_address | ~ifa->ifa_mask; 737 __be32 any = ifa->ifa_address & ifa->ifa_mask; 738 #define LOCAL_OK 1 ··· 735 #define BRD0_OK 4 736 #define BRD1_OK 8 737 unsigned ok = 0; 738 + int subnet = 0; /* Primary network */ 739 + int gone = 1; /* Address is missing */ 740 + int same_prefsrc = 0; /* Another primary with same IP */ 741 742 + if (ifa->ifa_flags & IFA_F_SECONDARY) { 743 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); 744 if (prim == NULL) { 745 printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n"); 746 return; 747 } 748 + if (iprim && iprim != prim) { 749 + printk(KERN_WARNING "fib_del_ifaddr: bug: iprim != prim\n"); 750 + return; 751 + } 752 + } else if (!ipv4_is_zeronet(any) && 753 + (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) { 754 + fib_magic(RTM_DELROUTE, 755 + dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST, 756 + any, ifa->ifa_prefixlen, prim); 757 + subnet = 1; 758 } 759 760 /* Deletion is more complicated than add. ··· 755 */ 756 757 for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) { 758 + if (ifa1 == ifa) { 759 + /* promotion, keep the IP */ 760 + gone = 0; 761 + continue; 762 + } 763 + /* Ignore IFAs from our subnet */ 764 + if (iprim && ifa1->ifa_mask == iprim->ifa_mask && 765 + inet_ifa_match(ifa1->ifa_address, iprim)) 766 + continue; 767 + 768 + /* Ignore ifa1 if it uses different primary IP (prefsrc) */ 769 + if (ifa1->ifa_flags & IFA_F_SECONDARY) { 770 + /* Another address from our subnet? */ 771 + if (ifa1->ifa_mask == prim->ifa_mask && 772 + inet_ifa_match(ifa1->ifa_address, prim)) 773 + prim1 = prim; 774 + else { 775 + /* We reached the secondaries, so 776 + * same_prefsrc should be determined. 
777 + */ 778 + if (!same_prefsrc) 779 + continue; 780 + /* Search new prim1 if ifa1 is not 781 + * using the current prim1 782 + */ 783 + if (!prim1 || 784 + ifa1->ifa_mask != prim1->ifa_mask || 785 + !inet_ifa_match(ifa1->ifa_address, prim1)) 786 + prim1 = inet_ifa_byprefix(in_dev, 787 + ifa1->ifa_address, 788 + ifa1->ifa_mask); 789 + if (!prim1) 790 + continue; 791 + if (prim1->ifa_local != prim->ifa_local) 792 + continue; 793 + } 794 + } else { 795 + if (prim->ifa_local != ifa1->ifa_local) 796 + continue; 797 + prim1 = ifa1; 798 + if (prim != prim1) 799 + same_prefsrc = 1; 800 + } 801 if (ifa->ifa_local == ifa1->ifa_local) 802 ok |= LOCAL_OK; 803 if (ifa->ifa_broadcast == ifa1->ifa_broadcast) ··· 763 ok |= BRD1_OK; 764 if (any == ifa1->ifa_broadcast) 765 ok |= BRD0_OK; 766 + /* primary has network specific broadcasts */ 767 + if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) { 768 + __be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask; 769 + __be32 any1 = ifa1->ifa_address & ifa1->ifa_mask; 770 + 771 + if (!ipv4_is_zeronet(any1)) { 772 + if (ifa->ifa_broadcast == brd1 || 773 + ifa->ifa_broadcast == any1) 774 + ok |= BRD_OK; 775 + if (brd == brd1 || brd == any1) 776 + ok |= BRD1_OK; 777 + if (any == brd1 || any == any1) 778 + ok |= BRD0_OK; 779 + } 780 + } 781 } 782 783 if (!(ok & BRD_OK)) 784 fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim); 785 + if (subnet && ifa->ifa_prefixlen < 31) { 786 + if (!(ok & BRD1_OK)) 787 + fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim); 788 + if (!(ok & BRD0_OK)) 789 + fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim); 790 + } 791 if (!(ok & LOCAL_OK)) { 792 fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim); 793 794 /* Check, that this local address finally disappeared. */ 795 + if (gone && 796 + inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) { 797 /* And the last, but not the least thing. 798 * We must flush stray FIB entries. 799 * ··· 885 { 886 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 887 struct net_device *dev = ifa->ifa_dev->dev; 888 + struct net *net = dev_net(dev); 889 890 switch (event) { 891 case NETDEV_UP: ··· 892 #ifdef CONFIG_IP_ROUTE_MULTIPATH 893 fib_sync_up(dev); 894 #endif 895 + atomic_inc(&net->ipv4.dev_addr_genid); 896 rt_cache_flush(dev_net(dev), -1); 897 break; 898 case NETDEV_DOWN: 899 + fib_del_ifaddr(ifa, NULL); 900 + atomic_inc(&net->ipv4.dev_addr_genid); 901 if (ifa->ifa_dev->ifa_list == NULL) { 902 /* Last address was deleted from this interface. 903 * Disable IP. ··· 915 { 916 struct net_device *dev = ptr; 917 struct in_device *in_dev = __in_dev_get_rtnl(dev); 918 + struct net *net = dev_net(dev); 919 920 if (event == NETDEV_UNREGISTER) { 921 fib_disable_ip(dev, 2, -1); ··· 932 #ifdef CONFIG_IP_ROUTE_MULTIPATH 933 fib_sync_up(dev); 934 #endif 935 + atomic_inc(&net->ipv4.dev_addr_genid); 936 rt_cache_flush(dev_net(dev), -1); 937 break; 938 case NETDEV_DOWN:
+1 -2
net/ipv4/fib_lookup.h
··· 10 struct fib_info *fa_info; 11 u8 fa_tos; 12 u8 fa_type; 13 - u8 fa_scope; 14 u8 fa_state; 15 struct rcu_head rcu; 16 }; ··· 28 extern struct fib_info *fib_create_info(struct fib_config *cfg); 29 extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi); 30 extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 31 - u32 tb_id, u8 type, u8 scope, __be32 dst, 32 int dst_len, u8 tos, struct fib_info *fi, 33 unsigned int); 34 extern void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
··· 10 struct fib_info *fa_info; 11 u8 fa_tos; 12 u8 fa_type; 13 u8 fa_state; 14 struct rcu_head rcu; 15 }; ··· 29 extern struct fib_info *fib_create_info(struct fib_config *cfg); 30 extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi); 31 extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 32 + u32 tb_id, u8 type, __be32 dst, 33 int dst_len, u8 tos, struct fib_info *fi, 34 unsigned int); 35 extern void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
+19 -28
net/ipv4/fib_semantics.c
··· 222 unsigned int mask = (fib_info_hash_size - 1); 223 unsigned int val = fi->fib_nhs; 224 225 - val ^= fi->fib_protocol; 226 val ^= (__force u32)fi->fib_prefsrc; 227 val ^= fi->fib_priority; 228 for_nexthops(fi) { ··· 248 if (fi->fib_nhs != nfi->fib_nhs) 249 continue; 250 if (nfi->fib_protocol == fi->fib_protocol && 251 nfi->fib_prefsrc == fi->fib_prefsrc && 252 nfi->fib_priority == fi->fib_priority && 253 memcmp(nfi->fib_metrics, fi->fib_metrics, 254 - sizeof(fi->fib_metrics)) == 0 && 255 ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 && 256 (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0)) 257 return fi; ··· 329 goto errout; 330 331 err = fib_dump_info(skb, info->pid, seq, event, tb_id, 332 - fa->fa_type, fa->fa_scope, key, dst_len, 333 fa->fa_tos, fa->fa_info, nlm_flags); 334 if (err < 0) { 335 /* -EMSGSIZE implies BUG in fib_nlmsg_size() */ ··· 696 fib_info_hash_free(old_laddrhash, bytes); 697 } 698 699 struct fib_info *fib_create_info(struct fib_config *cfg) 700 { 701 int err; ··· 764 765 fi->fib_net = hold_net(net); 766 fi->fib_protocol = cfg->fc_protocol; 767 fi->fib_flags = cfg->fc_flags; 768 fi->fib_priority = cfg->fc_priority; 769 fi->fib_prefsrc = cfg->fc_prefsrc; ··· 866 } 867 868 change_nexthops(fi) { 869 - nexthop_nh->nh_cfg_scope = cfg->fc_scope; 870 - nexthop_nh->nh_saddr = inet_select_addr(nexthop_nh->nh_dev, 871 - nexthop_nh->nh_gw, 872 - nexthop_nh->nh_cfg_scope); 873 } endfor_nexthops(fi) 874 875 link_it: ··· 915 } 916 917 int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 918 - u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos, 919 struct fib_info *fi, unsigned int flags) 920 { 921 struct nlmsghdr *nlh; ··· 937 NLA_PUT_U32(skb, RTA_TABLE, tb_id); 938 rtm->rtm_type = type; 939 rtm->rtm_flags = fi->fib_flags; 940 - rtm->rtm_scope = scope; 941 rtm->rtm_protocol = fi->fib_protocol; 942 943 if (rtm->rtm_dst_len) ··· 1093 list_for_each_entry_rcu(fa, fa_head, fa_list) { 1094 struct fib_info *next_fi = fa->fa_info; 1095 1096 - if (fa->fa_scope != res->scope || 1097 fa->fa_type != RTN_UNICAST) 1098 continue; 1099 ··· 1135 tb->tb_default = last_idx; 1136 out: 1137 return; 1138 - } 1139 - 1140 - void fib_update_nh_saddrs(struct net_device *dev) 1141 - { 1142 - struct hlist_head *head; 1143 - struct hlist_node *node; 1144 - struct fib_nh *nh; 1145 - unsigned int hash; 1146 - 1147 - hash = fib_devindex_hashfn(dev->ifindex); 1148 - head = &fib_info_devhash[hash]; 1149 - hlist_for_each_entry(nh, node, head, nh_hash) { 1150 - if (nh->nh_dev != dev) 1151 - continue; 1152 - nh->nh_saddr = inet_select_addr(nh->nh_dev, 1153 - nh->nh_gw, 1154 - nh->nh_cfg_scope); 1155 - } 1156 } 1157 1158 #ifdef CONFIG_IP_ROUTE_MULTIPATH
··· 222 unsigned int mask = (fib_info_hash_size - 1); 223 unsigned int val = fi->fib_nhs; 224 225 + val ^= (fi->fib_protocol << 8) | fi->fib_scope; 226 val ^= (__force u32)fi->fib_prefsrc; 227 val ^= fi->fib_priority; 228 for_nexthops(fi) { ··· 248 if (fi->fib_nhs != nfi->fib_nhs) 249 continue; 250 if (nfi->fib_protocol == fi->fib_protocol && 251 + nfi->fib_scope == fi->fib_scope && 252 nfi->fib_prefsrc == fi->fib_prefsrc && 253 nfi->fib_priority == fi->fib_priority && 254 memcmp(nfi->fib_metrics, fi->fib_metrics, 255 + sizeof(u32) * RTAX_MAX) == 0 && 256 ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 && 257 (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0)) 258 return fi; ··· 328 goto errout; 329 330 err = fib_dump_info(skb, info->pid, seq, event, tb_id, 331 + fa->fa_type, key, dst_len, 332 fa->fa_tos, fa->fa_info, nlm_flags); 333 if (err < 0) { 334 /* -EMSGSIZE implies BUG in fib_nlmsg_size() */ ··· 695 fib_info_hash_free(old_laddrhash, bytes); 696 } 697 698 + __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh) 699 + { 700 + nh->nh_saddr = inet_select_addr(nh->nh_dev, 701 + nh->nh_gw, 702 + nh->nh_parent->fib_scope); 703 + nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid); 704 + 705 + return nh->nh_saddr; 706 + } 707 + 708 struct fib_info *fib_create_info(struct fib_config *cfg) 709 { 710 int err; ··· 753 754 fi->fib_net = hold_net(net); 755 fi->fib_protocol = cfg->fc_protocol; 756 + fi->fib_scope = cfg->fc_scope; 757 fi->fib_flags = cfg->fc_flags; 758 fi->fib_priority = cfg->fc_priority; 759 fi->fib_prefsrc = cfg->fc_prefsrc; ··· 854 } 855 856 change_nexthops(fi) { 857 + fib_info_update_nh_saddr(net, nexthop_nh); 858 } endfor_nexthops(fi) 859 860 link_it: ··· 906 } 907 908 int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 909 + u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos, 910 struct fib_info *fi, unsigned int flags) 911 { 912 struct nlmsghdr *nlh; ··· 928 NLA_PUT_U32(skb, RTA_TABLE, tb_id); 929 rtm->rtm_type = type; 930 rtm->rtm_flags = fi->fib_flags; 931 + rtm->rtm_scope = fi->fib_scope; 932 rtm->rtm_protocol = fi->fib_protocol; 933 934 if (rtm->rtm_dst_len) ··· 1084 list_for_each_entry_rcu(fa, fa_head, fa_list) { 1085 struct fib_info *next_fi = fa->fa_info; 1086 1087 + if (next_fi->fib_scope != res->scope || 1088 fa->fa_type != RTN_UNICAST) 1089 continue; 1090 ··· 1126 tb->tb_default = last_idx; 1127 out: 1128 return; 1129 } 1130 1131 #ifdef CONFIG_IP_ROUTE_MULTIPATH
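Three separate fixes are visible in this fib_semantics.c hunk. First, the metrics comparison in fib_find_info(): fib_metrics is now a pointer to the metrics array, so memcmp over sizeof(fi->fib_metrics) only compared a pointer's worth of bytes; comparing sizeof(u32) * RTAX_MAX restores the full-array check (the "ipv4: fix fib metrics" change from the shortlog). Second, the route scope moves into struct fib_info as fib_scope and is folded into both the hash and the equality test, so infos that differ only in scope are no longer merged. Third, the cached nexthop source address is now stamped with a generation number: fib_info_update_nh_saddr() recomputes nh_saddr from the parent's fib_scope and records net->ipv4.dev_addr_genid, replacing the old nh_cfg_scope field and the fib_update_nh_saddrs() rescan. A sketch of how that stamp is meant to be consumed; nh_saddr_cached() is an illustrative name, and the real check sits behind the FIB_RES_* macros in ip_fib.h, which are not part of this section:

    /* Sketch, assuming only what the hunk above shows: return the cached
     * nexthop source address, refreshing it only if an address change has
     * bumped the per-namespace generation counter since it was stamped.
     */
    static __be32 nh_saddr_cached(struct net *net, struct fib_nh *nh)
    {
            if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
                    return nh->nh_saddr;                  /* still valid */
            return fib_info_update_nh_saddr(net, nh);     /* refresh + restamp */
    }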
+6 -8
net/ipv4/fib_trie.c
··· 1245 if (fa->fa_info->fib_priority != fi->fib_priority) 1246 break; 1247 if (fa->fa_type == cfg->fc_type && 1248 - fa->fa_scope == cfg->fc_scope && 1249 fa->fa_info == fi) { 1250 fa_match = fa; 1251 break; ··· 1270 new_fa->fa_tos = fa->fa_tos; 1271 new_fa->fa_info = fi; 1272 new_fa->fa_type = cfg->fc_type; 1273 - new_fa->fa_scope = cfg->fc_scope; 1274 state = fa->fa_state; 1275 new_fa->fa_state = state & ~FA_S_ACCESSED; 1276 ··· 1306 new_fa->fa_info = fi; 1307 new_fa->fa_tos = tos; 1308 new_fa->fa_type = cfg->fc_type; 1309 - new_fa->fa_scope = cfg->fc_scope; 1310 new_fa->fa_state = 0; 1311 /* 1312 * Insert new entry to the list. ··· 1359 1360 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) 1361 continue; 1362 - if (fa->fa_scope < flp->flowi4_scope) 1363 continue; 1364 fib_alias_accessed(fa); 1365 err = fib_props[fa->fa_type].error; ··· 1385 res->prefixlen = plen; 1386 res->nh_sel = nhsel; 1387 res->type = fa->fa_type; 1388 - res->scope = fa->fa_scope; 1389 res->fi = fi; 1390 res->table = tb; 1391 res->fa_head = &li->falh; ··· 1661 1662 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) && 1663 (cfg->fc_scope == RT_SCOPE_NOWHERE || 1664 - fa->fa_scope == cfg->fc_scope) && 1665 (!cfg->fc_protocol || 1666 fi->fib_protocol == cfg->fc_protocol) && 1667 fib_nh_match(cfg, fi) == 0) { ··· 1860 RTM_NEWROUTE, 1861 tb->tb_id, 1862 fa->fa_type, 1863 - fa->fa_scope, 1864 xkey, 1865 plen, 1866 fa->fa_tos, ··· 2380 seq_indent(seq, iter->depth+1); 2381 seq_printf(seq, " /%d %s %s", li->plen, 2382 rtn_scope(buf1, sizeof(buf1), 2383 - fa->fa_scope), 2384 rtn_type(buf2, sizeof(buf2), 2385 fa->fa_type)); 2386 if (fa->fa_tos)
··· 1245 if (fa->fa_info->fib_priority != fi->fib_priority) 1246 break; 1247 if (fa->fa_type == cfg->fc_type && 1248 fa->fa_info == fi) { 1249 fa_match = fa; 1250 break; ··· 1271 new_fa->fa_tos = fa->fa_tos; 1272 new_fa->fa_info = fi; 1273 new_fa->fa_type = cfg->fc_type; 1274 state = fa->fa_state; 1275 new_fa->fa_state = state & ~FA_S_ACCESSED; 1276 ··· 1308 new_fa->fa_info = fi; 1309 new_fa->fa_tos = tos; 1310 new_fa->fa_type = cfg->fc_type; 1311 new_fa->fa_state = 0; 1312 /* 1313 * Insert new entry to the list. ··· 1362 1363 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) 1364 continue; 1365 + if (fa->fa_info->fib_scope < flp->flowi4_scope) 1366 continue; 1367 fib_alias_accessed(fa); 1368 err = fib_props[fa->fa_type].error; ··· 1388 res->prefixlen = plen; 1389 res->nh_sel = nhsel; 1390 res->type = fa->fa_type; 1391 + res->scope = fa->fa_info->fib_scope; 1392 res->fi = fi; 1393 res->table = tb; 1394 res->fa_head = &li->falh; ··· 1664 1665 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) && 1666 (cfg->fc_scope == RT_SCOPE_NOWHERE || 1667 + fa->fa_info->fib_scope == cfg->fc_scope) && 1668 + (!cfg->fc_prefsrc || 1669 + fi->fib_prefsrc == cfg->fc_prefsrc) && 1670 (!cfg->fc_protocol || 1671 fi->fib_protocol == cfg->fc_protocol) && 1672 fib_nh_match(cfg, fi) == 0) { ··· 1861 RTM_NEWROUTE, 1862 tb->tb_id, 1863 fa->fa_type, 1864 xkey, 1865 plen, 1866 fa->fa_tos, ··· 2382 seq_indent(seq, iter->depth+1); 2383 seq_printf(seq, " /%d %s %s", li->plen, 2384 rtn_scope(buf1, sizeof(buf1), 2385 + fa->fa_info->fib_scope), 2386 rtn_type(buf2, sizeof(buf2), 2387 fa->fa_type)); 2388 if (fa->fa_tos)
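fib_trie.c follows the scope move: every fa->fa_scope read (insert-time duplicate detection, lookup, delete, dump and the /proc output) becomes fa->fa_info->fib_scope. The delete path also gains a prefsrc filter, so a delete request that specifies a preferred source only matches routes carrying that prefsrc. The combined match condition, pulled out here as a standalone predicate purely for readability (fa_matches_delete_req() is not part of the patch):

    /* Sketch of the delete-time matching performed in the hunk above. */
    static bool fa_matches_delete_req(struct fib_alias *fa,
                                      struct fib_config *cfg)
    {
            struct fib_info *fi = fa->fa_info;

            return (!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
                   (cfg->fc_scope == RT_SCOPE_NOWHERE ||
                    fi->fib_scope == cfg->fc_scope) &&
                   (!cfg->fc_prefsrc || fi->fib_prefsrc == cfg->fc_prefsrc) &&
                   (!cfg->fc_protocol ||
                    fi->fib_protocol == cfg->fc_protocol) &&
                   fib_nh_match(cfg, fi) == 0;
    }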
+4 -4
net/ipv4/route.c
··· 1593 rt->rt_peer_genid = rt_peer_genid(); 1594 } 1595 check_peer_pmtu(dst, peer); 1596 - 1597 - inet_putpeer(peer); 1598 } 1599 } 1600 ··· 1718 1719 rcu_read_lock(); 1720 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0) 1721 - src = FIB_RES_PREFSRC(res); 1722 else 1723 src = inet_select_addr(rt->dst.dev, rt->rt_gateway, 1724 RT_SCOPE_UNIVERSE); ··· 2615 fib_select_default(&res); 2616 2617 if (!fl4.saddr) 2618 - fl4.saddr = FIB_RES_PREFSRC(res); 2619 2620 dev_out = FIB_RES_DEV(res); 2621 fl4.flowi4_oif = dev_out->ifindex; ··· 3219 { 3220 get_random_bytes(&net->ipv4.rt_genid, 3221 sizeof(net->ipv4.rt_genid)); 3222 return 0; 3223 } 3224
··· 1593 rt->rt_peer_genid = rt_peer_genid(); 1594 } 1595 check_peer_pmtu(dst, peer); 1596 } 1597 } 1598 ··· 1720 1721 rcu_read_lock(); 1722 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0) 1723 + src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res); 1724 else 1725 src = inet_select_addr(rt->dst.dev, rt->rt_gateway, 1726 RT_SCOPE_UNIVERSE); ··· 2617 fib_select_default(&res); 2618 2619 if (!fl4.saddr) 2620 + fl4.saddr = FIB_RES_PREFSRC(net, res); 2621 2622 dev_out = FIB_RES_DEV(res); 2623 fl4.flowi4_oif = dev_out->ifindex; ··· 3221 { 3222 get_random_bytes(&net->ipv4.rt_genid, 3223 sizeof(net->ipv4.rt_genid)); 3224 + get_random_bytes(&net->ipv4.dev_addr_genid, 3225 + sizeof(net->ipv4.dev_addr_genid)); 3226 return 0; 3227 } 3228
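In route.c the extra inet_putpeer() in the PMTU update path goes away: the peer pointer there is the route's own reference, so the additional put could drop it while the route still held it. The FIB_RES_PREFSRC users now pass the namespace, since the preferred-source fallback has to consult the cached nexthop address and its dev_addr_genid stamp, and rt_genid_init() seeds that new counter with random bytes alongside rt_genid. A sketch of the producer side that makes the stamp useful; the real increments live in the address/device notifier paths (e.g. fib_frontend.c), which are not shown in this section, and invalidate_cached_nh_saddrs() is an illustrative name:

    /* Assumed counterpart to the genid check: bump the per-namespace
     * counter whenever device addresses change, so every nh_saddr stamped
     * with an older value is recomputed lazily on its next use.
     */
    static void invalidate_cached_nh_saddrs(struct net *net)
    {
            atomic_inc(&net->ipv4.dev_addr_genid);
    }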
+12 -10
net/ipv4/tcp_input.c
··· 2659 #define DBGUNDO(x...) do { } while (0) 2660 #endif 2661 2662 - static void tcp_undo_cwr(struct sock *sk, const int undo) 2663 { 2664 struct tcp_sock *tp = tcp_sk(sk); 2665 ··· 2671 else 2672 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); 2673 2674 - if (undo && tp->prior_ssthresh > tp->snd_ssthresh) { 2675 tp->snd_ssthresh = tp->prior_ssthresh; 2676 TCP_ECN_withdraw_cwr(tp); 2677 } 2678 } else { 2679 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); 2680 } 2681 - tcp_moderate_cwnd(tp); 2682 tp->snd_cwnd_stamp = tcp_time_stamp; 2683 } 2684 ··· 2698 * or our original transmission succeeded. 2699 */ 2700 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); 2701 - tcp_undo_cwr(sk, 1); 2702 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) 2703 mib_idx = LINUX_MIB_TCPLOSSUNDO; 2704 else ··· 2725 2726 if (tp->undo_marker && !tp->undo_retrans) { 2727 DBGUNDO(sk, "D-SACK"); 2728 - tcp_undo_cwr(sk, 1); 2729 tp->undo_marker = 0; 2730 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); 2731 } ··· 2778 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); 2779 2780 DBGUNDO(sk, "Hoe"); 2781 - tcp_undo_cwr(sk, 0); 2782 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); 2783 2784 /* So... Do not make Hoe's retransmit yet. ··· 2807 2808 DBGUNDO(sk, "partial loss"); 2809 tp->lost_out = 0; 2810 - tcp_undo_cwr(sk, 1); 2811 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); 2812 inet_csk(sk)->icsk_retransmits = 0; 2813 tp->undo_marker = 0; ··· 2821 static inline void tcp_complete_cwr(struct sock *sk) 2822 { 2823 struct tcp_sock *tp = tcp_sk(sk); 2824 - tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 2825 - tp->snd_cwnd_stamp = tcp_time_stamp; 2826 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2827 } 2828 ··· 3496 if (flag & FLAG_ECE) 3497 tcp_ratehalving_spur_to_response(sk); 3498 else 3499 - tcp_undo_cwr(sk, 1); 3500 } 3501 3502 /* F-RTO spurious RTO detection algorithm (RFC4138)
··· 2659 #define DBGUNDO(x...) do { } while (0) 2660 #endif 2661 2662 + static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh) 2663 { 2664 struct tcp_sock *tp = tcp_sk(sk); 2665 ··· 2671 else 2672 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); 2673 2674 + if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) { 2675 tp->snd_ssthresh = tp->prior_ssthresh; 2676 TCP_ECN_withdraw_cwr(tp); 2677 } 2678 } else { 2679 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); 2680 } 2681 tp->snd_cwnd_stamp = tcp_time_stamp; 2682 } 2683 ··· 2699 * or our original transmission succeeded. 2700 */ 2701 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); 2702 + tcp_undo_cwr(sk, true); 2703 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) 2704 mib_idx = LINUX_MIB_TCPLOSSUNDO; 2705 else ··· 2726 2727 if (tp->undo_marker && !tp->undo_retrans) { 2728 DBGUNDO(sk, "D-SACK"); 2729 + tcp_undo_cwr(sk, true); 2730 tp->undo_marker = 0; 2731 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); 2732 } ··· 2779 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); 2780 2781 DBGUNDO(sk, "Hoe"); 2782 + tcp_undo_cwr(sk, false); 2783 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); 2784 2785 /* So... Do not make Hoe's retransmit yet. ··· 2808 2809 DBGUNDO(sk, "partial loss"); 2810 tp->lost_out = 0; 2811 + tcp_undo_cwr(sk, true); 2812 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); 2813 inet_csk(sk)->icsk_retransmits = 0; 2814 tp->undo_marker = 0; ··· 2822 static inline void tcp_complete_cwr(struct sock *sk) 2823 { 2824 struct tcp_sock *tp = tcp_sk(sk); 2825 + /* Do not moderate cwnd if it's already undone in cwr or recovery */ 2826 + if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) { 2827 + tp->snd_cwnd = tp->snd_ssthresh; 2828 + tp->snd_cwnd_stamp = tcp_time_stamp; 2829 + } 2830 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2831 } 2832 ··· 3494 if (flag & FLAG_ECE) 3495 tcp_ratehalving_spur_to_response(sk); 3496 else 3497 + tcp_undo_cwr(sk, true); 3498 } 3499 3500 /* F-RTO spurious RTO detection algorithm (RFC4138)
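The tcp_input.c hunk does two things. The second argument of tcp_undo_cwr() becomes a self-documenting bool (undo_ssthresh), with the call sites switched to true/false. More substantively, cwnd moderation is taken out of the undo path: tcp_undo_cwr() no longer calls tcp_moderate_cwnd(), and tcp_complete_cwr() only pulls snd_cwnd down to snd_ssthresh when an undo marker is still set and cwnd sits above ssthresh, so a window already restored by an undo is not clamped again. An illustrative summary of the call-site semantics after the change; example_undo() is a hypothetical wrapper, not part of the patch:

    /* Illustrative only: how the two undo flavours read at call sites
     * after the int -> bool conversion.
     */
    static void example_undo(struct sock *sk, bool loss_was_spurious)
    {
            if (loss_was_spurious)
                    tcp_undo_cwr(sk, true);   /* restore cwnd and saved ssthresh */
            else
                    tcp_undo_cwr(sk, false);  /* Hoe partial undo: cwnd only */
    }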
+1 -1
net/ipv6/route.c
··· 854 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags); 855 } 856 857 - struct dst_entry * ip6_route_output(struct net *net, struct sock *sk, 858 struct flowi6 *fl6) 859 { 860 int flags = 0;
··· 854 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags); 855 } 856 857 + struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk, 858 struct flowi6 *fl6) 859 { 860 int flags = 0;
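ip6_route_output() now takes a const struct sock *, so callers that only hold read access to the socket (or no socket at all) can do the lookup without a cast. A hypothetical caller, just to show the effect of the qualifier; lookup_dst() is illustrative:

    /* With the const parameter no cast is needed when the caller itself
     * only has a const socket.
     */
    static struct dst_entry *lookup_dst(struct net *net,
                                        const struct sock *sk,
                                        struct flowi6 *fl6)
    {
            return ip6_route_output(net, sk, fl6);
    }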
+1 -0
net/mac80211/sta_info.c
··· 243 memcpy(sta->sta.addr, addr, ETH_ALEN); 244 sta->local = local; 245 sta->sdata = sdata; 246 247 ewma_init(&sta->avg_signal, 1024, 8); 248
··· 243 memcpy(sta->sta.addr, addr, ETH_ALEN); 244 sta->local = local; 245 sta->sdata = sdata; 246 + sta->last_rx = jiffies; 247 248 ewma_init(&sta->avg_signal, 1024, 8); 249
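sta_info.c initializes sta->last_rx to the current jiffies when a station entry is allocated. Without it the field started at zero, so inactivity checks that compare last_rx against a timeout could treat a freshly added peer (for instance an IBSS station that has not received anything yet) as long idle and expire it right away. A sketch of the kind of check this protects; EXPIRE_TIMEOUT and expire_station() are purely illustrative names:

    /* Illustrative inactivity check: with last_rx preset to jiffies a new
     * station gets a full timeout period before it can be expired.
     */
    static void maybe_expire(struct sta_info *sta)
    {
            if (time_after(jiffies, sta->last_rx + EXPIRE_TIMEOUT))
                    expire_station(sta);
    }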