Merge branches 'misc', 'mlx4', 'mthca', 'nes' and 'srp' into for-next

+297 -60
+7 -5
drivers/infiniband/hw/mlx4/main.c
···
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
 
-	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
-				    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
+				    !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+				    MLX4_PROTOCOL_IB);
 	if (err)
 		return err;
···
 	return 0;
 
 err_add:
-	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
+	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
 	return err;
 }
···
 	struct mlx4_ib_gid_entry *ge;
 
 	err = mlx4_multicast_detach(mdev->dev,
-				    &mqp->mqp, gid->raw);
+				    &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
 	if (err)
 		return err;
···
 	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
 		goto err_pd;
 
-	ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
+				 PAGE_SIZE);
 	if (!ibdev->uar_map)
 		goto err_uar;
 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
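A note on the ioremap() change above and the matching ones in the mthca and
mlx4/en hunks below: the UAR pfn is held in an unsigned long, which is 32
bits on i386, so shifting it left by PAGE_SHIFT silently drops the high bits
when the region sits above 4GB; widening to phys_addr_t before the shift
preserves them. A standalone sketch of the failure (plain C, not kernel
code; the pfn value is made up):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		uint32_t pfn = 0x00123456;	/* frame above the 4GB line */

		/* 32-bit arithmetic wraps before ioremap() would see it */
		uint32_t truncated = pfn << PAGE_SHIFT;
		/* widening first, as the patch does, keeps the high bits */
		uint64_t correct = (uint64_t) pfn << PAGE_SHIFT;

		printf("truncated: 0x%08x\n", truncated);	/* 0x23456000 */
		printf("correct:   0x%llx\n",
		       (unsigned long long) correct);		/* 0x123456000 */
		return 0;
	}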
+3 -2
drivers/infiniband/hw/mthca/mthca_catas.c
···
 
 void mthca_start_catas_poll(struct mthca_dev *dev)
 {
-	unsigned long addr;
+	phys_addr_t addr;
 
 	init_timer(&dev->catas_err.timer);
 	dev->catas_err.map = NULL;
···
 	dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
 	if (!dev->catas_err.map) {
 		mthca_warn(dev, "couldn't map catastrophic error region "
-			   "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
+			   "at 0x%llx/0x%x\n", (unsigned long long) addr,
+			   dev->catas_err.size * 4);
 		return;
 	}
 
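The format-string half of the change is worth calling out: phys_addr_t is 32
or 64 bits depending on the kernel configuration, so the one printk format
that is safe for both is %llx with a cast to unsigned long long, as the
reworked mthca_warn() does. A runnable sketch (userspace C; the typedef
stands in for a CONFIG_PHYS_ADDR_T_64BIT build):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t phys_addr_t;	/* assumed 64-bit config */

	int main(void)
	{
		phys_addr_t addr = 0x123456000ULL;

		/* %lx would truncate (or warn) when the type is wider than
		 * long; the llx + cast pair works for either definition */
		printf("error region at 0x%llx\n", (unsigned long long) addr);
		return 0;
	}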
+1 -1
drivers/infiniband/hw/mthca/mthca_cmd.c
···
 
 static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
 {
-	unsigned long addr;
+	phys_addr_t addr;
 	u16 max_off = 0;
 	int i;
 
+1 -1
drivers/infiniband/hw/mthca/mthca_eq.c
···
 			unsigned long offset, unsigned long size,
 			void __iomem **map)
 {
-	unsigned long base = pci_resource_start(dev->pdev, 0);
+	phys_addr_t base = pci_resource_start(dev->pdev, 0);
 
 	*map = ioremap(base + offset, size);
 	if (!*map)
+1 -1
drivers/infiniband/hw/mthca/mthca_main.c
···
 		goto err_uar_table_free;
 	}
 
-	dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!dev->kar) {
 		mthca_err(dev, "Couldn't map kernel access region, "
 			  "aborting.\n");
+1 -1
drivers/infiniband/hw/mthca/mthca_mr.c
···
 
 int mthca_init_mr_table(struct mthca_dev *dev)
 {
-	unsigned long addr;
+	phys_addr_t addr;
 	int mpts, mtts, err, i;
 
 	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
+30 -5
drivers/infiniband/hw/nes/nes.c
···
 	struct nes_device *nesdev;
 	struct net_device *netdev;
 	struct nes_vnic *nesvnic;
+	unsigned int is_bonded;
 
 	nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n",
 		  &ifa->ifa_address, &ifa->ifa_mask);
···
 			  nesdev, nesdev->netdev[0]->name);
 		netdev = nesdev->netdev[0];
 		nesvnic = netdev_priv(netdev);
-		if (netdev == event_netdev) {
+		is_bonded = (netdev->master == event_netdev);
+		if ((netdev == event_netdev) || is_bonded) {
 			if (nesvnic->rdma_enabled == 0) {
 				nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
 					  " RDMA is not enabled.\n",
···
 				nes_manage_arp_cache(netdev, netdev->dev_addr,
 						     ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
 				nesvnic->local_ipaddr = 0;
-				return NOTIFY_OK;
+				if (is_bonded)
+					continue;
+				else
+					return NOTIFY_OK;
 				break;
 			case NETDEV_UP:
 				nes_debug(NES_DBG_NETDEV, "event:UP\n");
···
 					nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
 					return NOTIFY_OK;
 				}
+				/* fall through */
+			case NETDEV_CHANGEADDR:
 				/* Add the address to the IP table */
-				nesvnic->local_ipaddr = ifa->ifa_address;
+				if (netdev->master)
+					nesvnic->local_ipaddr =
+						((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address;
+				else
+					nesvnic->local_ipaddr = ifa->ifa_address;
 
 				nes_write_indexed(nesdev,
 						  NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
-						  ntohl(ifa->ifa_address));
+						  ntohl(nesvnic->local_ipaddr));
 				nes_manage_arp_cache(netdev, netdev->dev_addr,
 						     ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
-				return NOTIFY_OK;
+				if (is_bonded)
+					continue;
+				else
+					return NOTIFY_OK;
 				break;
 			default:
 				break;
···
 	}
 	nes_notifiers_registered++;
 
+	INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
+
 	/* Initialize network devices */
 	if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
 		goto bail7;
···
 	struct nes_device *nesdev = pci_get_drvdata(pcidev);
 	struct net_device *netdev;
 	int netdev_index = 0;
+	unsigned long flags;
 
 	if (nesdev->netdev_count) {
 		netdev = nesdev->netdev[netdev_index];
···
 
 	free_irq(pcidev->irq, nesdev);
 	tasklet_kill(&nesdev->dpc_tasklet);
+
+	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+	if (nesdev->link_recheck) {
+		spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+		cancel_delayed_work_sync(&nesdev->work);
+	} else {
+		spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+	}
 
 	/* Deallocate the Adapter Structure */
 	nes_destroy_adapter(nesdev->nesadapter);
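For context, nes.c reacts to address events delivered through the kernel's
inetaddr notifier chain, and the bonding change simply widens the match to
the slave's master device and switches return to continue so every port
behind the same bond gets updated. A minimal sketch of that hook, under the
2.6.37-era API (the my_inetaddr_* names are illustrative, not from the
driver):

	#include <linux/notifier.h>
	#include <linux/inetdevice.h>
	#include <linux/netdevice.h>

	static int my_inetaddr_event(struct notifier_block *notifier,
				     unsigned long event, void *ptr)
	{
		struct in_ifaddr *ifa = ptr;
		struct net_device *event_netdev = ifa->ifa_dev->dev;

		switch (event) {
		case NETDEV_UP:
		case NETDEV_DOWN:
		case NETDEV_CHANGEADDR:
			/* match event_netdev against our netdev and, as the
			 * patch adds, against netdev->master for bonding */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_inetaddr_notifier = {
		.notifier_call = my_inetaddr_event,
	};

	/* registered once at module init:
	 *	register_inetaddr_notifier(&my_inetaddr_notifier);
	 * and removed at exit:
	 *	unregister_inetaddr_notifier(&my_inetaddr_notifier);  */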
+4
drivers/infiniband/hw/nes/nes.h
···
 	u8 napi_isr_ran;
 	u8 disable_rx_flow_control;
 	u8 disable_tx_flow_control;
+
+	struct delayed_work work;
+	u8 link_recheck;
 };
 
···
 void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
 int nes_destroy_cqp(struct nes_device *);
 int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+void nes_recheck_link_status(struct work_struct *work);
 
 /* nes_nic.c */
 struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
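The new work/link_recheck pair backs the deferred link poll added in
nes_hw.c below: initialized once at probe time, armed from the MAC interrupt
handler and from the netdev open path, and torn down synchronously at
remove. A sketch of that lifecycle with the workqueue API as it stood then
(my_dev/my_recheck are illustrative names):

	struct my_dev {
		struct delayed_work work;
		u8 link_recheck;
	};

	static void my_recheck(struct work_struct *work)
	{
		struct my_dev *dev =
			container_of(work, struct my_dev, work.work);
		/* poll the PHY; reschedule while dev->link_recheck++ stays
		 * under the retry cap, as nes_recheck_link_status() does */
	}

	/* probe:  INIT_DELAYED_WORK(&dev->work, my_recheck);
	 * event:  schedule_delayed_work(&dev->work, msecs_to_jiffies(50));
	 * remove: cancel_delayed_work_sync(&dev->work);  */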
+7 -1
drivers/infiniband/hw/nes/nes_cm.c
···
 	struct flowi fl;
 	struct neighbour *neigh;
 	int rc = arpindex;
+	struct net_device *netdev;
 	struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
 
 	memset(&fl, 0, sizeof fl);
···
 		return rc;
 	}
 
-	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, nesvnic->netdev);
+	if (nesvnic->netdev->master)
+		netdev = nesvnic->netdev->master;
+	else
+		netdev = nesvnic->netdev;
+
+	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
 	if (neigh) {
 		if (neigh->nud_state & NUD_VALID) {
 			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
+95
drivers/infiniband/hw/nes/nes_hw.c
···
 				netif_start_queue(nesvnic->netdev);
 				nesvnic->linkup = 1;
 				netif_carrier_on(nesvnic->netdev);
+
+				spin_lock(&nesvnic->port_ibevent_lock);
+				if (nesdev->iw_status == 0) {
+					nesdev->iw_status = 1;
+					nes_port_ibevent(nesvnic);
+				}
+				spin_unlock(&nesvnic->port_ibevent_lock);
 			}
 		}
 	} else {
···
 				netif_stop_queue(nesvnic->netdev);
 				nesvnic->linkup = 0;
 				netif_carrier_off(nesvnic->netdev);
+
+				spin_lock(&nesvnic->port_ibevent_lock);
+				if (nesdev->iw_status == 1) {
+					nesdev->iw_status = 0;
+					nes_port_ibevent(nesvnic);
+				}
+				spin_unlock(&nesvnic->port_ibevent_lock);
 			}
 		}
+	}
+	if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
+		if (nesdev->link_recheck)
+			cancel_delayed_work(&nesdev->work);
+		nesdev->link_recheck = 1;
+		schedule_delayed_work(&nesdev->work,
+				      NES_LINK_RECHECK_DELAY);
 	}
 }
···
 	nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
 }
 
+void nes_recheck_link_status(struct work_struct *work)
+{
+	unsigned long flags;
+	struct nes_device *nesdev = container_of(work, struct nes_device, work.work);
+	struct nes_adapter *nesadapter = nesdev->nesadapter;
+	struct nes_vnic *nesvnic;
+	u32 mac_index = nesdev->mac_index;
+	u16 phy_data;
+	u16 temp_phy_data;
+
+	spin_lock_irqsave(&nesadapter->phy_lock, flags);
+
+	/* check link status */
+	nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
+	temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+	nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
+	nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+	nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
+	phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+	phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
+
+	nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
+		  __func__, phy_data,
+		  nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
+
+	if (phy_data & 0x0004) {
+		nesadapter->mac_link_down[mac_index] = 0;
+		list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+			if (nesvnic->linkup == 0) {
+				printk(PFX "The Link is now up for port %s, netdev %p.\n",
+				       nesvnic->netdev->name, nesvnic->netdev);
+				if (netif_queue_stopped(nesvnic->netdev))
+					netif_start_queue(nesvnic->netdev);
+				nesvnic->linkup = 1;
+				netif_carrier_on(nesvnic->netdev);
+
+				spin_lock(&nesvnic->port_ibevent_lock);
+				if (nesdev->iw_status == 0) {
+					nesdev->iw_status = 1;
+					nes_port_ibevent(nesvnic);
+				}
+				spin_unlock(&nesvnic->port_ibevent_lock);
+			}
+		}
+
+	} else {
+		nesadapter->mac_link_down[mac_index] = 1;
+		list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+			if (nesvnic->linkup == 1) {
+				printk(PFX "The Link is now down for port %s, netdev %p.\n",
+				       nesvnic->netdev->name, nesvnic->netdev);
+				if (!(netif_queue_stopped(nesvnic->netdev)))
+					netif_stop_queue(nesvnic->netdev);
+				nesvnic->linkup = 0;
+				netif_carrier_off(nesvnic->netdev);
+
+				spin_lock(&nesvnic->port_ibevent_lock);
+				if (nesdev->iw_status == 1) {
+					nesdev->iw_status = 0;
+					nes_port_ibevent(nesvnic);
+				}
+				spin_unlock(&nesvnic->port_ibevent_lock);
+			}
+		}
+	}
+	if (nesdev->link_recheck++ < NES_LINK_RECHECK_MAX)
+		schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+	else
+		nesdev->link_recheck = 0;
+
+	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+}
 
 
 static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+10
drivers/infiniband/hw/nes/nes_hw.h
···
 
 struct nes_ib_device;
 
+#define NES_EVENT_DELAY msecs_to_jiffies(100)
+
 struct nes_vnic {
 	struct nes_ib_device *nesibdev;
 	u64 sq_full;
···
 	u32 lro_max_aggr;
 	struct net_lro_mgr lro_mgr;
 	struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
+	struct timer_list event_timer;
+	enum ib_event_type delayed_event;
+	enum ib_event_type last_dispatched_event;
+	spinlock_t port_ibevent_lock;
 };
 
 struct nes_ib_device {
···
 #define RDMA_READ_REQ_OPCODE 1
 #define BAD_FRAME_OFFSET 64
 #define CQE_MAJOR_DRV 0x8000
+
+/* Used for link status recheck after interrupt processing */
+#define NES_LINK_RECHECK_DELAY msecs_to_jiffies(50)
+#define NES_LINK_RECHECK_MAX 60
 
 #define nes_vlan_rx vlan_hwaccel_receive_skb
 #define nes_netif_rx netif_receive_skb
+66 -12
drivers/infiniband/hw/nes/nes_nic.c
···
 	u32 nic_active_bit;
 	u32 nic_active;
 	struct list_head *list_pos, *list_temp;
+	unsigned long flags;
 
 	assert(nesdev != NULL);
 
···
 			first_nesvnic = nesvnic;
 	}
 
-	if (nesvnic->of_device_registered) {
-		nesdev->iw_status = 1;
-		nesdev->nesadapter->send_term_ok = 1;
-		nes_port_ibevent(nesvnic);
-	}
-
 	if (first_nesvnic->linkup) {
 		/* Enable network packets */
 		nesvnic->linkup = 1;
 		netif_start_queue(netdev);
 		netif_carrier_on(netdev);
 	}
+
+	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+	if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
+		if (nesdev->link_recheck)
+			cancel_delayed_work(&nesdev->work);
+		nesdev->link_recheck = 1;
+		schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+	}
+	spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+
+	spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
+	if (nesvnic->of_device_registered) {
+		nesdev->nesadapter->send_term_ok = 1;
+		if (nesvnic->linkup == 1) {
+			if (nesdev->iw_status == 0) {
+				nesdev->iw_status = 1;
+				nes_port_ibevent(nesvnic);
+			}
+		} else {
+			nesdev->iw_status = 0;
+		}
+	}
+	spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
+
 	napi_enable(&nesvnic->napi);
 	nesvnic->netdev_open = 1;
 
···
 	u32 nic_active;
 	struct nes_vnic *first_nesvnic = NULL;
 	struct list_head *list_pos, *list_temp;
+	unsigned long flags;
 
 	nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
 		  nesvnic, nesdev, netdev, netdev->name);
···
 	nic_active &= nic_active_mask;
 	nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
 
-
+	spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
 	if (nesvnic->of_device_registered) {
 		nesdev->nesadapter->send_term_ok = 0;
 		nesdev->iw_status = 0;
-		nes_port_ibevent(nesvnic);
+		if (nesvnic->linkup == 1)
+			nes_port_ibevent(nesvnic);
 	}
+	del_timer_sync(&nesvnic->event_timer);
+	nesvnic->event_timer.function = NULL;
+	spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
+
 	nes_destroy_nic_qp(nesvnic);
 
 	nesvnic->netdev_open = 0;
···
 		nesvnic->rdma_enabled = 0;
 	}
 	nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
+	init_timer(&nesvnic->event_timer);
+	nesvnic->event_timer.function = NULL;
 	spin_lock_init(&nesvnic->tx_lock);
+	spin_lock_init(&nesvnic->port_ibevent_lock);
 	nesdev->netdev[nesdev->netdev_count] = netdev;
 
 	nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
···
 	    (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
 	     ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
 		u32 u32temp;
-		u32 link_mask;
-		u32 link_val;
+		u32 link_mask = 0;
+		u32 link_val = 0;
+		u16 temp_phy_data;
+		u16 phy_data = 0;
+		unsigned long flags;
 
 		u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
 					   (0x200 * (nesdev->mac_index & 1)));
···
 				link_val = 0x02020000;
 			}
 			break;
+		case NES_PHY_TYPE_SFP_D:
+			spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+			nes_read_10G_phy_reg(nesdev,
+					     nesdev->nesadapter->phy_index[nesdev->mac_index],
+					     1, 0x9003);
+			temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+			nes_read_10G_phy_reg(nesdev,
+					     nesdev->nesadapter->phy_index[nesdev->mac_index],
+					     3, 0x0021);
+			nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+			nes_read_10G_phy_reg(nesdev,
+					     nesdev->nesadapter->phy_index[nesdev->mac_index],
+					     3, 0x0021);
+			phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+			spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+			phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
+			break;
 		default:
 			link_mask = 0x0f1f0000;
 			link_val = 0x0f0f0000;
···
 		u32temp = nes_read_indexed(nesdev,
 					   NES_IDX_PHY_PCS_CONTROL_STATUS0 +
 					   (0x200 * (nesdev->mac_index & 1)));
-		if ((u32temp & link_mask) == link_val)
-			nesvnic->linkup = 1;
+
+		if (phy_type == NES_PHY_TYPE_SFP_D) {
+			if (phy_data & 0x0004)
+				nesvnic->linkup = 1;
+		} else {
+			if ((u32temp & link_mask) == link_val)
+				nesvnic->linkup = 1;
+		}
 
 		/* clear the MAC interrupt status, assumes direct logical to physical mapping */
 		u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
+36 -1
drivers/infiniband/hw/nes/nes_verbs.c
···
 	return nesibdev;
 }
 
+
+/**
+ * nes_handle_delayed_event
+ */
+static void nes_handle_delayed_event(unsigned long data)
+{
+	struct nes_vnic *nesvnic = (void *) data;
+
+	if (nesvnic->delayed_event != nesvnic->last_dispatched_event) {
+		struct ib_event event;
+
+		event.device = &nesvnic->nesibdev->ibdev;
+		if (!event.device)
+			goto stop_timer;
+		event.event = nesvnic->delayed_event;
+		event.element.port_num = nesvnic->logical_port + 1;
+		ib_dispatch_event(&event);
+	}
+
+stop_timer:
+	nesvnic->event_timer.function = NULL;
+}
+
+
 void nes_port_ibevent(struct nes_vnic *nesvnic)
 {
 	struct nes_ib_device *nesibdev = nesvnic->nesibdev;
···
 	event.device = &nesibdev->ibdev;
 	event.element.port_num = nesvnic->logical_port + 1;
 	event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
-	ib_dispatch_event(&event);
+
+	if (!nesvnic->event_timer.function) {
+		ib_dispatch_event(&event);
+		nesvnic->last_dispatched_event = event.event;
+		nesvnic->event_timer.function = nes_handle_delayed_event;
+		nesvnic->event_timer.data = (unsigned long) nesvnic;
+		nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
+		add_timer(&nesvnic->event_timer);
+	} else {
+		mod_timer(&nesvnic->event_timer, jiffies + NES_EVENT_DELAY);
+	}
+	nesvnic->delayed_event = event.event;
 }
 
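The effect of this change is to debounce port events: the first transition
is dispatched immediately, later flaps within NES_EVENT_DELAY only push the
timer out, and when the timer finally fires it reports the latest state only
if it differs from the last one dispatched. A runnable sketch of just that
rule (plain C, with the timer reduced to an explicit call):

	#include <stdio.h>
	#include <stdbool.h>

	static int last_dispatched = -1;	/* last state reported */
	static int delayed = -1;		/* latest state seen */
	static bool timer_armed;

	static void port_event(int up)
	{
		if (!timer_armed) {
			printf("dispatch %d immediately\n", up);
			last_dispatched = up;
			timer_armed = true;	/* add_timer() */
		}	/* else: mod_timer() just extends the window */
		delayed = up;
	}

	static void timer_fires(void)
	{
		if (delayed != last_dispatched)
			printf("dispatch %d after delay\n", delayed);
		timer_armed = false;
	}

	int main(void)
	{
		port_event(1);	/* reported at once */
		port_event(0);	/* flap down ... */
		port_event(1);	/* ... and back up within the window */
		timer_fires();	/* silent: final state already reported */
		return 0;
	}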
+7 -8
drivers/infiniband/ulp/srp/ib_srp.c
···
 
 	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
-	if (iu) {
-		req = list_first_entry(&target->free_reqs, struct srp_request,
-				       list);
-		list_del(&req->list);
-	}
-	spin_unlock_irqrestore(&target->lock, flags);
-
 	if (!iu)
-		goto err;
+		goto err_unlock;
+
+	req = list_first_entry(&target->free_reqs, struct srp_request, list);
+	list_del(&req->list);
+	spin_unlock_irqrestore(&target->lock, flags);
 
 	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
···
 
 	spin_lock_irqsave(&target->lock, flags);
 	list_add(&req->list, &target->free_reqs);
+
+err_unlock:
 	spin_unlock_irqrestore(&target->lock, flags);
 
 err:
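The SRP hunk is a lock-scope cleanup: rather than testing iu twice and
unlocking on both paths, the failure branch now exits through a label that
drops target->lock, and the request is claimed from free_reqs inside the
same critical section. The free-list idiom itself is easy to show standalone
(a stripped-down stand-in for the kernel's list.h, for illustration only):

	#include <stdio.h>
	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define list_first_entry(head, type, member) \
		container_of((head)->next, type, member)

	static void list_init(struct list_head *h) { h->next = h->prev = h; }

	static void list_add(struct list_head *n, struct list_head *h)
	{
		n->next = h->next; n->prev = h;
		h->next->prev = n; h->next = n;
	}

	static void list_del(struct list_head *n)
	{
		n->prev->next = n->next; n->next->prev = n->prev;
	}

	struct srp_request { int index; struct list_head list; };

	int main(void)
	{
		struct list_head free_reqs;
		struct srp_request reqs[2] = { { .index = 0 }, { .index = 1 } };
		struct srp_request *req;

		list_init(&free_reqs);
		list_add(&reqs[1].list, &free_reqs);
		list_add(&reqs[0].list, &free_reqs);

		/* claim, as the hunk now does under target->lock */
		req = list_first_entry(&free_reqs, struct srp_request, list);
		list_del(&req->list);
		printf("claimed req %d\n", req->index);

		list_add(&req->list, &free_reqs);	/* error path */
		return 0;
	}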
+3 -3
drivers/net/mlx4/catas.c
···
 void mlx4_start_catas_poll(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	unsigned long addr;
+	phys_addr_t addr;
 
 	INIT_LIST_HEAD(&priv->catas_err.list);
 	init_timer(&priv->catas_err.timer);
···
 
 	priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
 	if (!priv->catas_err.map) {
-		mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
-			  addr);
+		mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+			  (unsigned long long) addr);
 		return;
 	}
 
+2 -1
drivers/net/mlx4/en_main.c
···
 	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
 		goto err_pd;
 
-	mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
+				PAGE_SIZE);
 	if (!mdev->uar_map)
 		goto err_uar;
 	spin_lock_init(&mdev->uar_lock);
+1 -1
drivers/net/mlx4/main.c
···
 		goto err_uar_table_free;
 	}
 
-	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!priv->kar) {
 		mlx4_err(dev, "Couldn't map kernel access region, "
 			 "aborting.\n");
+13 -10
drivers/net/mlx4/mcg.c
···
  * entry in hash chain and *mgm holds end of hash chain.
  */
 static int find_mgm(struct mlx4_dev *dev,
-		    u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
+		    u8 *gid, enum mlx4_protocol protocol,
+		    struct mlx4_cmd_mailbox *mgm_mailbox,
 		    u16 *hash, int *prev, int *index)
 {
 	struct mlx4_cmd_mailbox *mailbox;
···
 			return err;
 		}
 
-		if (!memcmp(mgm->gid, gid, 16))
+		if (!memcmp(mgm->gid, gid, 16) &&
+		    be32_to_cpu(mgm->members_count) >> 30 == protocol)
 			return err;
 
 		*prev = *index;
···
 }
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback)
+			  int block_mcast_loopback, enum mlx4_protocol protocol)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
···
 
 	mutex_lock(&priv->mcg_table.mutex);
 
-	err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+	err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
 	if (err)
 		goto out;
···
 		memcpy(mgm->gid, gid, 16);
 	}
 
-	members_count = be32_to_cpu(mgm->members_count);
+	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	if (members_count == MLX4_QP_PER_MGM) {
 		mlx4_err(dev, "MGM at index %x is full.\n", index);
 		err = -ENOMEM;
···
 	else
 		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
 
-	mgm->members_count = cpu_to_be32(members_count);
+	mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30);
 
 	err = mlx4_WRITE_MCG(dev, index, mailbox);
 	if (err)
···
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
 
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  enum mlx4_protocol protocol)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
···
 
 	mutex_lock(&priv->mcg_table.mutex);
 
-	err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+	err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
 	if (err)
 		goto out;
···
 		goto out;
 	}
 
-	members_count = be32_to_cpu(mgm->members_count);
+	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	for (loc = -1, i = 0; i < members_count; ++i)
 		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
 			loc = i;
···
 	}
 
 
-	mgm->members_count = cpu_to_be32(--members_count);
+	mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30);
 	mgm->qp[loc] = mgm->qp[i - 1];
 	mgm->qp[i - 1] = 0;
 
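The mcg.c encoding deserves a note: members_count is a single be32 field in
the MGM entry, and the patch steals its top two bits for the owning protocol
while keeping the count in the low 24 bits (find_mgm() compares the decoded
protocol, not the raw word). A runnable sketch of the pack/unpack on the
CPU-endian value (the kernel converts with cpu_to_be32/be32_to_cpu around
it):

	#include <stdint.h>
	#include <stdio.h>

	enum mlx4_protocol { MLX4_PROTOCOL_IB, MLX4_PROTOCOL_EN };

	int main(void)
	{
		uint32_t members_count = 5;
		enum mlx4_protocol protocol = MLX4_PROTOCOL_EN;

		/* pack: count in bits 0-23, protocol in bits 30-31 */
		uint32_t packed = members_count | (uint32_t) protocol << 30;

		printf("count = %u\n", packed & 0xffffff);	/* 5 */
		printf("proto = %u\n", packed >> 30);		/* 1 = EN */
		return 0;
	}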
+8 -2
include/linux/mlx4/device.h
···
 	MLX4_STAT_RATE_OFFSET = 5
 };
 
+enum mlx4_protocol {
+	MLX4_PROTOCOL_IB,
+	MLX4_PROTOCOL_EN,
+};
+
 enum {
 	MLX4_MTT_FLAG_PRESENT = 1
 };
···
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback);
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
+			  int block_mcast_loopback, enum mlx4_protocol protocol);
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  enum mlx4_protocol protocol);
 
 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
+1 -5
include/linux/mlx4/driver.h
···
 #define MLX4_DRIVER_H
 
 #include <linux/device.h>
+#include <linux/mlx4/device.h>
 
 struct mlx4_dev;
 
···
 	MLX4_DEV_EVENT_PORT_UP,
 	MLX4_DEV_EVENT_PORT_DOWN,
 	MLX4_DEV_EVENT_PORT_REINIT,
-};
-
-enum mlx4_protocol {
-	MLX4_PROTOCOL_IB,
-	MLX4_PROTOCOL_EN,
 };
 
 struct mlx4_interface {