Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
RDMA: Update workqueue usage
RDMA/nes: Fix incorrect SFP+ link status detection on driver init
RDMA/nes: Fix SFP+ link down detection issue with switch port disable
RDMA/nes: Generate IB_EVENT_PORT_ERR/PORT_ACTIVE events
RDMA/nes: Fix bonding on iw_nes
IB/srp: Test only once whether iu allocation succeeded
IB/mlx4: Handle protocol field in multicast table
RDMA: Use vzalloc() to replace vmalloc()+memset(0)
mlx4_{core, ib, en}: Fix driver when sizeof (phys_addr_t) > sizeof (long)
IB/mthca: Fix driver when sizeof (phys_addr_t) > sizeof (long)

+351 -144
+2 -2
drivers/infiniband/core/cache.c
···
 			INIT_WORK(&work->work, ib_cache_task);
 			work->device   = event->device;
 			work->port_num = event->element.port_num;
-			schedule_work(&work->work);
+			queue_work(ib_wq, &work->work);
 		}
 	}
 }
···
 	int p;

 	ib_unregister_event_handler(&device->cache.event_handler);
-	flush_scheduled_work();
+	flush_workqueue(ib_wq);

 	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
 		kfree(device->cache.pkey_cache[p]);
+9 -2
drivers/infiniband/core/device.c
···
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>

 #include "core_priv.h"
···
 	struct ib_client *client;
 	void * data;
 };
+
+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);

 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
···
 {
 	int ret;

+	ib_wq = alloc_workqueue("infiniband", 0, 0);
+	if (!ib_wq)
+		return -ENOMEM;
+
 	ret = ib_sysfs_setup();
 	if (ret)
 		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
···
 	if (ret) {
 		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
 		ib_sysfs_cleanup();
+		destroy_workqueue(ib_wq);
 	}

 	return ret;
···
 	ib_cache_cleanup();
 	ib_sysfs_cleanup();
 	/* Make sure that any pending umem accounting work is done. */
-	flush_scheduled_work();
+	destroy_workqueue(ib_wq);
 }

 module_init(ib_core_init);
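The two core hunks above replace implicit use of the kernel-global workqueue with a dedicated ib_wq that is created at module init and destroyed at module exit. What follows is a minimal, self-contained sketch of that lifecycle; the example_* names are hypothetical and only illustrate the alloc_workqueue()/queue_work()/destroy_workqueue() pattern that the rest of the series converts callers to.

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;	/* stands in for ib_wq */
	static struct work_struct example_work;

	static void example_handler(struct work_struct *work)
	{
		pr_info("deferred work ran on the dedicated workqueue\n");
	}

	static int __init example_init(void)
	{
		example_wq = alloc_workqueue("example", 0, 0);
		if (!example_wq)
			return -ENOMEM;

		INIT_WORK(&example_work, example_handler);
		queue_work(example_wq, &example_work);	/* instead of schedule_work() */
		return 0;
	}

	static void __exit example_exit(void)
	{
		/* destroy_workqueue() flushes pending work, then frees the queue */
		destroy_workqueue(example_wq);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");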
+1 -1
drivers/infiniband/core/sa_query.c
···
 		port->sm_ah = NULL;
 		spin_unlock_irqrestore(&port->ah_lock, flags);

-		schedule_work(&sa_dev->port[event->element.port_num -
+		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
 					    sa_dev->start_port].update_task);
 	}
 }
+1 -1
drivers/infiniband/core/umem.c
···
 			umem->mm   = mm;
 			umem->diff = diff;

-			schedule_work(&umem->work);
+			queue_work(ib_wq, &umem->work);
 			return;
 		}
 	} else
+2 -3
drivers/infiniband/hw/amso1100/c2_rnic.c
···
 					      IB_DEVICE_MEM_WINDOW);

 	/* Allocate the qptr_array */
-	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
+	c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
 	if (!c2dev->qptr_array) {
 		return -ENOMEM;
 	}

-	/* Inialize the qptr_array */
-	memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
+	/* Initialize the qptr_array */
 	c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
 	c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
 	c2dev->qptr_array[2] = (void *) &c2dev->aeq;
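This hunk, like the ehca, ipath, qib and ipoib hunks below, relies on vzalloc() returning already-zeroed memory, which makes the follow-up memset() unnecessary. A small sketch of the equivalence; table_old()/table_new() are hypothetical helper names, not part of the patch.

	#include <linux/vmalloc.h>
	#include <linux/string.h>

	/* Before: allocate, then clear by hand. */
	static void **table_old(size_t n)
	{
		void **p = vmalloc(n * sizeof(void *));

		if (p)
			memset(p, 0, n * sizeof(void *));
		return p;
	}

	/* After: vzalloc() hands back zeroed memory in one call. */
	static void **table_new(size_t n)
	{
		return vzalloc(n * sizeof(void *));
	}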
+2 -3
drivers/infiniband/hw/ehca/ipz_pt_fn.c
···
 	queue->small_page = NULL;

 	/* allocate queue page pointers */
-	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
 	if (!queue->queue_pages) {
-		queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+		queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
 		if (!queue->queue_pages) {
 			ehca_gen_err("Couldn't allocate queue page list");
 			return 0;
 		}
 	}
-	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));

 	/* allocate actual queue pages */
 	if (is_small) {
+2 -3
drivers/infiniband/hw/ipath/ipath_driver.c
···
 		goto bail;
 	}

-	dd = vmalloc(sizeof(*dd));
+	dd = vzalloc(sizeof(*dd));
 	if (!dd) {
 		dd = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
-	memset(dd, 0, sizeof(*dd));
 	dd->ipath_unit = -1;

 	spin_lock_irqsave(&ipath_devs_lock, flags);
···
 	 */
 	ipath_shutdown_device(dd);

-	flush_scheduled_work();
+	flush_workqueue(ib_wq);

 	if (dd->verbs_dev)
 		ipath_unregister_ib_device(dd->verbs_dev);
+3 -8
drivers/infiniband/hw/ipath/ipath_file_ops.c
···
 	}

 	num_subports = uinfo->spu_subport_cnt;
-	pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
+	pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
 	if (!pd->subport_uregbase) {
 		ret = -ENOMEM;
 		goto bail;
···
 	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
 	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
 		     sizeof(u32), PAGE_SIZE) * num_subports;
-	pd->subport_rcvhdr_base = vmalloc(size);
+	pd->subport_rcvhdr_base = vzalloc(size);
 	if (!pd->subport_rcvhdr_base) {
 		ret = -ENOMEM;
 		goto bail_ureg;
 	}

-	pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
+	pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
 					pd->port_rcvegrbuf_size *
 					num_subports);
 	if (!pd->subport_rcvegrbuf) {
···
 	pd->port_subport_id = uinfo->spu_subport_id;
 	pd->active_slaves = 1;
 	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-	memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
-	memset(pd->subport_rcvhdr_base, 0, size);
-	memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
-				pd->port_rcvegrbuf_size *
-				num_subports);
 	goto bail;

 bail_rhdr:
+1 -4
drivers/infiniband/hw/ipath/ipath_init_chip.c
···
 	struct page **pages;
 	dma_addr_t *addrs;

-	pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+	pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
 			sizeof(struct page *));
 	if (!pages) {
 		ipath_dev_err(dd, "failed to allocate shadow page * "
···
 		dd->ipath_pageshadow = NULL;
 		return;
 	}
-
-	memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-	       sizeof(struct page *));

 	dd->ipath_pageshadow = pages;
 	dd->ipath_physshadow = addrs;
+1 -1
drivers/infiniband/hw/ipath/ipath_user_pages.c
···
 	work->mm = mm;
 	work->num_pages = num_pages;

-	schedule_work(&work->work);
+	queue_work(ib_wq, &work->work);
 	return;

 bail_mm:
+7 -5
drivers/infiniband/hw/mlx4/main.c
···
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);

-	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
-				    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
+				    !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+				    MLX4_PROTOCOL_IB);
 	if (err)
 		return err;
···
 	return 0;

 err_add:
-	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
+	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
 	return err;
 }
···
 	struct mlx4_ib_gid_entry *ge;

 	err = mlx4_multicast_detach(mdev->dev,
-				    &mqp->mqp, gid->raw);
+				    &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
 	if (err)
 		return err;
···
 	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
 		goto err_pd;

-	ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
+				 PAGE_SIZE);
 	if (!ibdev->uar_map)
 		goto err_uar;
 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
+3 -2
drivers/infiniband/hw/mthca/mthca_catas.c
···

 void mthca_start_catas_poll(struct mthca_dev *dev)
 {
-	unsigned long addr;
+	phys_addr_t addr;

 	init_timer(&dev->catas_err.timer);
 	dev->catas_err.map  = NULL;
···
 	dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
 	if (!dev->catas_err.map) {
 		mthca_warn(dev, "couldn't map catastrophic error region "
-			   "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
+			   "at 0x%llx/0x%x\n", (unsigned long long) addr,
+			   dev->catas_err.size * 4);
 		return;
 	}

+1 -1
drivers/infiniband/hw/mthca/mthca_cmd.c
···

 static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
 {
-	unsigned long addr;
+	phys_addr_t addr;
 	u16 max_off = 0;
 	int i;

+1 -1
drivers/infiniband/hw/mthca/mthca_eq.c
···
 			       unsigned long offset, unsigned long size,
 			       void __iomem **map)
 {
-	unsigned long base = pci_resource_start(dev->pdev, 0);
+	phys_addr_t base = pci_resource_start(dev->pdev, 0);

 	*map = ioremap(base + offset, size);
 	if (!*map)
+1 -1
drivers/infiniband/hw/mthca/mthca_main.c
···
 		goto err_uar_table_free;
 	}

-	dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!dev->kar) {
 		mthca_err(dev, "Couldn't map kernel access region, "
 			  "aborting.\n");
+1 -1
drivers/infiniband/hw/mthca/mthca_mr.c
···

 int mthca_init_mr_table(struct mthca_dev *dev)
 {
-	unsigned long addr;
+	phys_addr_t addr;
 	int mpts, mtts, err, i;

 	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
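The mthca and mlx4 hunks in this merge widen address variables to phys_addr_t so the drivers keep working when physical addresses are wider than unsigned long (for example a 32-bit kernel with a 64-bit phys_addr_t). Below is a hedged sketch of the two recurring idioms, casting before shifting and printing through unsigned long long; map_uar() and report() are made-up names for illustration only.

	#include <linux/io.h>
	#include <linux/mm.h>
	#include <linux/types.h>

	/* Hypothetical example: pfn is a page frame number supplied by the HCA. */
	static void __iomem *map_uar(unsigned long pfn)
	{
		/*
		 * Cast before shifting: without the cast, "pfn << PAGE_SHIFT"
		 * is computed in unsigned long and is truncated on a 32-bit
		 * kernel even though phys_addr_t could hold the result.
		 */
		return ioremap((phys_addr_t) pfn << PAGE_SHIFT, PAGE_SIZE);
	}

	static void report(phys_addr_t addr)
	{
		/* phys_addr_t may be 32 or 64 bits; print via unsigned long long. */
		pr_info("mapping at 0x%llx\n", (unsigned long long) addr);
	}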
+30 -5
drivers/infiniband/hw/nes/nes.c
···
 	struct nes_device *nesdev;
 	struct net_device *netdev;
 	struct nes_vnic *nesvnic;
+	unsigned int is_bonded;

 	nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n",
 		  &ifa->ifa_address, &ifa->ifa_mask);
···
 			  nesdev, nesdev->netdev[0]->name);
 		netdev = nesdev->netdev[0];
 		nesvnic = netdev_priv(netdev);
-		if (netdev == event_netdev) {
+		is_bonded = (netdev->master == event_netdev);
+		if ((netdev == event_netdev) || is_bonded) {
 			if (nesvnic->rdma_enabled == 0) {
 				nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
 						" RDMA is not enabled.\n",
···
 				nes_manage_arp_cache(netdev, netdev->dev_addr,
 						ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
 				nesvnic->local_ipaddr = 0;
-				return NOTIFY_OK;
+				if (is_bonded)
+					continue;
+				else
+					return NOTIFY_OK;
 				break;
 			case NETDEV_UP:
 				nes_debug(NES_DBG_NETDEV, "event:UP\n");
···
 					nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
 					return NOTIFY_OK;
 				}
+				/* fall through */
+			case NETDEV_CHANGEADDR:
 				/* Add the address to the IP table */
-				nesvnic->local_ipaddr = ifa->ifa_address;
+				if (netdev->master)
+					nesvnic->local_ipaddr =
+						((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address;
+				else
+					nesvnic->local_ipaddr = ifa->ifa_address;

 				nes_write_indexed(nesdev,
 						NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
-						ntohl(ifa->ifa_address));
+						ntohl(nesvnic->local_ipaddr));
 				nes_manage_arp_cache(netdev, netdev->dev_addr,
 						ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
-				return NOTIFY_OK;
+				if (is_bonded)
+					continue;
+				else
+					return NOTIFY_OK;
 				break;
 			default:
 				break;
···
 	}
 	nes_notifiers_registered++;

+	INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
+
 	/* Initialize network devices */
 	if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
 		goto bail7;
···
 	struct nes_device *nesdev = pci_get_drvdata(pcidev);
 	struct net_device *netdev;
 	int netdev_index = 0;
+	unsigned long flags;

 	if (nesdev->netdev_count) {
 		netdev = nesdev->netdev[netdev_index];
···

 	free_irq(pcidev->irq, nesdev);
 	tasklet_kill(&nesdev->dpc_tasklet);
+
+	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+	if (nesdev->link_recheck) {
+		spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+		cancel_delayed_work_sync(&nesdev->work);
+	} else {
+		spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+	}

 	/* Deallocate the Adapter Structure */
 	nes_destroy_adapter(nesdev->nesadapter);
+4
drivers/infiniband/hw/nes/nes.h
···
 	u8 napi_isr_ran;
 	u8 disable_rx_flow_control;
 	u8 disable_tx_flow_control;
+
+	struct delayed_work work;
+	u8 link_recheck;
 };

···
 void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
 int nes_destroy_cqp(struct nes_device *);
 int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+void nes_recheck_link_status(struct work_struct *work);

 /* nes_nic.c */
 struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
+7 -1
drivers/infiniband/hw/nes/nes_cm.c
···
 	struct flowi fl;
 	struct neighbour *neigh;
 	int rc = arpindex;
+	struct net_device *netdev;
 	struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;

 	memset(&fl, 0, sizeof fl);
···
 		return rc;
 	}

-	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, nesvnic->netdev);
+	if (nesvnic->netdev->master)
+		netdev = nesvnic->netdev->master;
+	else
+		netdev = nesvnic->netdev;
+
+	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
 	if (neigh) {
 		if (neigh->nud_state & NUD_VALID) {
 			nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
+95
drivers/infiniband/hw/nes/nes_hw.c
···
 				netif_start_queue(nesvnic->netdev);
 				nesvnic->linkup = 1;
 				netif_carrier_on(nesvnic->netdev);
+
+				spin_lock(&nesvnic->port_ibevent_lock);
+				if (nesdev->iw_status == 0) {
+					nesdev->iw_status = 1;
+					nes_port_ibevent(nesvnic);
+				}
+				spin_unlock(&nesvnic->port_ibevent_lock);
 			}
 		}
 	} else {
···
 				netif_stop_queue(nesvnic->netdev);
 				nesvnic->linkup = 0;
 				netif_carrier_off(nesvnic->netdev);
+
+				spin_lock(&nesvnic->port_ibevent_lock);
+				if (nesdev->iw_status == 1) {
+					nesdev->iw_status = 0;
+					nes_port_ibevent(nesvnic);
+				}
+				spin_unlock(&nesvnic->port_ibevent_lock);
 			}
 		}
+	}
+	if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
+		if (nesdev->link_recheck)
+			cancel_delayed_work(&nesdev->work);
+		nesdev->link_recheck = 1;
+		schedule_delayed_work(&nesdev->work,
+				      NES_LINK_RECHECK_DELAY);
 	}
 }
···
 	nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
 }

+void nes_recheck_link_status(struct work_struct *work)
+{
+	unsigned long flags;
+	struct nes_device *nesdev = container_of(work, struct nes_device, work.work);
+	struct nes_adapter *nesadapter = nesdev->nesadapter;
+	struct nes_vnic *nesvnic;
+	u32 mac_index = nesdev->mac_index;
+	u16 phy_data;
+	u16 temp_phy_data;
+
+	spin_lock_irqsave(&nesadapter->phy_lock, flags);
+
+	/* check link status */
+	nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
+	temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+	nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
+	nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+	nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
+	phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+	phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
+
+	nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
+		__func__, phy_data,
+		nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
+
+	if (phy_data & 0x0004) {
+		nesadapter->mac_link_down[mac_index] = 0;
+		list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+			if (nesvnic->linkup == 0) {
+				printk(PFX "The Link is now up for port %s, netdev %p.\n",
+						nesvnic->netdev->name, nesvnic->netdev);
+				if (netif_queue_stopped(nesvnic->netdev))
+					netif_start_queue(nesvnic->netdev);
+				nesvnic->linkup = 1;
+				netif_carrier_on(nesvnic->netdev);
+
+				spin_lock(&nesvnic->port_ibevent_lock);
+				if (nesdev->iw_status == 0) {
+					nesdev->iw_status = 1;
+					nes_port_ibevent(nesvnic);
+				}
+				spin_unlock(&nesvnic->port_ibevent_lock);
+			}
+		}
+
+	} else {
+		nesadapter->mac_link_down[mac_index] = 1;
+		list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+			if (nesvnic->linkup == 1) {
+				printk(PFX "The Link is now down for port %s, netdev %p.\n",
+						nesvnic->netdev->name, nesvnic->netdev);
+				if (!(netif_queue_stopped(nesvnic->netdev)))
+					netif_stop_queue(nesvnic->netdev);
+				nesvnic->linkup = 0;
+				netif_carrier_off(nesvnic->netdev);
+
+				spin_lock(&nesvnic->port_ibevent_lock);
+				if (nesdev->iw_status == 1) {
+					nesdev->iw_status = 0;
+					nes_port_ibevent(nesvnic);
+				}
+				spin_unlock(&nesvnic->port_ibevent_lock);
+			}
+		}
+	}
+	if (nesdev->link_recheck++ < NES_LINK_RECHECK_MAX)
+		schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+	else
+		nesdev->link_recheck = 0;
+
+	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+}


 static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+10
drivers/infiniband/hw/nes/nes_hw.h
···

 struct nes_ib_device;

+#define NES_EVENT_DELAY msecs_to_jiffies(100)
+
 struct nes_vnic {
 	struct nes_ib_device *nesibdev;
 	u64 sq_full;
···
 	u32 lro_max_aggr;
 	struct net_lro_mgr lro_mgr;
 	struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
+	struct timer_list event_timer;
+	enum ib_event_type delayed_event;
+	enum ib_event_type last_dispatched_event;
+	spinlock_t port_ibevent_lock;
 };

 struct nes_ib_device {
···
 #define RDMA_READ_REQ_OPCODE 1
 #define BAD_FRAME_OFFSET     64
 #define CQE_MAJOR_DRV        0x8000
+
+/* Used for link status recheck after interrupt processing */
+#define NES_LINK_RECHECK_DELAY	msecs_to_jiffies(50)
+#define NES_LINK_RECHECK_MAX	60

 #define nes_vlan_rx vlan_hwaccel_receive_skb
 #define nes_netif_rx netif_receive_skb
+66 -12
drivers/infiniband/hw/nes/nes_nic.c
···
 	u32 nic_active_bit;
 	u32 nic_active;
 	struct list_head *list_pos, *list_temp;
+	unsigned long flags;

 	assert(nesdev != NULL);
···
 			first_nesvnic = nesvnic;
 	}

-	if (nesvnic->of_device_registered) {
-		nesdev->iw_status = 1;
-		nesdev->nesadapter->send_term_ok = 1;
-		nes_port_ibevent(nesvnic);
-	}
-
 	if (first_nesvnic->linkup) {
 		/* Enable network packets */
 		nesvnic->linkup = 1;
 		netif_start_queue(netdev);
 		netif_carrier_on(netdev);
 	}
+
+	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+	if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
+		if (nesdev->link_recheck)
+			cancel_delayed_work(&nesdev->work);
+		nesdev->link_recheck = 1;
+		schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+	}
+	spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+
+	spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
+	if (nesvnic->of_device_registered) {
+		nesdev->nesadapter->send_term_ok = 1;
+		if (nesvnic->linkup == 1) {
+			if (nesdev->iw_status == 0) {
+				nesdev->iw_status = 1;
+				nes_port_ibevent(nesvnic);
+			}
+		} else {
+			nesdev->iw_status = 0;
+		}
+	}
+	spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
+
 	napi_enable(&nesvnic->napi);
 	nesvnic->netdev_open = 1;
···
 	u32 nic_active;
 	struct nes_vnic *first_nesvnic = NULL;
 	struct list_head *list_pos, *list_temp;
+	unsigned long flags;

 	nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
 			nesvnic, nesdev, netdev, netdev->name);
···
 	nic_active &= nic_active_mask;
 	nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);

-
+	spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
 	if (nesvnic->of_device_registered) {
 		nesdev->nesadapter->send_term_ok = 0;
 		nesdev->iw_status = 0;
-		nes_port_ibevent(nesvnic);
+		if (nesvnic->linkup == 1)
+			nes_port_ibevent(nesvnic);
 	}
+	del_timer_sync(&nesvnic->event_timer);
+	nesvnic->event_timer.function = NULL;
+	spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
+
 	nes_destroy_nic_qp(nesvnic);

 	nesvnic->netdev_open = 0;
···
 		nesvnic->rdma_enabled = 0;
 	}
 	nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
+	init_timer(&nesvnic->event_timer);
+	nesvnic->event_timer.function = NULL;
 	spin_lock_init(&nesvnic->tx_lock);
+	spin_lock_init(&nesvnic->port_ibevent_lock);
 	nesdev->netdev[nesdev->netdev_count] = netdev;

 	nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
···
 	    (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
 	    ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
 		u32 u32temp;
-		u32 link_mask;
-		u32 link_val;
+		u32 link_mask = 0;
+		u32 link_val = 0;
+		u16 temp_phy_data;
+		u16 phy_data = 0;
+		unsigned long flags;

 		u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
 				(0x200 * (nesdev->mac_index & 1)));
···
 				link_val = 0x02020000;
 			}
 			break;
+		case NES_PHY_TYPE_SFP_D:
+			spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+			nes_read_10G_phy_reg(nesdev,
+					nesdev->nesadapter->phy_index[nesdev->mac_index],
+					1, 0x9003);
+			temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+			nes_read_10G_phy_reg(nesdev,
+					nesdev->nesadapter->phy_index[nesdev->mac_index],
+					3, 0x0021);
+			nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+			nes_read_10G_phy_reg(nesdev,
+					nesdev->nesadapter->phy_index[nesdev->mac_index],
+					3, 0x0021);
+			phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+			spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+			phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
+			break;
 		default:
 			link_mask = 0x0f1f0000;
 			link_val = 0x0f0f0000;
···
 		u32temp = nes_read_indexed(nesdev,
 				NES_IDX_PHY_PCS_CONTROL_STATUS0 +
 				(0x200 * (nesdev->mac_index & 1)));
-		if ((u32temp & link_mask) == link_val)
-			nesvnic->linkup = 1;
+
+		if (phy_type == NES_PHY_TYPE_SFP_D) {
+			if (phy_data & 0x0004)
+				nesvnic->linkup = 1;
+		} else {
+			if ((u32temp & link_mask) == link_val)
+				nesvnic->linkup = 1;
+		}

 		/* clear the MAC interrupt status, assumes direct logical to physical mapping */
 		u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
+36 -1
drivers/infiniband/hw/nes/nes_verbs.c
···
 	return nesibdev;
 }

+
+/**
+ * nes_handle_delayed_event
+ */
+static void nes_handle_delayed_event(unsigned long data)
+{
+	struct nes_vnic *nesvnic = (void *) data;
+
+	if (nesvnic->delayed_event != nesvnic->last_dispatched_event) {
+		struct ib_event event;
+
+		event.device = &nesvnic->nesibdev->ibdev;
+		if (!event.device)
+			goto stop_timer;
+		event.event = nesvnic->delayed_event;
+		event.element.port_num = nesvnic->logical_port + 1;
+		ib_dispatch_event(&event);
+	}
+
+stop_timer:
+	nesvnic->event_timer.function = NULL;
+}
+
 void nes_port_ibevent(struct nes_vnic *nesvnic)
 {
 	struct nes_ib_device *nesibdev = nesvnic->nesibdev;
···
 	event.device = &nesibdev->ibdev;
 	event.element.port_num = nesvnic->logical_port + 1;
 	event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
-	ib_dispatch_event(&event);
+
+	if (!nesvnic->event_timer.function) {
+		ib_dispatch_event(&event);
+		nesvnic->last_dispatched_event = event.event;
+		nesvnic->event_timer.function = nes_handle_delayed_event;
+		nesvnic->event_timer.data = (unsigned long) nesvnic;
+		nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
+		add_timer(&nesvnic->event_timer);
+	} else {
+		mod_timer(&nesvnic->event_timer, jiffies + NES_EVENT_DELAY);
+	}
+	nesvnic->delayed_event = event.event;
 }

+3 -4
drivers/infiniband/hw/qib/qib_iba7220.c
···
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);

 	shutdown_7220_relock_poll(ppd->dd);
 	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
···

 	toggle_7220_rclkrls(ppd->dd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }

 /*
+7 -7
drivers/infiniband/hw/qib/qib_iba7322.c
···
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 	if (ppd->dd->cspec->r1)
-		cancel_delayed_work(&ppd->cpspec->ipg_work);
-	flush_scheduled_work();
+		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

 	ppd->cpspec->chase_end = 0;
 	if (ppd->cpspec->chase_timer.data) /* if initted */
···
 			if (!(pins & mask)) {
 				++handled;
 				qd->t_insert = get_jiffies_64();
-				schedule_work(&qd->work);
+				queue_work(ib_wq, &qd->work);
 			}
 		}
 	}
···
 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
 	qib_7322_mini_pcs_reset(ppd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }

 /*
···
 	ib_free_send_mad(send_buf);
 retry:
 	delay = 2 << ppd->cpspec->ipg_tries;
-	schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+			   msecs_to_jiffies(delay));
 }

 /*
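Both qib hunks above drop the cancel_delayed_work() + flush_scheduled_work() pair in favour of cancel_delayed_work_sync(), which waits only for the one work item instead of draining the whole global queue. A minimal sketch of the pattern, assuming a hypothetical port_state structure:

	#include <linux/workqueue.h>

	struct port_state {
		struct delayed_work autoneg_work;	/* hypothetical field name */
	};

	static void port_shutdown(struct port_state *ps)
	{
		/*
		 * cancel_delayed_work() only removes a pending timer/work item;
		 * a handler that is already running keeps running.  The old code
		 * then relied on flush_scheduled_work() to wait for it, which
		 * stalls on every item in the global queue.  The _sync variant
		 * cancels and waits for just this work item.
		 */
		cancel_delayed_work_sync(&ps->autoneg_work);
	}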
+6 -27
drivers/infiniband/hw/qib/qib_init.c
···
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");

-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;

 static void verify_interrupt(unsigned long);
···
 	struct page **pages;
 	dma_addr_t *addrs;

-	pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
 	if (!pages) {
 		qib_dev_err(dd, "failed to allocate shadow page * "
 			    "array, no expected sends!\n");
 		goto bail;
 	}

-	addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
 	if (!addrs) {
 		qib_dev_err(dd, "failed to allocate shadow dma handle "
 			    "array, no expected sends!\n");
 		goto bail_free;
 	}
-
-	memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
-	memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));

 	dd->pageshadow = pages;
 	dd->physshadow = addrs;
···
 	if (ret)
 		goto bail;

-	/*
-	 * We create our own workqueue mainly because we want to be
-	 * able to flush it when devices are being removed.  We can't
-	 * use schedule_work()/flush_scheduled_work() because both
-	 * unregister_netdev() and linkwatch_event take the rtnl lock,
-	 * so flush_scheduled_work() can deadlock during device
-	 * removal.
-	 */
-	qib_wq = create_workqueue("qib");
-	if (!qib_wq) {
-		ret = -ENOMEM;
-		goto bail_dev;
-	}
-
 	qib_cq_wq = create_singlethread_workqueue("qib_cq");
 	if (!qib_cq_wq) {
 		ret = -ENOMEM;
-		goto bail_wq;
+		goto bail_dev;
 	}

 	/*
···
 	idr_destroy(&qib_unit_table);
 bail_cq_wq:
 	destroy_workqueue(qib_cq_wq);
-bail_wq:
-	destroy_workqueue(qib_wq);
 bail_dev:
 	qib_dev_cleanup();
 bail:
···

 	pci_unregister_driver(&qib_driver);

-	destroy_workqueue(qib_wq);
 	destroy_workqueue(qib_cq_wq);

 	qib_cpulist_count = 0;
···

 	if (qib_mini_init || initfail || ret) {
 		qib_stop_timers(dd);
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);
 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
 			dd->f_quiet_serdes(dd->pport + pidx);
 		if (qib_mini_init)
···

 	qib_stop_timers(dd);

-	/* wait until all of our (qsfp) schedule_work() calls complete */
-	flush_scheduled_work();
+	/* wait until all of our (qsfp) queue_work() calls complete */
+	flush_workqueue(ib_wq);

 	ret = qibfs_remove(dd);
 	if (ret)
+4 -5
drivers/infiniband/hw/qib/qib_qsfp.c
···
 		goto bail;
 	/* We see a module, but it may be unwise to look yet. Just schedule */
 	qd->t_insert = get_jiffies_64();
-	schedule_work(&qd->work);
+	queue_work(ib_wq, &qd->work);
 bail:
 	return;
 }
···
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
 	/*
-	 * There is nothing to do here for now.  our
-	 * work is scheduled with schedule_work(), and
-	 * flush_scheduled_work() from remove_one will
-	 * block until all work ssetup with schedule_work()
+	 * There is nothing to do here for now.  our work is scheduled
+	 * with queue_work(), and flush_workqueue() from remove_one
+	 * will block until all work setup with queue_work()
 	 * completes.
 	 */
 }
+1 -2
drivers/infiniband/hw/qib/qib_verbs.h
···
 	       !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }

-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;

 /*
···
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
 	if (qib_send_ok(qp))
-		queue_work(qib_wq, &qp->s_work);
+		queue_work(ib_wq, &qp->s_work);
 }

 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
+3 -7
drivers/infiniband/ulp/ipoib/ipoib_cm.c
···
 	int ret;
 	int i;

-	rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
 	if (!rx->rx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
 		return -ENOMEM;
 	}
-
-	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);

 	t = kmalloc(sizeof *t, GFP_KERNEL);
 	if (!t) {
···
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
 	int ret;

-	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
+	p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
 	if (!p->tx_ring) {
 		ipoib_warn(priv, "failed to allocate tx ring\n");
 		ret = -ENOMEM;
 		goto err_tx;
 	}
-	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
 	if (IS_ERR(p->qp)) {
···
 		return;
 	}

-	priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
+	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 	if (!priv->cm.srq_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
···
 		return;
 	}

-	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 }

 int ipoib_cm_dev_init(struct net_device *dev)
+1 -2
drivers/infiniband/ulp/ipoib/ipoib_main.c
···
 		goto out;
 	}

-	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
+	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
 	if (!priv->tx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
 		       ca->name, ipoib_sendq_size);
 		goto out_rx_ring_cleanup;
 	}
-	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

 	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

+9 -10
drivers/infiniband/ulp/srp/ib_srp.c
···
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
-		schedule_work(&target->work);
+		queue_work(ib_wq, &target->work);
 	}
 	spin_unlock_irq(&target->lock);

···

 	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
-	if (iu) {
-		req = list_first_entry(&target->free_reqs, struct srp_request,
-				       list);
-		list_del(&req->list);
-	}
-	spin_unlock_irqrestore(&target->lock, flags);
-
 	if (!iu)
-		goto err;
+		goto err_unlock;
+
+	req = list_first_entry(&target->free_reqs, struct srp_request, list);
+	list_del(&req->list);
+	spin_unlock_irqrestore(&target->lock, flags);

 	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
···

 	spin_lock_irqsave(&target->lock, flags);
 	list_add(&req->list, &target->free_reqs);
+
+err_unlock:
 	spin_unlock_irqrestore(&target->lock, flags);

 err:
···
 		 * started before we marked our target ports as
 		 * removed, and any target port removal tasks.
 		 */
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);

 		list_for_each_entry_safe(target, tmp_target,
 					 &host->target_list, list) {
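The srp_queuecommand change above tests the iu pointer once and keeps the free-request list manipulation under a single lock acquisition, unwinding through an err_unlock label. A rough, hypothetical reduction of that lock-scope pattern (the function and variable names are invented for illustration):

	#include <linux/spinlock.h>
	#include <linux/list.h>

	static struct list_head *take_free_entry(spinlock_t *lock,
						 struct list_head *free_list)
	{
		struct list_head *entry = NULL;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		if (list_empty(free_list))
			goto err_unlock;	/* tested once, still holding the lock */

		entry = free_list->next;
		list_del(entry);

	err_unlock:
		spin_unlock_irqrestore(lock, flags);
		return entry;
	}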
+3 -3
drivers/net/mlx4/catas.c
···
 void mlx4_start_catas_poll(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	unsigned long addr;
+	phys_addr_t addr;

 	INIT_LIST_HEAD(&priv->catas_err.list);
 	init_timer(&priv->catas_err.timer);
···

 	priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
 	if (!priv->catas_err.map) {
-		mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
-			  addr);
+		mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+			  (unsigned long long) addr);
 		return;
 	}

+2 -1
drivers/net/mlx4/en_main.c
···
 	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
 		goto err_pd;

-	mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
+				PAGE_SIZE);
 	if (!mdev->uar_map)
 		goto err_uar;
 	spin_lock_init(&mdev->uar_lock);
+1 -1
drivers/net/mlx4/main.c
···
 		goto err_uar_table_free;
 	}

-	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!priv->kar) {
 		mlx4_err(dev, "Couldn't map kernel access region, "
 			 "aborting.\n");
+13 -10
drivers/net/mlx4/mcg.c
···
  * entry in hash chain and *mgm holds end of hash chain.
  */
 static int find_mgm(struct mlx4_dev *dev,
-		    u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
+		    u8 *gid, enum mlx4_protocol protocol,
+		    struct mlx4_cmd_mailbox *mgm_mailbox,
 		    u16 *hash, int *prev, int *index)
 {
 	struct mlx4_cmd_mailbox *mailbox;
···
 			return err;
 		}

-		if (!memcmp(mgm->gid, gid, 16))
+		if (!memcmp(mgm->gid, gid, 16) &&
+		    be32_to_cpu(mgm->members_count) >> 30 == protocol)
 			return err;

 		*prev = *index;
···
 }

 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback)
+			  int block_mcast_loopback, enum mlx4_protocol protocol)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
···

 	mutex_lock(&priv->mcg_table.mutex);

-	err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+	err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
 	if (err)
 		goto out;
···
 		memcpy(mgm->gid, gid, 16);
 	}

-	members_count = be32_to_cpu(mgm->members_count);
+	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	if (members_count == MLX4_QP_PER_MGM) {
 		mlx4_err(dev, "MGM at index %x is full.\n", index);
 		err = -ENOMEM;
···
 	else
 		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

-	mgm->members_count = cpu_to_be32(members_count);
+	mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30);

 	err = mlx4_WRITE_MCG(dev, index, mailbox);
 	if (err)
···
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_attach);

-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  enum mlx4_protocol protocol)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
···

 	mutex_lock(&priv->mcg_table.mutex);

-	err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+	err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
 	if (err)
 		goto out;
···
 		goto out;
 	}

-	members_count = be32_to_cpu(mgm->members_count);
+	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	for (loc = -1, i = 0; i < members_count; ++i)
 		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
 			loc = i;
···
 	}


-	mgm->members_count = cpu_to_be32(--members_count);
+	mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30);
 	mgm->qp[loc] = mgm->qp[i - 1];
 	mgm->qp[i - 1] = 0;

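These mcg.c hunks store the protocol in the top bits of the 32-bit members_count word of an MGM entry, so reads of the member count must mask with 0xffffff and writes must OR the protocol back in. Hypothetical helpers showing the same packing arithmetic (not part of the patch):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Low 24 bits carry the member count, the top bits carry the protocol. */
	static inline u32 mgm_members(__be32 members_count)
	{
		return be32_to_cpu(members_count) & 0xffffff;
	}

	static inline u32 mgm_protocol(__be32 members_count)
	{
		return be32_to_cpu(members_count) >> 30;
	}

	static inline __be32 mgm_pack(u32 count, u32 protocol)
	{
		return cpu_to_be32(count | protocol << 30);
	}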
+8 -2
include/linux/mlx4/device.h
···
 	MLX4_STAT_RATE_OFFSET	= 5
 };

+enum mlx4_protocol {
+	MLX4_PROTOCOL_IB,
+	MLX4_PROTOCOL_EN,
+};
+
 enum {
 	MLX4_MTT_FLAG_PRESENT		= 1
 };
···
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);

 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback);
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
+			  int block_mcast_loopback, enum mlx4_protocol protocol);
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  enum mlx4_protocol protocol);

 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
+1 -5
include/linux/mlx4/driver.h
···
 #define MLX4_DRIVER_H

 #include <linux/device.h>
+#include <linux/mlx4/device.h>

 struct mlx4_dev;

···
 	MLX4_DEV_EVENT_PORT_UP,
 	MLX4_DEV_EVENT_PORT_DOWN,
 	MLX4_DEV_EVENT_PORT_REINIT,
-};
-
-enum mlx4_protocol {
-	MLX4_PROTOCOL_IB,
-	MLX4_PROTOCOL_EN,
 };

 struct mlx4_interface {
+3
include/rdma/ib_verbs.h
···
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>

 #include <asm/atomic.h>
 #include <asm/uaccess.h>
+
+extern struct workqueue_struct *ib_wq;

 union ib_gid {
 	u8	raw[16];