drivers/infiniband/core/cm.c

···
 }
 EXPORT_SYMBOL(ib_cm_init_qp_attr);

-static __be64 cm_get_ca_guid(struct ib_device *device)
-{
-	struct ib_device_attr *device_attr;
-	__be64 guid;
-	int ret;
-
-	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
-	if (!device_attr)
-		return 0;
-
-	ret = ib_query_device(device, device_attr);
-	guid = ret ? 0 : device_attr->node_guid;
-	kfree(device_attr);
-	return guid;
-}
-
 static void cm_add_one(struct ib_device *device)
 {
 	struct cm_device *cm_dev;
···
 		return;

 	cm_dev->device = device;
-	cm_dev->ca_guid = cm_get_ca_guid(device);
-	if (!cm_dev->ca_guid)
-		goto error1;
+	cm_dev->ca_guid = device->node_guid;

 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
 	for (i = 1; i <= device->phys_port_cnt; i++) {
···
 						cm_recv_handler,
 						port);
 		if (IS_ERR(port->mad_agent))
-			goto error2;
+			goto error1;

 		ret = ib_modify_port(device, i, 0, &port_modify);
 		if (ret)
-			goto error3;
+			goto error2;
 	}
 	ib_set_client_data(device, &cm_client, cm_dev);
···
 	write_unlock_irqrestore(&cm.device_lock, flags);
 	return;

-error3:
-	ib_unregister_mad_agent(port->mad_agent);
 error2:
+	ib_unregister_mad_agent(port->mad_agent);
+error1:
 	port_modify.set_port_cap_mask = 0;
 	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
 	while (--i) {
···
 		ib_modify_port(device, port->port_num, 0, &port_modify);
 		ib_unregister_mad_agent(port->mad_agent);
 	}
-error1:
 	kfree(cm_dev);
 }
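This hunk relies on the node GUID now being cached in struct ib_device itself: cm_add_one() no longer has to kmalloc() an ib_device_attr and call ib_query_device() just to read the GUID, and the old !ca_guid failure path disappears, letting the remaining error labels shift down by one. A rough sketch of the header change the hunk presumes (the field name node_guid is taken from the hunk; its exact placement in include/rdma/ib_verbs.h is assumed):

	/* include/rdma/ib_verbs.h (sketch): cache the node GUID at device
	 * registration time so readers don't need a provider query. */
	struct ib_device {
		/* ... existing fields ... */
		__be64		node_guid;	/* filled in by the low-level driver */
		/* ... */
	};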
drivers/infiniband/core/device.c  +11 -12

···
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-
-#include <asm/semaphore.h>
+#include <linux/mutex.h>

 #include "core_priv.h"
···
 static LIST_HEAD(client_list);

 /*
- * device_sem protects access to both device_list and client_list.
+ * device_mutex protects access to both device_list and client_list.
  * There's no real point to using multiple locks or something fancier
  * like an rwsem: we always access both lists, and we're always
  * modifying one list or the other list. In any case this is not a
  * hot path so there's no point in trying to optimize.
  */
-static DECLARE_MUTEX(device_sem);
+static DEFINE_MUTEX(device_mutex);

 static int ib_device_check_mandatory(struct ib_device *device)
 {
···
 {
 	int ret;

-	down(&device_sem);
+	mutex_lock(&device_mutex);

 	if (strchr(device->name, '%')) {
 		ret = alloc_name(device->name);
···
 	}

  out:
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(ib_register_device);
···
 	struct ib_client_data *context, *tmp;
 	unsigned long flags;

-	down(&device_sem);
+	mutex_lock(&device_mutex);

 	list_for_each_entry_reverse(client, &client_list, list)
 		if (client->remove)
···

 	list_del(&device->core_list);

-	up(&device_sem);
+	mutex_unlock(&device_mutex);

 	spin_lock_irqsave(&device->client_data_lock, flags);
 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
···
 {
 	struct ib_device *device;

-	down(&device_sem);
+	mutex_lock(&device_mutex);

 	list_add_tail(&client->list, &client_list);
 	list_for_each_entry(device, &device_list, core_list)
 		if (client->add && !add_client_context(device, client))
 			client->add(device);

-	up(&device_sem);
+	mutex_unlock(&device_mutex);

 	return 0;
 }
···
 	struct ib_device *device;
 	unsigned long flags;

-	down(&device_sem);
+	mutex_lock(&device_mutex);

 	list_for_each_entry(device, &device_list, core_list) {
 		if (client->remove)
···
 	}
 	list_del(&client->list);

-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 }
 EXPORT_SYMBOL(ib_unregister_client);
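The rest of the patch repeats this mechanical conversion from the old semaphore-as-mutex API to the real mutex API. A minimal sketch of the mapping, using a made-up lock name (the mutex calls themselves are the real <linux/mutex.h> API):

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */

	static void example_critical_section(void)
	{
		mutex_lock(&example_mutex);	/* was: down(&example_sem); may sleep */
		/* ... touch the shared state ... */
		mutex_unlock(&example_mutex);	/* was: up(&example_sem); */
	}

For a mutex embedded in a structure, init_MUTEX(&priv->lock) likewise becomes mutex_init(&priv->lock), as the ipoib_main.c hunk below shows.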
drivers/infiniband/core/sysfs.c  +5 -17

···
 		return -ENOMEM;

 	/*
-	 * It might be nice to pass the node GUID with the event, but
-	 * right now the only way to get it is to query the device
-	 * provider, and this can crash during device removal because
-	 * we will be running after driver removal has started.
-	 * We could add a node_guid field to struct ib_device, or we
-	 * could just let userspace read the node GUID from sysfs when
-	 * devices are added.
+	 * It would be nice to pass the node GUID with the event...
 	 */

 	envp[i] = NULL;
···
 static ssize_t show_node_guid(struct class_device *cdev, char *buf)
 {
 	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
-	struct ib_device_attr attr;
-	ssize_t ret;

 	if (!ibdev_is_alive(dev))
 		return -ENODEV;

-	ret = ib_query_device(dev, &attr);
-	if (ret)
-		return ret;
-
 	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
 }

 static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
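show_node_guid() simply reinterprets the big-endian 64-bit GUID as four big-endian 16-bit groups, which is easy to sanity-check outside the kernel. A userspace analogue, with a made-up GUID value and <endian.h>'s htobe64()/be16toh() standing in for the kernel's cpu_to_be64()/be16_to_cpu():

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* Hypothetical GUID, stored big-endian as in struct ib_device. */
		uint64_t node_guid = htobe64(0x0002c90200402bd0ULL);
		uint16_t group[4];

		memcpy(group, &node_guid, sizeof group);
		printf("%04x:%04x:%04x:%04x\n",
		       be16toh(group[0]), be16toh(group[1]),
		       be16toh(group[2]), be16toh(group[3]));
		return 0;	/* prints 0002:c902:0040:2bd0 */
	}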
drivers/infiniband/ulp/ipoib/ipoib_ib.c

···

 #define IPOIB_OP_RECV   (1ul << 31)

-static DECLARE_MUTEX(pkey_sem);
+static DEFINE_MUTEX(pkey_mutex);

 struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
 				 struct ib_pd *pd, struct ib_ah_attr *attr)
···

 	/* Shutdown the P_Key thread if still active */
 	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		set_bit(IPOIB_PKEY_STOP, &priv->flags);
 		cancel_delayed_work(&priv->pkey_task);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 		flush_workqueue(ipoib_workqueue);
 	}

 	ipoib_mcast_stop_thread(dev, 1);
-
-	/*
-	 * Flush the multicast groups first so we stop any multicast joins. The
-	 * completion thread may have already died and we may deadlock waiting
-	 * for the completion thread to finish some multicast joins.
-	 */
 	ipoib_mcast_dev_flush(dev);
-
-	/* Delete broadcast and local addresses since they will be recreated */
-	ipoib_mcast_dev_down(dev);

 	ipoib_flush_paths(dev);
···
 	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		ipoib_ib_dev_up(dev);

-	down(&priv->vlan_mutex);
+	mutex_lock(&priv->vlan_mutex);

 	/* Flush any child interfaces too */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
 		ipoib_ib_dev_flush(&cpriv->dev);

-	up(&priv->vlan_mutex);
+	mutex_unlock(&priv->vlan_mutex);
 }

 void ipoib_ib_dev_cleanup(struct net_device *dev)
···
 	ipoib_dbg(priv, "cleaning up ib_dev\n");

 	ipoib_mcast_stop_thread(dev, 1);
-
-	/* Delete the broadcast address and the local address */
-	ipoib_mcast_dev_down(dev);
+	ipoib_mcast_dev_flush(dev);

 	ipoib_transport_dev_cleanup(dev);
 }
···
 	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
 		ipoib_open(dev);
 	else {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
 			queue_delayed_work(ipoib_workqueue,
 					   &priv->pkey_task,
 					   HZ);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 	}
 }
···

 	/* P_Key value not assigned yet - start polling */
 	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
 		queue_delayed_work(ipoib_workqueue,
 				   &priv->pkey_task,
 				   HZ);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 		return 1;
 	}
drivers/infiniband/ulp/ipoib/ipoib_main.c  +6 -6

···
 		struct ipoib_dev_priv *cpriv;

 		/* Bring up any child interfaces too */
-		down(&priv->vlan_mutex);
+		mutex_lock(&priv->vlan_mutex);
 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 			int flags;
···

 			dev_change_flags(cpriv->dev, flags | IFF_UP);
 		}
-		up(&priv->vlan_mutex);
+		mutex_unlock(&priv->vlan_mutex);
 	}

 	netif_start_queue(dev);
···
 		struct ipoib_dev_priv *cpriv;

 		/* Bring down any child interfaces too */
-		down(&priv->vlan_mutex);
+		mutex_lock(&priv->vlan_mutex);
 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 			int flags;
···

 			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
 		}
-		up(&priv->vlan_mutex);
+		mutex_unlock(&priv->vlan_mutex);
 	}

 	return 0;
···
 	spin_lock_init(&priv->lock);
 	spin_lock_init(&priv->tx_lock);

-	init_MUTEX(&priv->mcast_mutex);
-	init_MUTEX(&priv->vlan_mutex);
+	mutex_init(&priv->mcast_mutex);
+	mutex_init(&priv->vlan_mutex);

 	INIT_LIST_HEAD(&priv->path_list);
 	INIT_LIST_HEAD(&priv->child_intfs);
drivers/infiniband/ulp/ipoib/ipoib_multicast.c  +33 -72

···
 		 "Enable multicast debug tracing if > 0");
 #endif

-static DECLARE_MUTEX(mcast_mutex);
+static DEFINE_MUTEX(mcast_mutex);

 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
 struct ipoib_mcast {
···
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh, *tmp;
 	unsigned long flags;
-	LIST_HEAD(ah_list);
-	struct ipoib_ah *ah, *tah;

 	ipoib_dbg_mcast(netdev_priv(dev),
 			"deleting multicast group " IPOIB_GID_FMT "\n",
···
 	spin_lock_irqsave(&priv->lock, flags);

 	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
+		/*
+		 * It's safe to call ipoib_put_ah() inside priv->lock
+		 * here, because we know that mcast->ah will always
+		 * hold one more reference, so ipoib_put_ah() will
+		 * never do more than decrement the ref count.
+		 */
 		if (neigh->ah)
-			list_add_tail(&neigh->ah->list, &ah_list);
+			ipoib_put_ah(neigh->ah);
 		*to_ipoib_neigh(neigh->neighbour) = NULL;
 		neigh->neighbour->ops->destructor = NULL;
 		kfree(neigh);
 	}

 	spin_unlock_irqrestore(&priv->lock, flags);
-
-	list_for_each_entry_safe(ah, tah, &ah_list, list)
-		ipoib_put_ah(ah);

 	if (mcast->ah)
 		ipoib_put_ah(mcast->ah);
···

 	if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
 		mcast->backoff = 1;
-		down(&mcast_mutex);
+		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_work(ipoib_workqueue, &priv->mcast_task);
-		up(&mcast_mutex);
+		mutex_unlock(&mcast_mutex);
 		complete(&mcast->done);
 		return;
 	}
···

 	mcast->query = NULL;

-	down(&mcast_mutex);
+	mutex_lock(&mcast_mutex);
 	if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
 		if (status == -ETIMEDOUT)
 			queue_work(ipoib_workqueue, &priv->mcast_task);
···
 					   mcast->backoff * HZ);
 	} else
 		complete(&mcast->done);
-	up(&mcast_mutex);
+	mutex_unlock(&mcast_mutex);

 	return;
 }
···
 		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
 			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

-		down(&mcast_mutex);
+		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_delayed_work(ipoib_workqueue,
 					   &priv->mcast_task,
 					   mcast->backoff * HZ);
-		up(&mcast_mutex);
+		mutex_unlock(&mcast_mutex);
 	} else
 		mcast->query_id = ret;
 }
···
 		priv->broadcast = ipoib_mcast_alloc(dev, 1);
 		if (!priv->broadcast) {
 			ipoib_warn(priv, "failed to allocate broadcast group\n");
-			down(&mcast_mutex);
+			mutex_lock(&mcast_mutex);
 			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 				queue_delayed_work(ipoib_workqueue,
 						   &priv->mcast_task, HZ);
-			up(&mcast_mutex);
+			mutex_unlock(&mcast_mutex);
 			return;
 		}
···

 	ipoib_dbg_mcast(priv, "starting multicast thread\n");

-	down(&mcast_mutex);
+	mutex_lock(&mcast_mutex);
 	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
 		queue_work(ipoib_workqueue, &priv->mcast_task);
-	up(&mcast_mutex);
+	mutex_unlock(&mcast_mutex);

 	return 0;
 }
···

 	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

-	down(&mcast_mutex);
+	mutex_lock(&mcast_mutex);
 	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
 	cancel_delayed_work(&priv->mcast_task);
-	up(&mcast_mutex);
+	mutex_unlock(&mcast_mutex);

 	if (flush)
 		flush_workqueue(ipoib_workqueue);
···
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	LIST_HEAD(remove_list);
-	struct ipoib_mcast *mcast, *tmcast, *nmcast;
+	struct ipoib_mcast *mcast, *tmcast;
 	unsigned long flags;

 	ipoib_dbg_mcast(priv, "flushing multicast list\n");

 	spin_lock_irqsave(&priv->lock, flags);
+
 	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
-		nmcast = ipoib_mcast_alloc(dev, 0);
-		if (nmcast) {
-			nmcast->flags =
-				mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY);
-
-			nmcast->mcmember.mgid = mcast->mcmember.mgid;
-
-			/* Add the new group in before the to-be-destroyed group */
-			list_add_tail(&nmcast->list, &mcast->list);
-			list_del_init(&mcast->list);
-
-			rb_replace_node(&mcast->rb_node, &nmcast->rb_node,
-					&priv->multicast_tree);
-
-			list_add_tail(&mcast->list, &remove_list);
-		} else {
-			ipoib_warn(priv, "could not reallocate multicast group "
-				   IPOIB_GID_FMT "\n",
-				   IPOIB_GID_ARG(mcast->mcmember.mgid));
-		}
+		list_del(&mcast->list);
+		rb_erase(&mcast->rb_node, &priv->multicast_tree);
+		list_add_tail(&mcast->list, &remove_list);
 	}

 	if (priv->broadcast) {
-		nmcast = ipoib_mcast_alloc(dev, 0);
-		if (nmcast) {
-			nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid;
-
-			rb_replace_node(&priv->broadcast->rb_node,
-					&nmcast->rb_node,
-					&priv->multicast_tree);
-
-			list_add_tail(&priv->broadcast->list, &remove_list);
-		}
-
-		priv->broadcast = nmcast;
+		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+		list_add_tail(&priv->broadcast->list, &remove_list);
+		priv->broadcast = NULL;
 	}

 	spin_unlock_irqrestore(&priv->lock, flags);
···
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
 		ipoib_mcast_leave(dev, mcast);
 		ipoib_mcast_free(mcast);
-	}
-}
-
-void ipoib_mcast_dev_down(struct net_device *dev)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned long flags;
-
-	/* Delete broadcast since it will be recreated */
-	if (priv->broadcast) {
-		ipoib_dbg_mcast(priv, "deleting broadcast group\n");
-
-		spin_lock_irqsave(&priv->lock, flags);
-		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		ipoib_mcast_leave(dev, priv->broadcast);
-		ipoib_mcast_free(priv->broadcast);
-		priv->broadcast = NULL;
 	}
 }
···

 	ipoib_mcast_stop_thread(dev, 0);

-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&dev->xmit_lock, flags);
+	spin_lock(&priv->lock);

 	/*
 	 * Unfortunately, the networking core only gives us a list of all of
···
 			list_add_tail(&mcast->list, &remove_list);
 		}
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+
+	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&dev->xmit_lock, flags);

 	/* We have to cancel outside of the spinlock */
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
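The new comment in ipoib_mcast_free() leans on a general property of refcounting: a put can only free (or sleep) when it drops the last reference. A minimal sketch of that argument with a hypothetical refcounted object, not the real struct ipoib_ah:

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct ref_obj {
		atomic_t ref;
		/* ... payload ... */
	};

	static void ref_obj_put(struct ref_obj *obj)
	{
		/* Frees only when the final reference is dropped. */
		if (atomic_dec_and_test(&obj->ref))
			kfree(obj);
	}

If the caller can prove another reference is still outstanding, as mcast->ah's reference does for each neigh->ah above, the put can never reach kfree(): it reduces to a single atomic decrement, which is safe with a spinlock held.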
drivers/infiniband/ulp/ipoib/ipoib_verbs.c  +4 -4

···
 	}

 	/* attach QP to multicast group */
-	down(&priv->mcast_mutex);
+	mutex_lock(&priv->mcast_mutex);
 	ret = ib_attach_mcast(priv->qp, mgid, mlid);
-	up(&priv->mcast_mutex);
+	mutex_unlock(&priv->mcast_mutex);
 	if (ret)
 		ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
···
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret;

-	down(&priv->mcast_mutex);
+	mutex_lock(&priv->mcast_mutex);
 	ret = ib_detach_mcast(priv->qp, mgid, mlid);
-	up(&priv->mcast_mutex);
+	mutex_unlock(&priv->mcast_mutex);
 	if (ret)
 		ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
drivers/infiniband/ulp/ipoib/ipoib_vlan.c  +5 -5

···

 	ppriv = netdev_priv(pdev);

-	down(&ppriv->vlan_mutex);
+	mutex_lock(&ppriv->vlan_mutex);

 	/*
 	 * First ensure this isn't a duplicate. We check the parent device and
···

 	list_add_tail(&priv->list, &ppriv->child_intfs);

-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);

 	return 0;
···
 	free_netdev(priv->dev);

 err:
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);
 	return result;
 }
···

 	ppriv = netdev_priv(pdev);

-	down(&ppriv->vlan_mutex);
+	mutex_lock(&ppriv->vlan_mutex);
 	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
 		if (priv->pkey == pkey) {
 			unregister_netdev(priv->dev);
···
 			break;
 		}
 	}
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);

 	return ret;
 }