drivers/infiniband/core/cm.c

···
 }
 EXPORT_SYMBOL(ib_cm_init_qp_attr);
 
-static __be64 cm_get_ca_guid(struct ib_device *device)
-{
-	struct ib_device_attr *device_attr;
-	__be64 guid;
-	int ret;
-
-	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
-	if (!device_attr)
-		return 0;
-
-	ret = ib_query_device(device, device_attr);
-	guid = ret ? 0 : device_attr->node_guid;
-	kfree(device_attr);
-	return guid;
-}
-
 static void cm_add_one(struct ib_device *device)
 {
 	struct cm_device *cm_dev;
···
 		return;
 
 	cm_dev->device = device;
-	cm_dev->ca_guid = cm_get_ca_guid(device);
-	if (!cm_dev->ca_guid)
-		goto error1;
+	cm_dev->ca_guid = device->node_guid;
 
 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
 	for (i = 1; i <= device->phys_port_cnt; i++) {
···
 							cm_recv_handler,
 							port);
 		if (IS_ERR(port->mad_agent))
-			goto error2;
+			goto error1;
 
 		ret = ib_modify_port(device, i, 0, &port_modify);
 		if (ret)
-			goto error3;
+			goto error2;
 	}
 	ib_set_client_data(device, &cm_client, cm_dev);
 
···
 	write_unlock_irqrestore(&cm.device_lock, flags);
 	return;
 
-error3:
-	ib_unregister_mad_agent(port->mad_agent);
 error2:
+	ib_unregister_mad_agent(port->mad_agent);
+error1:
 	port_modify.set_port_cap_mask = 0;
 	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
 	while (--i) {
···
 		ib_modify_port(device, port->port_num, 0, &port_modify);
 		ib_unregister_mad_agent(port->mad_agent);
 	}
-	kfree(cm_dev);
 }
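The point of this hunk: node_guid is now cached in struct ib_device when the device is registered, so the allocate/ib_query_device()/free dance in the removed cm_get_ca_guid() collapses to a direct field read. A minimal sketch of the resulting client-side pattern (client_guid() is a hypothetical helper, not part of the patch):

/* Hypothetical helper: with node_guid cached in struct ib_device,
 * reading the GUID can no longer fail, so the callers' error paths
 * (the old error1 label above) go away too. */
static __be64 client_guid(struct ib_device *device)
{
	return device->node_guid;
}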
+11-12
drivers/infiniband/core/device.c
···
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 #include "core_priv.h"
 
···
 static LIST_HEAD(client_list);
 
 /*
- * device_sem protects access to both device_list and client_list.
+ * device_mutex protects access to both device_list and client_list.
  * There's no real point to using multiple locks or something fancier
  * like an rwsem: we always access both lists, and we're always
  * modifying one list or the other list.  In any case this is not a
  * hot path so there's no point in trying to optimize.
  */
-static DECLARE_MUTEX(device_sem);
+static DEFINE_MUTEX(device_mutex);
 
 static int ib_device_check_mandatory(struct ib_device *device)
 {
···
 {
 	int ret;
 
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 
 	if (strchr(device->name, '%')) {
 		ret = alloc_name(device->name);
···
 	}
 
 out:
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(ib_register_device);
···
 	struct ib_client_data *context, *tmp;
 	unsigned long flags;
 
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 
 	list_for_each_entry_reverse(client, &client_list, list)
 		if (client->remove)
···
 
 	list_del(&device->core_list);
 
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 
 	spin_lock_irqsave(&device->client_data_lock, flags);
 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
···
 {
 	struct ib_device *device;
 
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 
 	list_add_tail(&client->list, &client_list);
 	list_for_each_entry(device, &device_list, core_list)
 		if (client->add && !add_client_context(device, client))
 			client->add(device);
 
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 
 	return 0;
 }
···
 	struct ib_device *device;
 	unsigned long flags;
 
-	down(&device_sem);
+	mutex_lock(&device_mutex);
 
 	list_for_each_entry(device, &device_list, core_list) {
 		if (client->remove)
···
 	}
 	list_del(&client->list);
 
-	up(&device_sem);
+	mutex_unlock(&device_mutex);
 }
 EXPORT_SYMBOL(ib_unregister_client);
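The conversion in this file (and in the IPoIB files below) is the standard 2.6.16-era mapping from semaphores-used-as-mutexes to the dedicated mutex API; a minimal sketch with a hypothetical example_mutex:

#include <linux/mutex.h>

/* was: static DECLARE_MUTEX(example_sem); */
static DEFINE_MUTEX(example_mutex);

static void example_critical_section(void)
{
	mutex_lock(&example_mutex);	/* was: down(&example_sem); */
	/* ... touch the lists protected by the lock ... */
	mutex_unlock(&example_mutex);	/* was: up(&example_sem); */
}

Unlike a counting semaphore, a mutex has a single owner, which lets the kernel's mutex debugging (CONFIG_DEBUG_MUTEXES) catch unbalanced lock/unlock and wrong-owner releases.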
+5-17
drivers/infiniband/core/sysfs.c
···
 		return -ENOMEM;
 
 	/*
-	 * It might be nice to pass the node GUID with the event, but
-	 * right now the only way to get it is to query the device
-	 * provider, and this can crash during device removal because
-	 * we are will be running after driver removal has started.
-	 * We could add a node_guid field to struct ib_device, or we
-	 * could just let userspace read the node GUID from sysfs when
-	 * devices are added.
+	 * It would be nice to pass the node GUID with the event...
 	 */
 
 	envp[i] = NULL;
···
 static ssize_t show_node_guid(struct class_device *cdev, char *buf)
 {
 	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
-	struct ib_device_attr attr;
-	ssize_t ret;
 
 	if (!ibdev_is_alive(dev))
 		return -ENODEV;
 
-	ret = ib_query_device(dev, &attr);
-	if (ret)
-		return ret;
-
 	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
-		       be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
+		       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
 }
 
 static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
drivers/infiniband/hw/mthca/mthca_eq.c

···
 enum {
 	MTHCA_NUM_ASYNC_EQE = 0x80,
 	MTHCA_NUM_CMD_EQE   = 0x80,
+	MTHCA_NUM_SPARE_EQE = 0x80,
 	MTHCA_EQ_ENTRY_SIZE = 0x20
 };
 
···
 {
 	struct mthca_eqe *eqe;
 	int disarm_cqn;
-	int eqes_found = 0;
+	int eqes_found = 0;
+	int set_ci = 0;
 
 	while ((eqe = next_eqe_sw(eq))) {
-		int set_ci = 0;
-
 		/*
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
···
 			       be16_to_cpu(eqe->event.cmd.token),
 			       eqe->event.cmd.status,
 			       be64_to_cpu(eqe->event.cmd.out_param));
-			/*
-			 * cmd_event() may add more commands.
-			 * The card will think the queue has overflowed if
-			 * we don't tell it we've been processing events.
-			 */
-			set_ci = 1;
 			break;
 
 		case MTHCA_EVENT_TYPE_PORT_CHANGE:
···
 		set_eqe_hw(eqe);
 		++eq->cons_index;
 		eqes_found = 1;
+		++set_ci;
 
-		if (unlikely(set_ci)) {
+		/*
+		 * The HCA will think the queue has overflowed if we
+		 * don't tell it we've been processing events.  We
+		 * create our EQs with MTHCA_NUM_SPARE_EQE extra
+		 * entries, so we must update our consumer index at
+		 * least that often.
+		 */
+		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
 			/*
 			 * Conditional on hca_type is OK here because
 			 * this is a rare case, not the fast path.
···
 	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
 		128 : dev->eq_table.inta_pin;
 
-	err = mthca_create_eq(dev, dev->limits.num_cqs,
+	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
 	if (err)
 		goto err_out_unmap;
 
-	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
+	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
 	if (err)
 		goto err_out_comp;
 
-	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
+	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
 			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
 			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
 	if (err)
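The idea in these hunks: instead of ringing the consumer-index doorbell after every command event, each EQ is allocated MTHCA_NUM_SPARE_EQE slack entries, and the poll loop batches doorbells so the hardware's view of the consumer index never lags by more than that slack. A simplified sketch of the loop shape (struct example_eq, poll_event() and ack_doorbell() are stand-ins, not mthca functions):

#define NUM_SPARE_EQE	0x80	/* slack entries allocated beyond demand */

struct example_eq {
	u32 cons_index;
	/* ... queue memory, doorbell address, etc. ... */
};

/* Stand-ins for consuming one EQ entry and ringing the CI doorbell. */
extern int  poll_event(struct example_eq *eq);
extern void ack_doorbell(struct example_eq *eq, u32 cons_index);

static void drain_eq(struct example_eq *eq)
{
	int set_ci = 0;

	while (poll_event(eq)) {
		++eq->cons_index;

		/*
		 * The HCA only learns the consumer index from the
		 * doorbell; the spare entries bound how far the
		 * producer may run ahead, so acknowledge at least
		 * once every NUM_SPARE_EQE events.
		 */
		if (unlikely(++set_ci >= NUM_SPARE_EQE)) {
			ack_doorbell(eq, eq->cons_index);
			set_ci = 0;
		}
	}

	ack_doorbell(eq, eq->cons_index);	/* final update */
}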
drivers/infiniband/ulp/ipoib/ipoib_ib.c

···
 
 #define IPOIB_OP_RECV (1ul << 31)
 
-static DECLARE_MUTEX(pkey_sem);
+static DEFINE_MUTEX(pkey_mutex);
 
 struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
 				 struct ib_pd *pd, struct ib_ah_attr *attr)
···
 
 	/* Shutdown the P_Key thread if still active */
 	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		set_bit(IPOIB_PKEY_STOP, &priv->flags);
 		cancel_delayed_work(&priv->pkey_task);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 		flush_workqueue(ipoib_workqueue);
 	}
 
 	ipoib_mcast_stop_thread(dev, 1);
-
-	/*
-	 * Flush the multicast groups first so we stop any multicast joins. The
-	 * completion thread may have already died and we may deadlock waiting
-	 * for the completion thread to finish some multicast joins.
-	 */
 	ipoib_mcast_dev_flush(dev);
-
-	/* Delete broadcast and local addresses since they will be recreated */
-	ipoib_mcast_dev_down(dev);
 
 	ipoib_flush_paths(dev);
···
 	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		ipoib_ib_dev_up(dev);
 
-	down(&priv->vlan_mutex);
+	mutex_lock(&priv->vlan_mutex);
 
 	/* Flush any child interfaces too */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
 		ipoib_ib_dev_flush(&cpriv->dev);
 
-	up(&priv->vlan_mutex);
+	mutex_unlock(&priv->vlan_mutex);
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
···
 	ipoib_dbg(priv, "cleaning up ib_dev\n");
 
 	ipoib_mcast_stop_thread(dev, 1);
-
-	/* Delete the broadcast address and the local address */
-	ipoib_mcast_dev_down(dev);
+	ipoib_mcast_dev_flush(dev);
 
 	ipoib_transport_dev_cleanup(dev);
 }
···
 	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
 		ipoib_open(dev);
 	else {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
 			queue_delayed_work(ipoib_workqueue,
 					   &priv->pkey_task,
 					   HZ);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 	}
 }
···
 
 	/* P_Key value not assigned yet - start polling */
 	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-		down(&pkey_sem);
+		mutex_lock(&pkey_mutex);
 		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
 		queue_delayed_work(ipoib_workqueue,
 				   &priv->pkey_task,
 				   HZ);
-		up(&pkey_sem);
+		mutex_unlock(&pkey_mutex);
 		return 1;
 	}
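The P_Key hunks above are a stop-bit-plus-delayed-work polling pattern: the mutex only closes the race between setting the stop bit and a concurrent requeue of the work item. A sketch under that reading, using the same work-queue API vintage as the diff (names are illustrative):

#define POLL_STOP_BIT	0	/* hypothetical flag bit */

static DEFINE_MUTEX(poll_mutex);

/* Begin polling: clear the stop bit, then queue a retry in 1s. */
static void poll_start(struct workqueue_struct *wq,
		       struct work_struct *task, unsigned long *flags)
{
	mutex_lock(&poll_mutex);
	clear_bit(POLL_STOP_BIT, flags);
	queue_delayed_work(wq, task, HZ);
	mutex_unlock(&poll_mutex);
}

/* End polling: set the stop bit and cancel under the same mutex so
 * the work handler cannot requeue itself after we cancel. */
static void poll_stop(struct workqueue_struct *wq,
		      struct work_struct *task, unsigned long *flags)
{
	mutex_lock(&poll_mutex);
	set_bit(POLL_STOP_BIT, flags);
	cancel_delayed_work(task);
	mutex_unlock(&poll_mutex);

	flush_workqueue(wq);	/* wait out a handler already running */
}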
+6-6
drivers/infiniband/ulp/ipoib/ipoib_main.c
···
 		struct ipoib_dev_priv *cpriv;
 
 		/* Bring up any child interfaces too */
-		down(&priv->vlan_mutex);
+		mutex_lock(&priv->vlan_mutex);
 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 			int flags;
 
···
 
 			dev_change_flags(cpriv->dev, flags | IFF_UP);
 		}
-		up(&priv->vlan_mutex);
+		mutex_unlock(&priv->vlan_mutex);
 	}
 
 	netif_start_queue(dev);
···
 		struct ipoib_dev_priv *cpriv;
 
 		/* Bring down any child interfaces too */
-		down(&priv->vlan_mutex);
+		mutex_lock(&priv->vlan_mutex);
 		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 			int flags;
 
···
 
 			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
 		}
-		up(&priv->vlan_mutex);
+		mutex_unlock(&priv->vlan_mutex);
 	}
 
 	return 0;
···
 	spin_lock_init(&priv->lock);
 	spin_lock_init(&priv->tx_lock);
 
-	init_MUTEX(&priv->mcast_mutex);
-	init_MUTEX(&priv->vlan_mutex);
+	mutex_init(&priv->mcast_mutex);
+	mutex_init(&priv->vlan_mutex);
 
 	INIT_LIST_HEAD(&priv->path_list);
 	INIT_LIST_HEAD(&priv->child_intfs);
+33-72
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
···5555 "Enable multicast debug tracing if > 0");5656#endif57575858-static DECLARE_MUTEX(mcast_mutex);5858+static DEFINE_MUTEX(mcast_mutex);59596060/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */6161struct ipoib_mcast {···9797 struct ipoib_dev_priv *priv = netdev_priv(dev);9898 struct ipoib_neigh *neigh, *tmp;9999 unsigned long flags;100100- LIST_HEAD(ah_list);101101- struct ipoib_ah *ah, *tah;102100103101 ipoib_dbg_mcast(netdev_priv(dev),104102 "deleting multicast group " IPOIB_GID_FMT "\n",···105107 spin_lock_irqsave(&priv->lock, flags);106108107109 list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {110110+ /*111111+ * It's safe to call ipoib_put_ah() inside priv->lock112112+ * here, because we know that mcast->ah will always113113+ * hold one more reference, so ipoib_put_ah() will114114+ * never do more than decrement the ref count.115115+ */108116 if (neigh->ah)109109- list_add_tail(&neigh->ah->list, &ah_list);117117+ ipoib_put_ah(neigh->ah);110118 *to_ipoib_neigh(neigh->neighbour) = NULL;111119 neigh->neighbour->ops->destructor = NULL;112120 kfree(neigh);113121 }114122115123 spin_unlock_irqrestore(&priv->lock, flags);116116-117117- list_for_each_entry_safe(ah, tah, &ah_list, list)118118- ipoib_put_ah(ah);119124120125 if (mcast->ah)121126 ipoib_put_ah(mcast->ah);···385384386385 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {387386 mcast->backoff = 1;388388- down(&mcast_mutex);387387+ mutex_lock(&mcast_mutex);389388 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))390389 queue_work(ipoib_workqueue, &priv->mcast_task);391391- up(&mcast_mutex);390390+ mutex_unlock(&mcast_mutex);392391 complete(&mcast->done);393392 return;394393 }···418417419418 mcast->query = NULL;420419421421- down(&mcast_mutex);420420+ mutex_lock(&mcast_mutex);422421 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {423422 if (status == -ETIMEDOUT)424423 queue_work(ipoib_workqueue, &priv->mcast_task);···427426 mcast->backoff * HZ);428427 } else429428 complete(&mcast->done);430430- up(&mcast_mutex);429429+ mutex_unlock(&mcast_mutex);431430432431 return;433432}···482481 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)483482 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;484483485485- down(&mcast_mutex);484484+ mutex_lock(&mcast_mutex);486485 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))487486 queue_delayed_work(ipoib_workqueue,488487 &priv->mcast_task,489488 mcast->backoff * HZ);490490- up(&mcast_mutex);489489+ mutex_unlock(&mcast_mutex);491490 } else492491 mcast->query_id = ret;493492}···520519 priv->broadcast = ipoib_mcast_alloc(dev, 1);521520 if (!priv->broadcast) {522521 ipoib_warn(priv, "failed to allocate broadcast group\n");523523- down(&mcast_mutex);522522+ mutex_lock(&mcast_mutex);524523 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))525524 queue_delayed_work(ipoib_workqueue,526525 &priv->mcast_task, HZ);527527- up(&mcast_mutex);526526+ mutex_unlock(&mcast_mutex);528527 return;529528 }530529···580579581580 ipoib_dbg_mcast(priv, "starting multicast thread\n");582581583583- down(&mcast_mutex);582582+ mutex_lock(&mcast_mutex);584583 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))585584 queue_work(ipoib_workqueue, &priv->mcast_task);586586- up(&mcast_mutex);585585+ mutex_unlock(&mcast_mutex);587586588587 return 0;589588}···595594596595 ipoib_dbg_mcast(priv, "stopping multicast thread\n");597596598598- down(&mcast_mutex);597597+ mutex_lock(&mcast_mutex);599598 clear_bit(IPOIB_MCAST_RUN, &priv->flags);600599 cancel_delayed_work(&priv->mcast_task);601601- up(&mcast_mutex);600600+ 
mutex_unlock(&mcast_mutex);602601603602 if (flush)604603 flush_workqueue(ipoib_workqueue);···742741{743742 struct ipoib_dev_priv *priv = netdev_priv(dev);744743 LIST_HEAD(remove_list);745745- struct ipoib_mcast *mcast, *tmcast, *nmcast;744744+ struct ipoib_mcast *mcast, *tmcast;746745 unsigned long flags;747746748747 ipoib_dbg_mcast(priv, "flushing multicast list\n");749748750749 spin_lock_irqsave(&priv->lock, flags);750750+751751 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {752752- nmcast = ipoib_mcast_alloc(dev, 0);753753- if (nmcast) {754754- nmcast->flags =755755- mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY);756756-757757- nmcast->mcmember.mgid = mcast->mcmember.mgid;758758-759759- /* Add the new group in before the to-be-destroyed group */760760- list_add_tail(&nmcast->list, &mcast->list);761761- list_del_init(&mcast->list);762762-763763- rb_replace_node(&mcast->rb_node, &nmcast->rb_node,764764- &priv->multicast_tree);765765-766766- list_add_tail(&mcast->list, &remove_list);767767- } else {768768- ipoib_warn(priv, "could not reallocate multicast group "769769- IPOIB_GID_FMT "\n",770770- IPOIB_GID_ARG(mcast->mcmember.mgid));771771- }752752+ list_del(&mcast->list);753753+ rb_erase(&mcast->rb_node, &priv->multicast_tree);754754+ list_add_tail(&mcast->list, &remove_list);772755 }773756774757 if (priv->broadcast) {775775- nmcast = ipoib_mcast_alloc(dev, 0);776776- if (nmcast) {777777- nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid;778778-779779- rb_replace_node(&priv->broadcast->rb_node,780780- &nmcast->rb_node,781781- &priv->multicast_tree);782782-783783- list_add_tail(&priv->broadcast->list, &remove_list);784784- }785785-786786- priv->broadcast = nmcast;758758+ rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);759759+ list_add_tail(&priv->broadcast->list, &remove_list);760760+ priv->broadcast = NULL;787761 }788762789763 spin_unlock_irqrestore(&priv->lock, flags);···766790 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {767791 ipoib_mcast_leave(dev, mcast);768792 ipoib_mcast_free(mcast);769769- }770770-}771771-772772-void ipoib_mcast_dev_down(struct net_device *dev)773773-{774774- struct ipoib_dev_priv *priv = netdev_priv(dev);775775- unsigned long flags;776776-777777- /* Delete broadcast since it will be recreated */778778- if (priv->broadcast) {779779- ipoib_dbg_mcast(priv, "deleting broadcast group\n");780780-781781- spin_lock_irqsave(&priv->lock, flags);782782- rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);783783- spin_unlock_irqrestore(&priv->lock, flags);784784- ipoib_mcast_leave(dev, priv->broadcast);785785- ipoib_mcast_free(priv->broadcast);786786- priv->broadcast = NULL;787793 }788794}789795···782824783825 ipoib_mcast_stop_thread(dev, 0);784826785785- spin_lock_irqsave(&priv->lock, flags);827827+ spin_lock_irqsave(&dev->xmit_lock, flags);828828+ spin_lock(&priv->lock);786829787830 /*788831 * Unfortunately, the networking core only gives us a list of all of···855896 list_add_tail(&mcast->list, &remove_list);856897 }857898 }858858- spin_unlock_irqrestore(&priv->lock, flags);899899+900900+ spin_unlock(&priv->lock);901901+ spin_unlock_irqrestore(&dev->xmit_lock, flags);859902860903 /* We have to cancel outside of the spinlock */861904 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
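Two locking points in the hunks above are worth restating. First, calling ipoib_put_ah() under priv->lock is safe only because mcast->ah pins one extra reference, so the put can never drop the last reference there. Second, the restart path now nests priv->lock inside dev->xmit_lock, which (in this kernel era) the network core holds around dev->mc_list updates. A sketch of that nesting, with the walk body elided:

static void restart_walk_sketch(struct net_device *dev,
				struct ipoib_dev_priv *priv)
{
	unsigned long flags;

	/* Outer lock: keeps dev->mc_list stable against the net core. */
	spin_lock_irqsave(&dev->xmit_lock, flags);

	/* Inner lock: protects priv->multicast_tree and friends. */
	spin_lock(&priv->lock);

	/* ... reconcile dev->mc_list with priv->multicast_list ... */

	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&dev->xmit_lock, flags);
}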
+4-4
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
···
 	}
 
 	/* attach QP to multicast group */
-	down(&priv->mcast_mutex);
+	mutex_lock(&priv->mcast_mutex);
 	ret = ib_attach_mcast(priv->qp, mgid, mlid);
-	up(&priv->mcast_mutex);
+	mutex_unlock(&priv->mcast_mutex);
 	if (ret)
 		ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
 
···
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret;
 
-	down(&priv->mcast_mutex);
+	mutex_lock(&priv->mcast_mutex);
 	ret = ib_detach_mcast(priv->qp, mgid, mlid);
-	up(&priv->mcast_mutex);
+	mutex_unlock(&priv->mcast_mutex);
 	if (ret)
 		ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
 
+5-5
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
···
 
 	ppriv = netdev_priv(pdev);
 
-	down(&ppriv->vlan_mutex);
+	mutex_lock(&ppriv->vlan_mutex);
 
 	/*
 	 * First ensure this isn't a duplicate. We check the parent device and
···
 
 	list_add_tail(&priv->list, &ppriv->child_intfs);
 
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);
 
 	return 0;
 
···
 	free_netdev(priv->dev);
 
 err:
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);
 	return result;
 }
 
···
 
 	ppriv = netdev_priv(pdev);
 
-	down(&ppriv->vlan_mutex);
+	mutex_lock(&ppriv->vlan_mutex);
 	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
 		if (priv->pkey == pkey) {
 			unregister_netdev(priv->dev);
···
 			break;
 		}
 	}
-	up(&ppriv->vlan_mutex);
+	mutex_unlock(&ppriv->vlan_mutex);
 
 	return ret;
 }