IB: convert from semaphores to mutexes

semaphore to mutex conversion by Ingo and Arjan's script.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
[ Sanity-checked on real IB hardware ]
Signed-off-by: Roland Dreier <rolandd@cisco.com>

authored by Ingo Molnar and committed by Roland Dreier 95ed644f 9eacee2a

+145 -144
+11 -12
drivers/infiniband/core/device.c
··· 38 #include <linux/errno.h> 39 #include <linux/slab.h> 40 #include <linux/init.h> 41 - 42 - #include <asm/semaphore.h> 43 44 #include "core_priv.h" 45 ··· 56 static LIST_HEAD(client_list); 57 58 /* 59 - * device_sem protects access to both device_list and client_list. 60 * There's no real point to using multiple locks or something fancier 61 * like an rwsem: we always access both lists, and we're always 62 * modifying one list or the other list. In any case this is not a 63 * hot path so there's no point in trying to optimize. 64 */ 65 - static DECLARE_MUTEX(device_sem); 66 67 static int ib_device_check_mandatory(struct ib_device *device) 68 { ··· 220 { 221 int ret; 222 223 - down(&device_sem); 224 225 if (strchr(device->name, '%')) { 226 ret = alloc_name(device->name); ··· 258 } 259 260 out: 261 - up(&device_sem); 262 return ret; 263 } 264 EXPORT_SYMBOL(ib_register_device); ··· 275 struct ib_client_data *context, *tmp; 276 unsigned long flags; 277 278 - down(&device_sem); 279 280 list_for_each_entry_reverse(client, &client_list, list) 281 if (client->remove) ··· 283 284 list_del(&device->core_list); 285 286 - up(&device_sem); 287 288 spin_lock_irqsave(&device->client_data_lock, flags); 289 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ··· 311 { 312 struct ib_device *device; 313 314 - down(&device_sem); 315 316 list_add_tail(&client->list, &client_list); 317 list_for_each_entry(device, &device_list, core_list) 318 if (client->add && !add_client_context(device, client)) 319 client->add(device); 320 321 - up(&device_sem); 322 323 return 0; 324 } ··· 338 struct ib_device *device; 339 unsigned long flags; 340 341 - down(&device_sem); 342 343 list_for_each_entry(device, &device_list, core_list) { 344 if (client->remove) ··· 354 } 355 list_del(&client->list); 356 357 - up(&device_sem); 358 } 359 EXPORT_SYMBOL(ib_unregister_client); 360
··· 38 #include <linux/errno.h> 39 #include <linux/slab.h> 40 #include <linux/init.h> 41 + #include <linux/mutex.h> 42 43 #include "core_priv.h" 44 ··· 57 static LIST_HEAD(client_list); 58 59 /* 60 + * device_mutex protects access to both device_list and client_list. 61 * There's no real point to using multiple locks or something fancier 62 * like an rwsem: we always access both lists, and we're always 63 * modifying one list or the other list. In any case this is not a 64 * hot path so there's no point in trying to optimize. 65 */ 66 + static DEFINE_MUTEX(device_mutex); 67 68 static int ib_device_check_mandatory(struct ib_device *device) 69 { ··· 221 { 222 int ret; 223 224 + mutex_lock(&device_mutex); 225 226 if (strchr(device->name, '%')) { 227 ret = alloc_name(device->name); ··· 259 } 260 261 out: 262 + mutex_unlock(&device_mutex); 263 return ret; 264 } 265 EXPORT_SYMBOL(ib_register_device); ··· 276 struct ib_client_data *context, *tmp; 277 unsigned long flags; 278 279 + mutex_lock(&device_mutex); 280 281 list_for_each_entry_reverse(client, &client_list, list) 282 if (client->remove) ··· 284 285 list_del(&device->core_list); 286 287 + mutex_unlock(&device_mutex); 288 289 spin_lock_irqsave(&device->client_data_lock, flags); 290 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ··· 312 { 313 struct ib_device *device; 314 315 + mutex_lock(&device_mutex); 316 317 list_add_tail(&client->list, &client_list); 318 list_for_each_entry(device, &device_list, core_list) 319 if (client->add && !add_client_context(device, client)) 320 client->add(device); 321 322 + mutex_unlock(&device_mutex); 323 324 return 0; 325 } ··· 339 struct ib_device *device; 340 unsigned long flags; 341 342 + mutex_lock(&device_mutex); 343 344 list_for_each_entry(device, &device_list, core_list) { 345 if (client->remove) ··· 355 } 356 list_del(&client->list); 357 358 + mutex_unlock(&device_mutex); 359 } 360 EXPORT_SYMBOL(ib_unregister_client); 361
+12 -11
drivers/infiniband/core/ucm.c
··· 42 #include <linux/mount.h> 43 #include <linux/cdev.h> 44 #include <linux/idr.h> 45 46 #include <asm/uaccess.h> 47 ··· 114 .remove = ib_ucm_remove_one 115 }; 116 117 - static DECLARE_MUTEX(ctx_id_mutex); 118 static DEFINE_IDR(ctx_id_table); 119 static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES); 120 ··· 122 { 123 struct ib_ucm_context *ctx; 124 125 - down(&ctx_id_mutex); 126 ctx = idr_find(&ctx_id_table, id); 127 if (!ctx) 128 ctx = ERR_PTR(-ENOENT); ··· 130 ctx = ERR_PTR(-EINVAL); 131 else 132 atomic_inc(&ctx->ref); 133 - up(&ctx_id_mutex); 134 135 return ctx; 136 } ··· 187 if (!result) 188 goto error; 189 190 - down(&ctx_id_mutex); 191 result = idr_get_new(&ctx_id_table, ctx, &ctx->id); 192 - up(&ctx_id_mutex); 193 } while (result == -EAGAIN); 194 195 if (result) ··· 551 err2: 552 ib_destroy_cm_id(ctx->cm_id); 553 err1: 554 - down(&ctx_id_mutex); 555 idr_remove(&ctx_id_table, ctx->id); 556 - up(&ctx_id_mutex); 557 kfree(ctx); 558 return result; 559 } ··· 573 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 574 return -EFAULT; 575 576 - down(&ctx_id_mutex); 577 ctx = idr_find(&ctx_id_table, cmd.id); 578 if (!ctx) 579 ctx = ERR_PTR(-ENOENT); ··· 581 ctx = ERR_PTR(-EINVAL); 582 else 583 idr_remove(&ctx_id_table, ctx->id); 584 - up(&ctx_id_mutex); 585 586 if (IS_ERR(ctx)) 587 return PTR_ERR(ctx); ··· 1281 struct ib_ucm_context, file_list); 1282 up(&file->mutex); 1283 1284 - down(&ctx_id_mutex); 1285 idr_remove(&ctx_id_table, ctx->id); 1286 - up(&ctx_id_mutex); 1287 1288 ib_destroy_cm_id(ctx->cm_id); 1289 ib_ucm_cleanup_events(ctx);
··· 42 #include <linux/mount.h> 43 #include <linux/cdev.h> 44 #include <linux/idr.h> 45 + #include <linux/mutex.h> 46 47 #include <asm/uaccess.h> 48 ··· 113 .remove = ib_ucm_remove_one 114 }; 115 116 + static DEFINE_MUTEX(ctx_id_mutex); 117 static DEFINE_IDR(ctx_id_table); 118 static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES); 119 ··· 121 { 122 struct ib_ucm_context *ctx; 123 124 + mutex_lock(&ctx_id_mutex); 125 ctx = idr_find(&ctx_id_table, id); 126 if (!ctx) 127 ctx = ERR_PTR(-ENOENT); ··· 129 ctx = ERR_PTR(-EINVAL); 130 else 131 atomic_inc(&ctx->ref); 132 + mutex_unlock(&ctx_id_mutex); 133 134 return ctx; 135 } ··· 186 if (!result) 187 goto error; 188 189 + mutex_lock(&ctx_id_mutex); 190 result = idr_get_new(&ctx_id_table, ctx, &ctx->id); 191 + mutex_unlock(&ctx_id_mutex); 192 } while (result == -EAGAIN); 193 194 if (result) ··· 550 err2: 551 ib_destroy_cm_id(ctx->cm_id); 552 err1: 553 + mutex_lock(&ctx_id_mutex); 554 idr_remove(&ctx_id_table, ctx->id); 555 + mutex_unlock(&ctx_id_mutex); 556 kfree(ctx); 557 return result; 558 } ··· 572 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 573 return -EFAULT; 574 575 + mutex_lock(&ctx_id_mutex); 576 ctx = idr_find(&ctx_id_table, cmd.id); 577 if (!ctx) 578 ctx = ERR_PTR(-ENOENT); ··· 580 ctx = ERR_PTR(-EINVAL); 581 else 582 idr_remove(&ctx_id_table, ctx->id); 583 + mutex_unlock(&ctx_id_mutex); 584 585 if (IS_ERR(ctx)) 586 return PTR_ERR(ctx); ··· 1280 struct ib_ucm_context, file_list); 1281 up(&file->mutex); 1282 1283 + mutex_lock(&ctx_id_mutex); 1284 idr_remove(&ctx_id_table, ctx->id); 1285 + mutex_unlock(&ctx_id_mutex); 1286 1287 ib_destroy_cm_id(ctx->cm_id); 1288 ib_ucm_cleanup_events(ctx);
+3 -2
drivers/infiniband/core/uverbs.h
··· 41 42 #include <linux/kref.h> 43 #include <linux/idr.h> 44 45 #include <rdma/ib_verbs.h> 46 #include <rdma/ib_user_verbs.h> ··· 89 90 struct ib_uverbs_file { 91 struct kref ref; 92 - struct semaphore mutex; 93 struct ib_uverbs_device *device; 94 struct ib_ucontext *ucontext; 95 struct ib_event_handler event_handler; ··· 132 u32 async_events_reported; 133 }; 134 135 - extern struct semaphore ib_uverbs_idr_mutex; 136 extern struct idr ib_uverbs_pd_idr; 137 extern struct idr ib_uverbs_mr_idr; 138 extern struct idr ib_uverbs_mw_idr;
··· 41 42 #include <linux/kref.h> 43 #include <linux/idr.h> 44 + #include <linux/mutex.h> 45 46 #include <rdma/ib_verbs.h> 47 #include <rdma/ib_user_verbs.h> ··· 88 89 struct ib_uverbs_file { 90 struct kref ref; 91 + struct mutex mutex; 92 struct ib_uverbs_device *device; 93 struct ib_ucontext *ucontext; 94 struct ib_event_handler event_handler; ··· 131 u32 async_events_reported; 132 }; 133 134 + extern struct mutex ib_uverbs_idr_mutex; 135 extern struct idr ib_uverbs_pd_idr; 136 extern struct idr ib_uverbs_mr_idr; 137 extern struct idr ib_uverbs_mw_idr;
+75 -75
drivers/infiniband/core/uverbs_cmd.c
··· 67 if (copy_from_user(&cmd, buf, sizeof cmd)) 68 return -EFAULT; 69 70 - down(&file->mutex); 71 72 if (file->ucontext) { 73 ret = -EINVAL; ··· 119 120 fd_install(resp.async_fd, filp); 121 122 - up(&file->mutex); 123 124 return in_len; 125 ··· 131 ibdev->dealloc_ucontext(ucontext); 132 133 err: 134 - up(&file->mutex); 135 return ret; 136 } 137 ··· 290 pd->uobject = uobj; 291 atomic_set(&pd->usecnt, 0); 292 293 - down(&ib_uverbs_idr_mutex); 294 295 retry: 296 if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) { ··· 314 goto err_idr; 315 } 316 317 - down(&file->mutex); 318 list_add_tail(&uobj->list, &file->ucontext->pd_list); 319 - up(&file->mutex); 320 321 - up(&ib_uverbs_idr_mutex); 322 323 return in_len; 324 ··· 326 idr_remove(&ib_uverbs_pd_idr, uobj->id); 327 328 err_up: 329 - up(&ib_uverbs_idr_mutex); 330 ib_dealloc_pd(pd); 331 332 err: ··· 346 if (copy_from_user(&cmd, buf, sizeof cmd)) 347 return -EFAULT; 348 349 - down(&ib_uverbs_idr_mutex); 350 351 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 352 if (!pd || pd->uobject->context != file->ucontext) ··· 360 361 idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle); 362 363 - down(&file->mutex); 364 list_del(&uobj->list); 365 - up(&file->mutex); 366 367 kfree(uobj); 368 369 out: 370 - up(&ib_uverbs_idr_mutex); 371 372 return ret ? 
ret : in_len; 373 } ··· 426 427 obj->umem.virt_base = cmd.hca_va; 428 429 - down(&ib_uverbs_idr_mutex); 430 431 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 432 if (!pd || pd->uobject->context != file->ucontext) { ··· 476 goto err_idr; 477 } 478 479 - down(&file->mutex); 480 list_add_tail(&obj->uobject.list, &file->ucontext->mr_list); 481 - up(&file->mutex); 482 483 - up(&ib_uverbs_idr_mutex); 484 485 return in_len; 486 ··· 492 atomic_dec(&pd->usecnt); 493 494 err_up: 495 - up(&ib_uverbs_idr_mutex); 496 497 ib_umem_release(file->device->ib_dev, &obj->umem); 498 ··· 513 if (copy_from_user(&cmd, buf, sizeof cmd)) 514 return -EFAULT; 515 516 - down(&ib_uverbs_idr_mutex); 517 518 mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle); 519 if (!mr || mr->uobject->context != file->ucontext) ··· 527 528 idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle); 529 530 - down(&file->mutex); 531 list_del(&memobj->uobject.list); 532 - up(&file->mutex); 533 534 ib_umem_release(file->device->ib_dev, &memobj->umem); 535 kfree(memobj); 536 537 out: 538 - up(&ib_uverbs_idr_mutex); 539 540 return ret ? 
ret : in_len; 541 } ··· 628 cq->cq_context = ev_file; 629 atomic_set(&cq->usecnt, 0); 630 631 - down(&ib_uverbs_idr_mutex); 632 633 retry: 634 if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) { ··· 653 goto err_idr; 654 } 655 656 - down(&file->mutex); 657 list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list); 658 - up(&file->mutex); 659 660 - up(&ib_uverbs_idr_mutex); 661 662 return in_len; 663 ··· 665 idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id); 666 667 err_up: 668 - up(&ib_uverbs_idr_mutex); 669 ib_destroy_cq(cq); 670 671 err: ··· 701 goto out_wc; 702 } 703 704 - down(&ib_uverbs_idr_mutex); 705 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 706 if (!cq || cq->uobject->context != file->ucontext) { 707 ret = -EINVAL; ··· 731 ret = -EFAULT; 732 733 out: 734 - up(&ib_uverbs_idr_mutex); 735 kfree(resp); 736 737 out_wc: ··· 750 if (copy_from_user(&cmd, buf, sizeof cmd)) 751 return -EFAULT; 752 753 - down(&ib_uverbs_idr_mutex); 754 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 755 if (cq && cq->uobject->context == file->ucontext) { 756 ib_req_notify_cq(cq, cmd.solicited_only ? 757 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); 758 ret = in_len; 759 } 760 - up(&ib_uverbs_idr_mutex); 761 762 return ret; 763 } ··· 779 780 memset(&resp, 0, sizeof resp); 781 782 - down(&ib_uverbs_idr_mutex); 783 784 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 785 if (!cq || cq->uobject->context != file->ucontext) ··· 795 796 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle); 797 798 - down(&file->mutex); 799 list_del(&uobj->uobject.list); 800 - up(&file->mutex); 801 802 ib_uverbs_release_ucq(file, ev_file, uobj); 803 ··· 811 ret = -EFAULT; 812 813 out: 814 - up(&ib_uverbs_idr_mutex); 815 816 return ret ? 
ret : in_len; 817 } ··· 845 if (!uobj) 846 return -ENOMEM; 847 848 - down(&ib_uverbs_idr_mutex); 849 850 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 851 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle); ··· 930 goto err_idr; 931 } 932 933 - down(&file->mutex); 934 list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list); 935 - up(&file->mutex); 936 937 - up(&ib_uverbs_idr_mutex); 938 939 return in_len; 940 ··· 950 atomic_dec(&attr.srq->usecnt); 951 952 err_up: 953 - up(&ib_uverbs_idr_mutex); 954 955 kfree(uobj); 956 return ret; ··· 972 if (!attr) 973 return -ENOMEM; 974 975 - down(&ib_uverbs_idr_mutex); 976 977 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 978 if (!qp || qp->uobject->context != file->ucontext) { ··· 1033 ret = in_len; 1034 1035 out: 1036 - up(&ib_uverbs_idr_mutex); 1037 kfree(attr); 1038 1039 return ret; ··· 1054 1055 memset(&resp, 0, sizeof resp); 1056 1057 - down(&ib_uverbs_idr_mutex); 1058 1059 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1060 if (!qp || qp->uobject->context != file->ucontext) ··· 1073 1074 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle); 1075 1076 - down(&file->mutex); 1077 list_del(&uobj->uevent.uobject.list); 1078 - up(&file->mutex); 1079 1080 ib_uverbs_release_uevent(file, &uobj->uevent); 1081 ··· 1088 ret = -EFAULT; 1089 1090 out: 1091 - up(&ib_uverbs_idr_mutex); 1092 1093 return ret ? 
ret : in_len; 1094 } ··· 1119 if (!user_wr) 1120 return -ENOMEM; 1121 1122 - down(&ib_uverbs_idr_mutex); 1123 1124 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1125 if (!qp || qp->uobject->context != file->ucontext) ··· 1224 ret = -EFAULT; 1225 1226 out: 1227 - up(&ib_uverbs_idr_mutex); 1228 1229 while (wr) { 1230 next = wr->next; ··· 1341 if (IS_ERR(wr)) 1342 return PTR_ERR(wr); 1343 1344 - down(&ib_uverbs_idr_mutex); 1345 1346 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1347 if (!qp || qp->uobject->context != file->ucontext) ··· 1362 ret = -EFAULT; 1363 1364 out: 1365 - up(&ib_uverbs_idr_mutex); 1366 1367 while (wr) { 1368 next = wr->next; ··· 1392 if (IS_ERR(wr)) 1393 return PTR_ERR(wr); 1394 1395 - down(&ib_uverbs_idr_mutex); 1396 1397 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1398 if (!srq || srq->uobject->context != file->ucontext) ··· 1413 ret = -EFAULT; 1414 1415 out: 1416 - up(&ib_uverbs_idr_mutex); 1417 1418 while (wr) { 1419 next = wr->next; ··· 1446 if (!uobj) 1447 return -ENOMEM; 1448 1449 - down(&ib_uverbs_idr_mutex); 1450 1451 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 1452 if (!pd || pd->uobject->context != file->ucontext) { ··· 1498 goto err_idr; 1499 } 1500 1501 - down(&file->mutex); 1502 list_add_tail(&uobj->list, &file->ucontext->ah_list); 1503 - up(&file->mutex); 1504 1505 - up(&ib_uverbs_idr_mutex); 1506 1507 return in_len; 1508 ··· 1513 ib_destroy_ah(ah); 1514 1515 err_up: 1516 - up(&ib_uverbs_idr_mutex); 1517 1518 kfree(uobj); 1519 return ret; ··· 1530 if (copy_from_user(&cmd, buf, sizeof cmd)) 1531 return -EFAULT; 1532 1533 - down(&ib_uverbs_idr_mutex); 1534 1535 ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle); 1536 if (!ah || ah->uobject->context != file->ucontext) ··· 1544 1545 idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle); 1546 1547 - down(&file->mutex); 1548 list_del(&uobj->list); 1549 - up(&file->mutex); 1550 1551 kfree(uobj); 1552 1553 out: 1554 - up(&ib_uverbs_idr_mutex); 1555 1556 return ret ? 
ret : in_len; 1557 } ··· 1569 if (copy_from_user(&cmd, buf, sizeof cmd)) 1570 return -EFAULT; 1571 1572 - down(&ib_uverbs_idr_mutex); 1573 1574 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1575 if (!qp || qp->uobject->context != file->ucontext) ··· 1602 kfree(mcast); 1603 1604 out: 1605 - up(&ib_uverbs_idr_mutex); 1606 1607 return ret ? ret : in_len; 1608 } ··· 1620 if (copy_from_user(&cmd, buf, sizeof cmd)) 1621 return -EFAULT; 1622 1623 - down(&ib_uverbs_idr_mutex); 1624 1625 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1626 if (!qp || qp->uobject->context != file->ucontext) ··· 1641 } 1642 1643 out: 1644 - up(&ib_uverbs_idr_mutex); 1645 1646 return ret ? ret : in_len; 1647 } ··· 1673 if (!uobj) 1674 return -ENOMEM; 1675 1676 - down(&ib_uverbs_idr_mutex); 1677 1678 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 1679 ··· 1730 goto err_idr; 1731 } 1732 1733 - down(&file->mutex); 1734 list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list); 1735 - up(&file->mutex); 1736 1737 - up(&ib_uverbs_idr_mutex); 1738 1739 return in_len; 1740 ··· 1746 atomic_dec(&pd->usecnt); 1747 1748 err_up: 1749 - up(&ib_uverbs_idr_mutex); 1750 1751 kfree(uobj); 1752 return ret; ··· 1764 if (copy_from_user(&cmd, buf, sizeof cmd)) 1765 return -EFAULT; 1766 1767 - down(&ib_uverbs_idr_mutex); 1768 1769 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1770 if (!srq || srq->uobject->context != file->ucontext) { ··· 1778 ret = ib_modify_srq(srq, &attr, cmd.attr_mask); 1779 1780 out: 1781 - up(&ib_uverbs_idr_mutex); 1782 1783 return ret ? 
ret : in_len; 1784 } ··· 1796 if (copy_from_user(&cmd, buf, sizeof cmd)) 1797 return -EFAULT; 1798 1799 - down(&ib_uverbs_idr_mutex); 1800 1801 memset(&resp, 0, sizeof resp); 1802 ··· 1812 1813 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); 1814 1815 - down(&file->mutex); 1816 list_del(&uobj->uobject.list); 1817 - up(&file->mutex); 1818 1819 ib_uverbs_release_uevent(file, uobj); 1820 ··· 1827 ret = -EFAULT; 1828 1829 out: 1830 - up(&ib_uverbs_idr_mutex); 1831 1832 return ret ? ret : in_len; 1833 }
··· 67 if (copy_from_user(&cmd, buf, sizeof cmd)) 68 return -EFAULT; 69 70 + mutex_lock(&file->mutex); 71 72 if (file->ucontext) { 73 ret = -EINVAL; ··· 119 120 fd_install(resp.async_fd, filp); 121 122 + mutex_unlock(&file->mutex); 123 124 return in_len; 125 ··· 131 ibdev->dealloc_ucontext(ucontext); 132 133 err: 134 + mutex_unlock(&file->mutex); 135 return ret; 136 } 137 ··· 290 pd->uobject = uobj; 291 atomic_set(&pd->usecnt, 0); 292 293 + mutex_lock(&ib_uverbs_idr_mutex); 294 295 retry: 296 if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) { ··· 314 goto err_idr; 315 } 316 317 + mutex_lock(&file->mutex); 318 list_add_tail(&uobj->list, &file->ucontext->pd_list); 319 + mutex_unlock(&file->mutex); 320 321 + mutex_unlock(&ib_uverbs_idr_mutex); 322 323 return in_len; 324 ··· 326 idr_remove(&ib_uverbs_pd_idr, uobj->id); 327 328 err_up: 329 + mutex_unlock(&ib_uverbs_idr_mutex); 330 ib_dealloc_pd(pd); 331 332 err: ··· 346 if (copy_from_user(&cmd, buf, sizeof cmd)) 347 return -EFAULT; 348 349 + mutex_lock(&ib_uverbs_idr_mutex); 350 351 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 352 if (!pd || pd->uobject->context != file->ucontext) ··· 360 361 idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle); 362 363 + mutex_lock(&file->mutex); 364 list_del(&uobj->list); 365 + mutex_unlock(&file->mutex); 366 367 kfree(uobj); 368 369 out: 370 + mutex_unlock(&ib_uverbs_idr_mutex); 371 372 return ret ? 
ret : in_len; 373 } ··· 426 427 obj->umem.virt_base = cmd.hca_va; 428 429 + mutex_lock(&ib_uverbs_idr_mutex); 430 431 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 432 if (!pd || pd->uobject->context != file->ucontext) { ··· 476 goto err_idr; 477 } 478 479 + mutex_lock(&file->mutex); 480 list_add_tail(&obj->uobject.list, &file->ucontext->mr_list); 481 + mutex_unlock(&file->mutex); 482 483 + mutex_unlock(&ib_uverbs_idr_mutex); 484 485 return in_len; 486 ··· 492 atomic_dec(&pd->usecnt); 493 494 err_up: 495 + mutex_unlock(&ib_uverbs_idr_mutex); 496 497 ib_umem_release(file->device->ib_dev, &obj->umem); 498 ··· 513 if (copy_from_user(&cmd, buf, sizeof cmd)) 514 return -EFAULT; 515 516 + mutex_lock(&ib_uverbs_idr_mutex); 517 518 mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle); 519 if (!mr || mr->uobject->context != file->ucontext) ··· 527 528 idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle); 529 530 + mutex_lock(&file->mutex); 531 list_del(&memobj->uobject.list); 532 + mutex_unlock(&file->mutex); 533 534 ib_umem_release(file->device->ib_dev, &memobj->umem); 535 kfree(memobj); 536 537 out: 538 + mutex_unlock(&ib_uverbs_idr_mutex); 539 540 return ret ? 
ret : in_len; 541 } ··· 628 cq->cq_context = ev_file; 629 atomic_set(&cq->usecnt, 0); 630 631 + mutex_lock(&ib_uverbs_idr_mutex); 632 633 retry: 634 if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) { ··· 653 goto err_idr; 654 } 655 656 + mutex_lock(&file->mutex); 657 list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list); 658 + mutex_unlock(&file->mutex); 659 660 + mutex_unlock(&ib_uverbs_idr_mutex); 661 662 return in_len; 663 ··· 665 idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id); 666 667 err_up: 668 + mutex_unlock(&ib_uverbs_idr_mutex); 669 ib_destroy_cq(cq); 670 671 err: ··· 701 goto out_wc; 702 } 703 704 + mutex_lock(&ib_uverbs_idr_mutex); 705 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 706 if (!cq || cq->uobject->context != file->ucontext) { 707 ret = -EINVAL; ··· 731 ret = -EFAULT; 732 733 out: 734 + mutex_unlock(&ib_uverbs_idr_mutex); 735 kfree(resp); 736 737 out_wc: ··· 750 if (copy_from_user(&cmd, buf, sizeof cmd)) 751 return -EFAULT; 752 753 + mutex_lock(&ib_uverbs_idr_mutex); 754 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 755 if (cq && cq->uobject->context == file->ucontext) { 756 ib_req_notify_cq(cq, cmd.solicited_only ? 757 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); 758 ret = in_len; 759 } 760 + mutex_unlock(&ib_uverbs_idr_mutex); 761 762 return ret; 763 } ··· 779 780 memset(&resp, 0, sizeof resp); 781 782 + mutex_lock(&ib_uverbs_idr_mutex); 783 784 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 785 if (!cq || cq->uobject->context != file->ucontext) ··· 795 796 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle); 797 798 + mutex_lock(&file->mutex); 799 list_del(&uobj->uobject.list); 800 + mutex_unlock(&file->mutex); 801 802 ib_uverbs_release_ucq(file, ev_file, uobj); 803 ··· 811 ret = -EFAULT; 812 813 out: 814 + mutex_unlock(&ib_uverbs_idr_mutex); 815 816 return ret ? 
ret : in_len; 817 } ··· 845 if (!uobj) 846 return -ENOMEM; 847 848 + mutex_lock(&ib_uverbs_idr_mutex); 849 850 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 851 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle); ··· 930 goto err_idr; 931 } 932 933 + mutex_lock(&file->mutex); 934 list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list); 935 + mutex_unlock(&file->mutex); 936 937 + mutex_unlock(&ib_uverbs_idr_mutex); 938 939 return in_len; 940 ··· 950 atomic_dec(&attr.srq->usecnt); 951 952 err_up: 953 + mutex_unlock(&ib_uverbs_idr_mutex); 954 955 kfree(uobj); 956 return ret; ··· 972 if (!attr) 973 return -ENOMEM; 974 975 + mutex_lock(&ib_uverbs_idr_mutex); 976 977 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 978 if (!qp || qp->uobject->context != file->ucontext) { ··· 1033 ret = in_len; 1034 1035 out: 1036 + mutex_unlock(&ib_uverbs_idr_mutex); 1037 kfree(attr); 1038 1039 return ret; ··· 1054 1055 memset(&resp, 0, sizeof resp); 1056 1057 + mutex_lock(&ib_uverbs_idr_mutex); 1058 1059 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1060 if (!qp || qp->uobject->context != file->ucontext) ··· 1073 1074 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle); 1075 1076 + mutex_lock(&file->mutex); 1077 list_del(&uobj->uevent.uobject.list); 1078 + mutex_unlock(&file->mutex); 1079 1080 ib_uverbs_release_uevent(file, &uobj->uevent); 1081 ··· 1088 ret = -EFAULT; 1089 1090 out: 1091 + mutex_unlock(&ib_uverbs_idr_mutex); 1092 1093 return ret ? 
ret : in_len; 1094 } ··· 1119 if (!user_wr) 1120 return -ENOMEM; 1121 1122 + mutex_lock(&ib_uverbs_idr_mutex); 1123 1124 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1125 if (!qp || qp->uobject->context != file->ucontext) ··· 1224 ret = -EFAULT; 1225 1226 out: 1227 + mutex_unlock(&ib_uverbs_idr_mutex); 1228 1229 while (wr) { 1230 next = wr->next; ··· 1341 if (IS_ERR(wr)) 1342 return PTR_ERR(wr); 1343 1344 + mutex_lock(&ib_uverbs_idr_mutex); 1345 1346 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1347 if (!qp || qp->uobject->context != file->ucontext) ··· 1362 ret = -EFAULT; 1363 1364 out: 1365 + mutex_unlock(&ib_uverbs_idr_mutex); 1366 1367 while (wr) { 1368 next = wr->next; ··· 1392 if (IS_ERR(wr)) 1393 return PTR_ERR(wr); 1394 1395 + mutex_lock(&ib_uverbs_idr_mutex); 1396 1397 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1398 if (!srq || srq->uobject->context != file->ucontext) ··· 1413 ret = -EFAULT; 1414 1415 out: 1416 + mutex_unlock(&ib_uverbs_idr_mutex); 1417 1418 while (wr) { 1419 next = wr->next; ··· 1446 if (!uobj) 1447 return -ENOMEM; 1448 1449 + mutex_lock(&ib_uverbs_idr_mutex); 1450 1451 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 1452 if (!pd || pd->uobject->context != file->ucontext) { ··· 1498 goto err_idr; 1499 } 1500 1501 + mutex_lock(&file->mutex); 1502 list_add_tail(&uobj->list, &file->ucontext->ah_list); 1503 + mutex_unlock(&file->mutex); 1504 1505 + mutex_unlock(&ib_uverbs_idr_mutex); 1506 1507 return in_len; 1508 ··· 1513 ib_destroy_ah(ah); 1514 1515 err_up: 1516 + mutex_unlock(&ib_uverbs_idr_mutex); 1517 1518 kfree(uobj); 1519 return ret; ··· 1530 if (copy_from_user(&cmd, buf, sizeof cmd)) 1531 return -EFAULT; 1532 1533 + mutex_lock(&ib_uverbs_idr_mutex); 1534 1535 ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle); 1536 if (!ah || ah->uobject->context != file->ucontext) ··· 1544 1545 idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle); 1546 1547 + mutex_lock(&file->mutex); 1548 list_del(&uobj->list); 1549 + 
mutex_unlock(&file->mutex); 1550 1551 kfree(uobj); 1552 1553 out: 1554 + mutex_unlock(&ib_uverbs_idr_mutex); 1555 1556 return ret ? ret : in_len; 1557 } ··· 1569 if (copy_from_user(&cmd, buf, sizeof cmd)) 1570 return -EFAULT; 1571 1572 + mutex_lock(&ib_uverbs_idr_mutex); 1573 1574 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1575 if (!qp || qp->uobject->context != file->ucontext) ··· 1602 kfree(mcast); 1603 1604 out: 1605 + mutex_unlock(&ib_uverbs_idr_mutex); 1606 1607 return ret ? ret : in_len; 1608 } ··· 1620 if (copy_from_user(&cmd, buf, sizeof cmd)) 1621 return -EFAULT; 1622 1623 + mutex_lock(&ib_uverbs_idr_mutex); 1624 1625 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1626 if (!qp || qp->uobject->context != file->ucontext) ··· 1641 } 1642 1643 out: 1644 + mutex_unlock(&ib_uverbs_idr_mutex); 1645 1646 return ret ? ret : in_len; 1647 } ··· 1673 if (!uobj) 1674 return -ENOMEM; 1675 1676 + mutex_lock(&ib_uverbs_idr_mutex); 1677 1678 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 1679 ··· 1730 goto err_idr; 1731 } 1732 1733 + mutex_lock(&file->mutex); 1734 list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list); 1735 + mutex_unlock(&file->mutex); 1736 1737 + mutex_unlock(&ib_uverbs_idr_mutex); 1738 1739 return in_len; 1740 ··· 1746 atomic_dec(&pd->usecnt); 1747 1748 err_up: 1749 + mutex_unlock(&ib_uverbs_idr_mutex); 1750 1751 kfree(uobj); 1752 return ret; ··· 1764 if (copy_from_user(&cmd, buf, sizeof cmd)) 1765 return -EFAULT; 1766 1767 + mutex_lock(&ib_uverbs_idr_mutex); 1768 1769 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1770 if (!srq || srq->uobject->context != file->ucontext) { ··· 1778 ret = ib_modify_srq(srq, &attr, cmd.attr_mask); 1779 1780 out: 1781 + mutex_unlock(&ib_uverbs_idr_mutex); 1782 1783 return ret ? 
ret : in_len; 1784 } ··· 1796 if (copy_from_user(&cmd, buf, sizeof cmd)) 1797 return -EFAULT; 1798 1799 + mutex_lock(&ib_uverbs_idr_mutex); 1800 1801 memset(&resp, 0, sizeof resp); 1802 ··· 1812 1813 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); 1814 1815 + mutex_lock(&file->mutex); 1816 list_del(&uobj->uobject.list); 1817 + mutex_unlock(&file->mutex); 1818 1819 ib_uverbs_release_uevent(file, uobj); 1820 ··· 1827 ret = -EFAULT; 1828 1829 out: 1830 + mutex_unlock(&ib_uverbs_idr_mutex); 1831 1832 return ret ? ret : in_len; 1833 }
+4 -4
drivers/infiniband/core/uverbs_main.c
··· 66 67 static struct class *uverbs_class; 68 69 - DECLARE_MUTEX(ib_uverbs_idr_mutex); 70 DEFINE_IDR(ib_uverbs_pd_idr); 71 DEFINE_IDR(ib_uverbs_mr_idr); 72 DEFINE_IDR(ib_uverbs_mw_idr); ··· 180 if (!context) 181 return 0; 182 183 - down(&ib_uverbs_idr_mutex); 184 185 list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) { 186 struct ib_ah *ah = idr_find(&ib_uverbs_ah_idr, uobj->id); ··· 250 kfree(uobj); 251 } 252 253 - up(&ib_uverbs_idr_mutex); 254 255 return context->device->dealloc_ucontext(context); 256 } ··· 653 file->ucontext = NULL; 654 file->async_file = NULL; 655 kref_init(&file->ref); 656 - init_MUTEX(&file->mutex); 657 658 filp->private_data = file; 659
··· 66 67 static struct class *uverbs_class; 68 69 + DEFINE_MUTEX(ib_uverbs_idr_mutex); 70 DEFINE_IDR(ib_uverbs_pd_idr); 71 DEFINE_IDR(ib_uverbs_mr_idr); 72 DEFINE_IDR(ib_uverbs_mw_idr); ··· 180 if (!context) 181 return 0; 182 183 + mutex_lock(&ib_uverbs_idr_mutex); 184 185 list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) { 186 struct ib_ah *ah = idr_find(&ib_uverbs_ah_idr, uobj->id); ··· 250 kfree(uobj); 251 } 252 253 + mutex_unlock(&ib_uverbs_idr_mutex); 254 255 return context->device->dealloc_ucontext(context); 256 } ··· 653 file->ucontext = NULL; 654 file->async_file = NULL; 655 kref_init(&file->ref); 656 + mutex_init(&file->mutex); 657 658 filp->private_data = file; 659
+3 -3
drivers/infiniband/ulp/ipoib/ipoib.h
··· 45 #include <linux/config.h> 46 #include <linux/kref.h> 47 #include <linux/if_infiniband.h> 48 49 #include <net/neighbour.h> 50 51 #include <asm/atomic.h> 52 - #include <asm/semaphore.h> 53 54 #include <rdma/ib_verbs.h> 55 #include <rdma/ib_pack.h> ··· 123 124 unsigned long flags; 125 126 - struct semaphore mcast_mutex; 127 - struct semaphore vlan_mutex; 128 129 struct rb_root path_tree; 130 struct list_head path_list;
··· 45 #include <linux/config.h> 46 #include <linux/kref.h> 47 #include <linux/if_infiniband.h> 48 + #include <linux/mutex.h> 49 50 #include <net/neighbour.h> 51 52 #include <asm/atomic.h> 53 54 #include <rdma/ib_verbs.h> 55 #include <rdma/ib_pack.h> ··· 123 124 unsigned long flags; 125 126 + struct mutex mcast_mutex; 127 + struct mutex vlan_mutex; 128 129 struct rb_root path_tree; 130 struct list_head path_list;
+9 -9
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 52 53 #define IPOIB_OP_RECV (1ul << 31) 54 55 - static DECLARE_MUTEX(pkey_sem); 56 57 struct ipoib_ah *ipoib_create_ah(struct net_device *dev, 58 struct ib_pd *pd, struct ib_ah_attr *attr) ··· 445 446 /* Shutdown the P_Key thread if still active */ 447 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { 448 - down(&pkey_sem); 449 set_bit(IPOIB_PKEY_STOP, &priv->flags); 450 cancel_delayed_work(&priv->pkey_task); 451 - up(&pkey_sem); 452 flush_workqueue(ipoib_workqueue); 453 } 454 ··· 599 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 600 ipoib_ib_dev_up(dev); 601 602 - down(&priv->vlan_mutex); 603 604 /* Flush any child interfaces too */ 605 list_for_each_entry(cpriv, &priv->child_intfs, list) 606 ipoib_ib_dev_flush(&cpriv->dev); 607 608 - up(&priv->vlan_mutex); 609 } 610 611 void ipoib_ib_dev_cleanup(struct net_device *dev) ··· 651 if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) 652 ipoib_open(dev); 653 else { 654 - down(&pkey_sem); 655 if (!test_bit(IPOIB_PKEY_STOP, &priv->flags)) 656 queue_delayed_work(ipoib_workqueue, 657 &priv->pkey_task, 658 HZ); 659 - up(&pkey_sem); 660 } 661 } 662 ··· 670 671 /* P_Key value not assigned yet - start polling */ 672 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { 673 - down(&pkey_sem); 674 clear_bit(IPOIB_PKEY_STOP, &priv->flags); 675 queue_delayed_work(ipoib_workqueue, 676 &priv->pkey_task, 677 HZ); 678 - up(&pkey_sem); 679 return 1; 680 } 681
··· 52 53 #define IPOIB_OP_RECV (1ul << 31) 54 55 + static DEFINE_MUTEX(pkey_mutex); 56 57 struct ipoib_ah *ipoib_create_ah(struct net_device *dev, 58 struct ib_pd *pd, struct ib_ah_attr *attr) ··· 445 446 /* Shutdown the P_Key thread if still active */ 447 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { 448 + mutex_lock(&pkey_mutex); 449 set_bit(IPOIB_PKEY_STOP, &priv->flags); 450 cancel_delayed_work(&priv->pkey_task); 451 + mutex_unlock(&pkey_mutex); 452 flush_workqueue(ipoib_workqueue); 453 } 454 ··· 599 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 600 ipoib_ib_dev_up(dev); 601 602 + mutex_lock(&priv->vlan_mutex); 603 604 /* Flush any child interfaces too */ 605 list_for_each_entry(cpriv, &priv->child_intfs, list) 606 ipoib_ib_dev_flush(&cpriv->dev); 607 608 + mutex_unlock(&priv->vlan_mutex); 609 } 610 611 void ipoib_ib_dev_cleanup(struct net_device *dev) ··· 651 if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) 652 ipoib_open(dev); 653 else { 654 + mutex_lock(&pkey_mutex); 655 if (!test_bit(IPOIB_PKEY_STOP, &priv->flags)) 656 queue_delayed_work(ipoib_workqueue, 657 &priv->pkey_task, 658 HZ); 659 + mutex_unlock(&pkey_mutex); 660 } 661 } 662 ··· 670 671 /* P_Key value not assigned yet - start polling */ 672 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { 673 + mutex_lock(&pkey_mutex); 674 clear_bit(IPOIB_PKEY_STOP, &priv->flags); 675 queue_delayed_work(ipoib_workqueue, 676 &priv->pkey_task, 677 HZ); 678 + mutex_unlock(&pkey_mutex); 679 return 1; 680 } 681
+6 -6
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 105 struct ipoib_dev_priv *cpriv; 106 107 /* Bring up any child interfaces too */ 108 - down(&priv->vlan_mutex); 109 list_for_each_entry(cpriv, &priv->child_intfs, list) { 110 int flags; 111 ··· 115 116 dev_change_flags(cpriv->dev, flags | IFF_UP); 117 } 118 - up(&priv->vlan_mutex); 119 } 120 121 netif_start_queue(dev); ··· 140 struct ipoib_dev_priv *cpriv; 141 142 /* Bring down any child interfaces too */ 143 - down(&priv->vlan_mutex); 144 list_for_each_entry(cpriv, &priv->child_intfs, list) { 145 int flags; 146 ··· 150 151 dev_change_flags(cpriv->dev, flags & ~IFF_UP); 152 } 153 - up(&priv->vlan_mutex); 154 } 155 156 return 0; ··· 892 spin_lock_init(&priv->lock); 893 spin_lock_init(&priv->tx_lock); 894 895 - init_MUTEX(&priv->mcast_mutex); 896 - init_MUTEX(&priv->vlan_mutex); 897 898 INIT_LIST_HEAD(&priv->path_list); 899 INIT_LIST_HEAD(&priv->child_intfs);
··· 105 struct ipoib_dev_priv *cpriv; 106 107 /* Bring up any child interfaces too */ 108 + mutex_lock(&priv->vlan_mutex); 109 list_for_each_entry(cpriv, &priv->child_intfs, list) { 110 int flags; 111 ··· 115 116 dev_change_flags(cpriv->dev, flags | IFF_UP); 117 } 118 + mutex_unlock(&priv->vlan_mutex); 119 } 120 121 netif_start_queue(dev); ··· 140 struct ipoib_dev_priv *cpriv; 141 142 /* Bring down any child interfaces too */ 143 + mutex_lock(&priv->vlan_mutex); 144 list_for_each_entry(cpriv, &priv->child_intfs, list) { 145 int flags; 146 ··· 150 151 dev_change_flags(cpriv->dev, flags & ~IFF_UP); 152 } 153 + mutex_unlock(&priv->vlan_mutex); 154 } 155 156 return 0; ··· 892 spin_lock_init(&priv->lock); 893 spin_lock_init(&priv->tx_lock); 894 895 + mutex_init(&priv->mcast_mutex); 896 + mutex_init(&priv->vlan_mutex); 897 898 INIT_LIST_HEAD(&priv->path_list); 899 INIT_LIST_HEAD(&priv->child_intfs);
+13 -13
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 55 "Enable multicast debug tracing if > 0"); 56 #endif 57 58 - static DECLARE_MUTEX(mcast_mutex); 59 60 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ 61 struct ipoib_mcast { ··· 385 386 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) { 387 mcast->backoff = 1; 388 - down(&mcast_mutex); 389 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 390 queue_work(ipoib_workqueue, &priv->mcast_task); 391 - up(&mcast_mutex); 392 complete(&mcast->done); 393 return; 394 } ··· 418 419 mcast->query = NULL; 420 421 - down(&mcast_mutex); 422 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { 423 if (status == -ETIMEDOUT) 424 queue_work(ipoib_workqueue, &priv->mcast_task); ··· 427 mcast->backoff * HZ); 428 } else 429 complete(&mcast->done); 430 - up(&mcast_mutex); 431 432 return; 433 } ··· 482 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) 483 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; 484 485 - down(&mcast_mutex); 486 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 487 queue_delayed_work(ipoib_workqueue, 488 &priv->mcast_task, 489 mcast->backoff * HZ); 490 - up(&mcast_mutex); 491 } else 492 mcast->query_id = ret; 493 } ··· 520 priv->broadcast = ipoib_mcast_alloc(dev, 1); 521 if (!priv->broadcast) { 522 ipoib_warn(priv, "failed to allocate broadcast group\n"); 523 - down(&mcast_mutex); 524 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 525 queue_delayed_work(ipoib_workqueue, 526 &priv->mcast_task, HZ); 527 - up(&mcast_mutex); 528 return; 529 } ··· 580 581 ipoib_dbg_mcast(priv, "starting multicast thread\n"); 582 583 - down(&mcast_mutex); 584 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) 585 queue_work(ipoib_workqueue, &priv->mcast_task); 586 - up(&mcast_mutex); 587 588 return 0; 589 } ··· 595 596 ipoib_dbg_mcast(priv, "stopping multicast thread\n"); 597 598 - down(&mcast_mutex); 599 clear_bit(IPOIB_MCAST_RUN, &priv->flags); 600 cancel_delayed_work(&priv->mcast_task); 601 - up(&mcast_mutex); 602 603 if (flush) 604 flush_workqueue(ipoib_workqueue);
··· 55 "Enable multicast debug tracing if > 0"); 56 #endif 57 58 + static DEFINE_MUTEX(mcast_mutex); 59 60 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ 61 struct ipoib_mcast { ··· 385 386 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) { 387 mcast->backoff = 1; 388 + mutex_lock(&mcast_mutex); 389 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 390 queue_work(ipoib_workqueue, &priv->mcast_task); 391 + mutex_unlock(&mcast_mutex); 392 complete(&mcast->done); 393 return; 394 } ··· 418 419 mcast->query = NULL; 420 421 + mutex_lock(&mcast_mutex); 422 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { 423 if (status == -ETIMEDOUT) 424 queue_work(ipoib_workqueue, &priv->mcast_task); ··· 427 mcast->backoff * HZ); 428 } else 429 complete(&mcast->done); 430 + mutex_unlock(&mcast_mutex); 431 432 return; 433 } ··· 482 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) 483 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; 484 485 + mutex_lock(&mcast_mutex); 486 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 487 queue_delayed_work(ipoib_workqueue, 488 &priv->mcast_task, 489 mcast->backoff * HZ); 490 + mutex_unlock(&mcast_mutex); 491 } else 492 mcast->query_id = ret; 493 } ··· 520 priv->broadcast = ipoib_mcast_alloc(dev, 1); 521 if (!priv->broadcast) { 522 ipoib_warn(priv, "failed to allocate broadcast group\n"); 523 + mutex_lock(&mcast_mutex); 524 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 525 queue_delayed_work(ipoib_workqueue, 526 &priv->mcast_task, HZ); 527 + mutex_unlock(&mcast_mutex); 528 return; 529 } ··· 580 581 ipoib_dbg_mcast(priv, "starting multicast thread\n"); 582 583 + mutex_lock(&mcast_mutex); 584 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) 585 queue_work(ipoib_workqueue, &priv->mcast_task); 586 + mutex_unlock(&mcast_mutex); 587 588 return 0; 589 } ··· 595 596 ipoib_dbg_mcast(priv, "stopping multicast thread\n"); 597 598 + mutex_lock(&mcast_mutex); 599 clear_bit(IPOIB_MCAST_RUN, &priv->flags); 600 cancel_delayed_work(&priv->mcast_task); 601 + mutex_unlock(&mcast_mutex); 602 603 if (flush) 604 flush_workqueue(ipoib_workqueue);
+4 -4
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
··· 65 } 66 67 /* attach QP to multicast group */ 68 - down(&priv->mcast_mutex); 69 ret = ib_attach_mcast(priv->qp, mgid, mlid); 70 - up(&priv->mcast_mutex); 71 if (ret) 72 ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret); 73 ··· 81 struct ipoib_dev_priv *priv = netdev_priv(dev); 82 int ret; 83 84 - down(&priv->mcast_mutex); 85 ret = ib_detach_mcast(priv->qp, mgid, mlid); 86 - up(&priv->mcast_mutex); 87 if (ret) 88 ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret); 89
··· 65 } 66 67 /* attach QP to multicast group */ 68 + mutex_lock(&priv->mcast_mutex); 69 ret = ib_attach_mcast(priv->qp, mgid, mlid); 70 + mutex_unlock(&priv->mcast_mutex); 71 if (ret) 72 ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret); 73 ··· 81 struct ipoib_dev_priv *priv = netdev_priv(dev); 82 int ret; 83 84 + mutex_lock(&priv->mcast_mutex); 85 ret = ib_detach_mcast(priv->qp, mgid, mlid); 86 + mutex_unlock(&priv->mcast_mutex); 87 if (ret) 88 ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret); 89
+5 -5
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
··· 63 64 ppriv = netdev_priv(pdev); 65 66 - down(&ppriv->vlan_mutex); 67 68 /* 69 * First ensure this isn't a duplicate. We check the parent device and ··· 124 125 list_add_tail(&priv->list, &ppriv->child_intfs); 126 127 - up(&ppriv->vlan_mutex); 128 129 return 0; 130 ··· 139 free_netdev(priv->dev); 140 141 err: 142 - up(&ppriv->vlan_mutex); 143 return result; 144 } 145 ··· 153 154 ppriv = netdev_priv(pdev); 155 156 - down(&ppriv->vlan_mutex); 157 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 158 if (priv->pkey == pkey) { 159 unregister_netdev(priv->dev); ··· 167 break; 168 } 169 } 170 - up(&ppriv->vlan_mutex); 171 172 return ret; 173 }
··· 63 64 ppriv = netdev_priv(pdev); 65 66 + mutex_lock(&ppriv->vlan_mutex); 67 68 /* 69 * First ensure this isn't a duplicate. We check the parent device and ··· 124 125 list_add_tail(&priv->list, &ppriv->child_intfs); 126 127 + mutex_unlock(&ppriv->vlan_mutex); 128 129 return 0; 130 ··· 139 free_netdev(priv->dev); 140 141 err: 142 + mutex_unlock(&ppriv->vlan_mutex); 143 return result; 144 } 145 ··· 153 154 ppriv = netdev_priv(pdev); 155 156 + mutex_lock(&ppriv->vlan_mutex); 157 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 158 if (priv->pkey == pkey) { 159 unregister_netdev(priv->dev); ··· 167 break; 168 } 169 } 170 + mutex_unlock(&ppriv->vlan_mutex); 171 172 return ret; 173 }