Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

21 files changed, +287 -352
drivers/infiniband/core/cm.c  +5 -24
··· 3163 3163 } 3164 3164 EXPORT_SYMBOL(ib_cm_init_qp_attr); 3165 3165 3166 - static __be64 cm_get_ca_guid(struct ib_device *device) 3167 - { 3168 - struct ib_device_attr *device_attr; 3169 - __be64 guid; 3170 - int ret; 3171 - 3172 - device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL); 3173 - if (!device_attr) 3174 - return 0; 3175 - 3176 - ret = ib_query_device(device, device_attr); 3177 - guid = ret ? 0 : device_attr->node_guid; 3178 - kfree(device_attr); 3179 - return guid; 3180 - } 3181 - 3182 3166 static void cm_add_one(struct ib_device *device) 3183 3167 { 3184 3168 struct cm_device *cm_dev; ··· 3184 3200 return; 3185 3201 3186 3202 cm_dev->device = device; 3187 - cm_dev->ca_guid = cm_get_ca_guid(device); 3188 - if (!cm_dev->ca_guid) 3189 - goto error1; 3203 + cm_dev->ca_guid = device->node_guid; 3190 3204 3191 3205 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); 3192 3206 for (i = 1; i <= device->phys_port_cnt; i++) { ··· 3199 3217 cm_recv_handler, 3200 3218 port); 3201 3219 if (IS_ERR(port->mad_agent)) 3202 - goto error2; 3220 + goto error1; 3203 3221 3204 3222 ret = ib_modify_port(device, i, 0, &port_modify); 3205 3223 if (ret) 3206 - goto error3; 3224 + goto error2; 3207 3225 } 3208 3226 ib_set_client_data(device, &cm_client, cm_dev); 3209 3227 ··· 3212 3230 write_unlock_irqrestore(&cm.device_lock, flags); 3213 3231 return; 3214 3232 3215 - error3: 3216 - ib_unregister_mad_agent(port->mad_agent); 3217 3233 error2: 3234 + ib_unregister_mad_agent(port->mad_agent); 3235 + error1: 3218 3236 port_modify.set_port_cap_mask = 0; 3219 3237 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; 3220 3238 while (--i) { ··· 3222 3240 ib_modify_port(device, port->port_num, 0, &port_modify); 3223 3241 ib_unregister_mad_agent(port->mad_agent); 3224 3242 } 3225 - error1: 3226 3243 kfree(cm_dev); 3227 3244 } 3228 3245
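The cm.c hunk above is one instance of a theme that runs through this merge: the node GUID is now cached in struct ib_device at registration time (see the mthca_provider.c and ib_verbs.h hunks below), so consumers no longer allocate an ib_device_attr and call ib_query_device() just to read it. A minimal before/after sketch; the helper names are illustrative only, not from the patch:

/* Before: fetch the GUID with a full device query, as the removed
 * cm_get_ca_guid() did (minus the kmalloc). */
static __be64 guid_via_query(struct ib_device *device)
{
	struct ib_device_attr attr;

	if (ib_query_device(device, &attr))
		return 0;
	return attr.node_guid;	/* field removed from ib_device_attr in this merge */
}

/* After: read the value the low-level driver filled in before registration. */
static __be64 guid_cached(struct ib_device *device)
{
	return device->node_guid;
}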
drivers/infiniband/core/device.c  +11 -12
··· 38 38 #include <linux/errno.h> 39 39 #include <linux/slab.h> 40 40 #include <linux/init.h> 41 - 42 - #include <asm/semaphore.h> 41 + #include <linux/mutex.h> 43 42 44 43 #include "core_priv.h" 45 44 ··· 56 57 static LIST_HEAD(client_list); 57 58 58 59 /* 59 - * device_sem protects access to both device_list and client_list. 60 + * device_mutex protects access to both device_list and client_list. 60 61 * There's no real point to using multiple locks or something fancier 61 62 * like an rwsem: we always access both lists, and we're always 62 63 * modifying one list or the other list. In any case this is not a 63 64 * hot path so there's no point in trying to optimize. 64 65 */ 65 - static DECLARE_MUTEX(device_sem); 66 + static DEFINE_MUTEX(device_mutex); 66 67 67 68 static int ib_device_check_mandatory(struct ib_device *device) 68 69 { ··· 220 221 { 221 222 int ret; 222 223 223 - down(&device_sem); 224 + mutex_lock(&device_mutex); 224 225 225 226 if (strchr(device->name, '%')) { 226 227 ret = alloc_name(device->name); ··· 258 259 } 259 260 260 261 out: 261 - up(&device_sem); 262 + mutex_unlock(&device_mutex); 262 263 return ret; 263 264 } 264 265 EXPORT_SYMBOL(ib_register_device); ··· 275 276 struct ib_client_data *context, *tmp; 276 277 unsigned long flags; 277 278 278 - down(&device_sem); 279 + mutex_lock(&device_mutex); 279 280 280 281 list_for_each_entry_reverse(client, &client_list, list) 281 282 if (client->remove) ··· 283 284 284 285 list_del(&device->core_list); 285 286 286 - up(&device_sem); 287 + mutex_unlock(&device_mutex); 287 288 288 289 spin_lock_irqsave(&device->client_data_lock, flags); 289 290 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ··· 311 312 { 312 313 struct ib_device *device; 313 314 314 - down(&device_sem); 315 + mutex_lock(&device_mutex); 315 316 316 317 list_add_tail(&client->list, &client_list); 317 318 list_for_each_entry(device, &device_list, core_list) 318 319 if (client->add && !add_client_context(device, client)) 319 320 client->add(device); 320 321 321 - up(&device_sem); 322 + mutex_unlock(&device_mutex); 322 323 323 324 return 0; 324 325 } ··· 338 339 struct ib_device *device; 339 340 unsigned long flags; 340 341 341 - down(&device_sem); 342 + mutex_lock(&device_mutex); 342 343 343 344 list_for_each_entry(device, &device_list, core_list) { 344 345 if (client->remove) ··· 354 355 } 355 356 list_del(&client->list); 356 357 357 - up(&device_sem); 358 + mutex_unlock(&device_mutex); 358 359 } 359 360 EXPORT_SYMBOL(ib_unregister_client); 360 361
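The device.c hunk is the template for the locking changes in most of the files below (ucm.c, uverbs, ipoib): each binary semaphore declared with DECLARE_MUTEX() and taken with down()/up() becomes a real struct mutex. A condensed sketch of the pattern; the function here is illustrative, not part of the patch:

#include <linux/mutex.h>

/* Old style, removed throughout this merge:
 *	static DECLARE_MUTEX(device_sem);
 *	down(&device_sem);  ...  up(&device_sem);
 */
static DEFINE_MUTEX(device_mutex);

static void touch_device_lists(void)
{
	mutex_lock(&device_mutex);
	/* ... walk or modify device_list / client_list ... */
	mutex_unlock(&device_mutex);
}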
drivers/infiniband/core/sysfs.c  +5 -17
··· 445 445 return -ENOMEM; 446 446 447 447 /* 448 - * It might be nice to pass the node GUID with the event, but 449 - * right now the only way to get it is to query the device 450 - * provider, and this can crash during device removal because 451 - * we are will be running after driver removal has started. 452 - * We could add a node_guid field to struct ib_device, or we 453 - * could just let userspace read the node GUID from sysfs when 454 - * devices are added. 448 + * It would be nice to pass the node GUID with the event... 455 449 */ 456 450 457 451 envp[i] = NULL; ··· 617 623 static ssize_t show_node_guid(struct class_device *cdev, char *buf) 618 624 { 619 625 struct ib_device *dev = container_of(cdev, struct ib_device, class_dev); 620 - struct ib_device_attr attr; 621 - ssize_t ret; 622 626 623 627 if (!ibdev_is_alive(dev)) 624 628 return -ENODEV; 625 629 626 - ret = ib_query_device(dev, &attr); 627 - if (ret) 628 - return ret; 629 - 630 630 return sprintf(buf, "%04x:%04x:%04x:%04x\n", 631 - be16_to_cpu(((__be16 *) &attr.node_guid)[0]), 632 - be16_to_cpu(((__be16 *) &attr.node_guid)[1]), 633 - be16_to_cpu(((__be16 *) &attr.node_guid)[2]), 634 - be16_to_cpu(((__be16 *) &attr.node_guid)[3])); 631 + be16_to_cpu(((__be16 *) &dev->node_guid)[0]), 632 + be16_to_cpu(((__be16 *) &dev->node_guid)[1]), 633 + be16_to_cpu(((__be16 *) &dev->node_guid)[2]), 634 + be16_to_cpu(((__be16 *) &dev->node_guid)[3])); 635 635 } 636 636 637 637 static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
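For reference, show_node_guid() above prints the 64-bit GUID as four big-endian 16-bit words. A standalone userspace illustration of the same byte-order arithmetic, using a made-up GUID value (ntohs() stands in for be16_to_cpu()):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	/* GUID as it sits in memory, big-endian: 00 02 c9 02 00 21 23 45 */
	uint8_t guid[8] = { 0x00, 0x02, 0xc9, 0x02, 0x00, 0x21, 0x23, 0x45 };
	uint16_t w[4];

	memcpy(w, guid, sizeof w);
	printf("%04x:%04x:%04x:%04x\n",	/* prints 0002:c902:0021:2345 */
	       ntohs(w[0]), ntohs(w[1]), ntohs(w[2]), ntohs(w[3]));
	return 0;
}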
drivers/infiniband/core/ucm.c  +12 -11
··· 42 42 #include <linux/mount.h> 43 43 #include <linux/cdev.h> 44 44 #include <linux/idr.h> 45 + #include <linux/mutex.h> 45 46 46 47 #include <asm/uaccess.h> 47 48 ··· 114 113 .remove = ib_ucm_remove_one 115 114 }; 116 115 117 - static DECLARE_MUTEX(ctx_id_mutex); 116 + static DEFINE_MUTEX(ctx_id_mutex); 118 117 static DEFINE_IDR(ctx_id_table); 119 118 static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES); 120 119 ··· 122 121 { 123 122 struct ib_ucm_context *ctx; 124 123 125 - down(&ctx_id_mutex); 124 + mutex_lock(&ctx_id_mutex); 126 125 ctx = idr_find(&ctx_id_table, id); 127 126 if (!ctx) 128 127 ctx = ERR_PTR(-ENOENT); ··· 130 129 ctx = ERR_PTR(-EINVAL); 131 130 else 132 131 atomic_inc(&ctx->ref); 133 - up(&ctx_id_mutex); 132 + mutex_unlock(&ctx_id_mutex); 134 133 135 134 return ctx; 136 135 } ··· 187 186 if (!result) 188 187 goto error; 189 188 190 - down(&ctx_id_mutex); 189 + mutex_lock(&ctx_id_mutex); 191 190 result = idr_get_new(&ctx_id_table, ctx, &ctx->id); 192 - up(&ctx_id_mutex); 191 + mutex_unlock(&ctx_id_mutex); 193 192 } while (result == -EAGAIN); 194 193 195 194 if (result) ··· 551 550 err2: 552 551 ib_destroy_cm_id(ctx->cm_id); 553 552 err1: 554 - down(&ctx_id_mutex); 553 + mutex_lock(&ctx_id_mutex); 555 554 idr_remove(&ctx_id_table, ctx->id); 556 - up(&ctx_id_mutex); 555 + mutex_unlock(&ctx_id_mutex); 557 556 kfree(ctx); 558 557 return result; 559 558 } ··· 573 572 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 574 573 return -EFAULT; 575 574 576 - down(&ctx_id_mutex); 575 + mutex_lock(&ctx_id_mutex); 577 576 ctx = idr_find(&ctx_id_table, cmd.id); 578 577 if (!ctx) 579 578 ctx = ERR_PTR(-ENOENT); ··· 581 580 ctx = ERR_PTR(-EINVAL); 582 581 else 583 582 idr_remove(&ctx_id_table, ctx->id); 584 - up(&ctx_id_mutex); 583 + mutex_unlock(&ctx_id_mutex); 585 584 586 585 if (IS_ERR(ctx)) 587 586 return PTR_ERR(ctx); ··· 1281 1280 struct ib_ucm_context, file_list); 1282 1281 up(&file->mutex); 1283 1282 1284 - down(&ctx_id_mutex); 1283 + mutex_lock(&ctx_id_mutex); 1285 1284 idr_remove(&ctx_id_table, ctx->id); 1286 - up(&ctx_id_mutex); 1285 + mutex_unlock(&ctx_id_mutex); 1287 1286 1288 1287 ib_destroy_cm_id(ctx->cm_id); 1289 1288 ib_ucm_cleanup_events(ctx);
drivers/infiniband/core/uverbs.h  +3 -2
··· 41 41 42 42 #include <linux/kref.h> 43 43 #include <linux/idr.h> 44 + #include <linux/mutex.h> 44 45 45 46 #include <rdma/ib_verbs.h> 46 47 #include <rdma/ib_user_verbs.h> ··· 89 88 90 89 struct ib_uverbs_file { 91 90 struct kref ref; 92 - struct semaphore mutex; 91 + struct mutex mutex; 93 92 struct ib_uverbs_device *device; 94 93 struct ib_ucontext *ucontext; 95 94 struct ib_event_handler event_handler; ··· 132 131 u32 async_events_reported; 133 132 }; 134 133 135 - extern struct semaphore ib_uverbs_idr_mutex; 134 + extern struct mutex ib_uverbs_idr_mutex; 136 135 extern struct idr ib_uverbs_pd_idr; 137 136 extern struct idr ib_uverbs_mr_idr; 138 137 extern struct idr ib_uverbs_mw_idr;
drivers/infiniband/core/uverbs_cmd.c  +76 -76
··· 67 67 if (copy_from_user(&cmd, buf, sizeof cmd)) 68 68 return -EFAULT; 69 69 70 - down(&file->mutex); 70 + mutex_lock(&file->mutex); 71 71 72 72 if (file->ucontext) { 73 73 ret = -EINVAL; ··· 119 119 120 120 fd_install(resp.async_fd, filp); 121 121 122 - up(&file->mutex); 122 + mutex_unlock(&file->mutex); 123 123 124 124 return in_len; 125 125 ··· 131 131 ibdev->dealloc_ucontext(ucontext); 132 132 133 133 err: 134 - up(&file->mutex); 134 + mutex_unlock(&file->mutex); 135 135 return ret; 136 136 } 137 137 ··· 157 157 memset(&resp, 0, sizeof resp); 158 158 159 159 resp.fw_ver = attr.fw_ver; 160 - resp.node_guid = attr.node_guid; 160 + resp.node_guid = file->device->ib_dev->node_guid; 161 161 resp.sys_image_guid = attr.sys_image_guid; 162 162 resp.max_mr_size = attr.max_mr_size; 163 163 resp.page_size_cap = attr.page_size_cap; ··· 290 290 pd->uobject = uobj; 291 291 atomic_set(&pd->usecnt, 0); 292 292 293 - down(&ib_uverbs_idr_mutex); 293 + mutex_lock(&ib_uverbs_idr_mutex); 294 294 295 295 retry: 296 296 if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) { ··· 314 314 goto err_idr; 315 315 } 316 316 317 - down(&file->mutex); 317 + mutex_lock(&file->mutex); 318 318 list_add_tail(&uobj->list, &file->ucontext->pd_list); 319 - up(&file->mutex); 319 + mutex_unlock(&file->mutex); 320 320 321 - up(&ib_uverbs_idr_mutex); 321 + mutex_unlock(&ib_uverbs_idr_mutex); 322 322 323 323 return in_len; 324 324 ··· 326 326 idr_remove(&ib_uverbs_pd_idr, uobj->id); 327 327 328 328 err_up: 329 - up(&ib_uverbs_idr_mutex); 329 + mutex_unlock(&ib_uverbs_idr_mutex); 330 330 ib_dealloc_pd(pd); 331 331 332 332 err: ··· 346 346 if (copy_from_user(&cmd, buf, sizeof cmd)) 347 347 return -EFAULT; 348 348 349 - down(&ib_uverbs_idr_mutex); 349 + mutex_lock(&ib_uverbs_idr_mutex); 350 350 351 351 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 352 352 if (!pd || pd->uobject->context != file->ucontext) ··· 360 360 361 361 idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle); 362 362 363 - down(&file->mutex); 363 + mutex_lock(&file->mutex); 364 364 list_del(&uobj->list); 365 - up(&file->mutex); 365 + mutex_unlock(&file->mutex); 366 366 367 367 kfree(uobj); 368 368 369 369 out: 370 - up(&ib_uverbs_idr_mutex); 370 + mutex_unlock(&ib_uverbs_idr_mutex); 371 371 372 372 return ret ? 
ret : in_len; 373 373 } ··· 426 426 427 427 obj->umem.virt_base = cmd.hca_va; 428 428 429 - down(&ib_uverbs_idr_mutex); 429 + mutex_lock(&ib_uverbs_idr_mutex); 430 430 431 431 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 432 432 if (!pd || pd->uobject->context != file->ucontext) { ··· 476 476 goto err_idr; 477 477 } 478 478 479 - down(&file->mutex); 479 + mutex_lock(&file->mutex); 480 480 list_add_tail(&obj->uobject.list, &file->ucontext->mr_list); 481 - up(&file->mutex); 481 + mutex_unlock(&file->mutex); 482 482 483 - up(&ib_uverbs_idr_mutex); 483 + mutex_unlock(&ib_uverbs_idr_mutex); 484 484 485 485 return in_len; 486 486 ··· 492 492 atomic_dec(&pd->usecnt); 493 493 494 494 err_up: 495 - up(&ib_uverbs_idr_mutex); 495 + mutex_unlock(&ib_uverbs_idr_mutex); 496 496 497 497 ib_umem_release(file->device->ib_dev, &obj->umem); 498 498 ··· 513 513 if (copy_from_user(&cmd, buf, sizeof cmd)) 514 514 return -EFAULT; 515 515 516 - down(&ib_uverbs_idr_mutex); 516 + mutex_lock(&ib_uverbs_idr_mutex); 517 517 518 518 mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle); 519 519 if (!mr || mr->uobject->context != file->ucontext) ··· 527 527 528 528 idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle); 529 529 530 - down(&file->mutex); 530 + mutex_lock(&file->mutex); 531 531 list_del(&memobj->uobject.list); 532 - up(&file->mutex); 532 + mutex_unlock(&file->mutex); 533 533 534 534 ib_umem_release(file->device->ib_dev, &memobj->umem); 535 535 kfree(memobj); 536 536 537 537 out: 538 - up(&ib_uverbs_idr_mutex); 538 + mutex_unlock(&ib_uverbs_idr_mutex); 539 539 540 540 return ret ? ret : in_len; 541 541 } ··· 628 628 cq->cq_context = ev_file; 629 629 atomic_set(&cq->usecnt, 0); 630 630 631 - down(&ib_uverbs_idr_mutex); 631 + mutex_lock(&ib_uverbs_idr_mutex); 632 632 633 633 retry: 634 634 if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) { ··· 653 653 goto err_idr; 654 654 } 655 655 656 - down(&file->mutex); 656 + mutex_lock(&file->mutex); 657 657 list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list); 658 - up(&file->mutex); 658 + mutex_unlock(&file->mutex); 659 659 660 - up(&ib_uverbs_idr_mutex); 660 + mutex_unlock(&ib_uverbs_idr_mutex); 661 661 662 662 return in_len; 663 663 ··· 665 665 idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id); 666 666 667 667 err_up: 668 - up(&ib_uverbs_idr_mutex); 668 + mutex_unlock(&ib_uverbs_idr_mutex); 669 669 ib_destroy_cq(cq); 670 670 671 671 err: ··· 701 701 goto out_wc; 702 702 } 703 703 704 - down(&ib_uverbs_idr_mutex); 704 + mutex_lock(&ib_uverbs_idr_mutex); 705 705 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 706 706 if (!cq || cq->uobject->context != file->ucontext) { 707 707 ret = -EINVAL; ··· 731 731 ret = -EFAULT; 732 732 733 733 out: 734 - up(&ib_uverbs_idr_mutex); 734 + mutex_unlock(&ib_uverbs_idr_mutex); 735 735 kfree(resp); 736 736 737 737 out_wc: ··· 750 750 if (copy_from_user(&cmd, buf, sizeof cmd)) 751 751 return -EFAULT; 752 752 753 - down(&ib_uverbs_idr_mutex); 753 + mutex_lock(&ib_uverbs_idr_mutex); 754 754 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 755 755 if (cq && cq->uobject->context == file->ucontext) { 756 756 ib_req_notify_cq(cq, cmd.solicited_only ? 
757 757 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); 758 758 ret = in_len; 759 759 } 760 - up(&ib_uverbs_idr_mutex); 760 + mutex_unlock(&ib_uverbs_idr_mutex); 761 761 762 762 return ret; 763 763 } ··· 779 779 780 780 memset(&resp, 0, sizeof resp); 781 781 782 - down(&ib_uverbs_idr_mutex); 782 + mutex_lock(&ib_uverbs_idr_mutex); 783 783 784 784 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle); 785 785 if (!cq || cq->uobject->context != file->ucontext) ··· 795 795 796 796 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle); 797 797 798 - down(&file->mutex); 798 + mutex_lock(&file->mutex); 799 799 list_del(&uobj->uobject.list); 800 - up(&file->mutex); 800 + mutex_unlock(&file->mutex); 801 801 802 802 ib_uverbs_release_ucq(file, ev_file, uobj); 803 803 ··· 811 811 ret = -EFAULT; 812 812 813 813 out: 814 - up(&ib_uverbs_idr_mutex); 814 + mutex_unlock(&ib_uverbs_idr_mutex); 815 815 816 816 return ret ? ret : in_len; 817 817 } ··· 845 845 if (!uobj) 846 846 return -ENOMEM; 847 847 848 - down(&ib_uverbs_idr_mutex); 848 + mutex_lock(&ib_uverbs_idr_mutex); 849 849 850 850 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 851 851 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle); ··· 930 930 goto err_idr; 931 931 } 932 932 933 - down(&file->mutex); 933 + mutex_lock(&file->mutex); 934 934 list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list); 935 - up(&file->mutex); 935 + mutex_unlock(&file->mutex); 936 936 937 - up(&ib_uverbs_idr_mutex); 937 + mutex_unlock(&ib_uverbs_idr_mutex); 938 938 939 939 return in_len; 940 940 ··· 950 950 atomic_dec(&attr.srq->usecnt); 951 951 952 952 err_up: 953 - up(&ib_uverbs_idr_mutex); 953 + mutex_unlock(&ib_uverbs_idr_mutex); 954 954 955 955 kfree(uobj); 956 956 return ret; ··· 972 972 if (!attr) 973 973 return -ENOMEM; 974 974 975 - down(&ib_uverbs_idr_mutex); 975 + mutex_lock(&ib_uverbs_idr_mutex); 976 976 977 977 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 978 978 if (!qp || qp->uobject->context != file->ucontext) { ··· 1033 1033 ret = in_len; 1034 1034 1035 1035 out: 1036 - up(&ib_uverbs_idr_mutex); 1036 + mutex_unlock(&ib_uverbs_idr_mutex); 1037 1037 kfree(attr); 1038 1038 1039 1039 return ret; ··· 1054 1054 1055 1055 memset(&resp, 0, sizeof resp); 1056 1056 1057 - down(&ib_uverbs_idr_mutex); 1057 + mutex_lock(&ib_uverbs_idr_mutex); 1058 1058 1059 1059 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1060 1060 if (!qp || qp->uobject->context != file->ucontext) ··· 1073 1073 1074 1074 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle); 1075 1075 1076 - down(&file->mutex); 1076 + mutex_lock(&file->mutex); 1077 1077 list_del(&uobj->uevent.uobject.list); 1078 - up(&file->mutex); 1078 + mutex_unlock(&file->mutex); 1079 1079 1080 1080 ib_uverbs_release_uevent(file, &uobj->uevent); 1081 1081 ··· 1088 1088 ret = -EFAULT; 1089 1089 1090 1090 out: 1091 - up(&ib_uverbs_idr_mutex); 1091 + mutex_unlock(&ib_uverbs_idr_mutex); 1092 1092 1093 1093 return ret ? 
ret : in_len; 1094 1094 } ··· 1119 1119 if (!user_wr) 1120 1120 return -ENOMEM; 1121 1121 1122 - down(&ib_uverbs_idr_mutex); 1122 + mutex_lock(&ib_uverbs_idr_mutex); 1123 1123 1124 1124 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1125 1125 if (!qp || qp->uobject->context != file->ucontext) ··· 1224 1224 ret = -EFAULT; 1225 1225 1226 1226 out: 1227 - up(&ib_uverbs_idr_mutex); 1227 + mutex_unlock(&ib_uverbs_idr_mutex); 1228 1228 1229 1229 while (wr) { 1230 1230 next = wr->next; ··· 1341 1341 if (IS_ERR(wr)) 1342 1342 return PTR_ERR(wr); 1343 1343 1344 - down(&ib_uverbs_idr_mutex); 1344 + mutex_lock(&ib_uverbs_idr_mutex); 1345 1345 1346 1346 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1347 1347 if (!qp || qp->uobject->context != file->ucontext) ··· 1362 1362 ret = -EFAULT; 1363 1363 1364 1364 out: 1365 - up(&ib_uverbs_idr_mutex); 1365 + mutex_unlock(&ib_uverbs_idr_mutex); 1366 1366 1367 1367 while (wr) { 1368 1368 next = wr->next; ··· 1392 1392 if (IS_ERR(wr)) 1393 1393 return PTR_ERR(wr); 1394 1394 1395 - down(&ib_uverbs_idr_mutex); 1395 + mutex_lock(&ib_uverbs_idr_mutex); 1396 1396 1397 1397 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1398 1398 if (!srq || srq->uobject->context != file->ucontext) ··· 1413 1413 ret = -EFAULT; 1414 1414 1415 1415 out: 1416 - up(&ib_uverbs_idr_mutex); 1416 + mutex_unlock(&ib_uverbs_idr_mutex); 1417 1417 1418 1418 while (wr) { 1419 1419 next = wr->next; ··· 1446 1446 if (!uobj) 1447 1447 return -ENOMEM; 1448 1448 1449 - down(&ib_uverbs_idr_mutex); 1449 + mutex_lock(&ib_uverbs_idr_mutex); 1450 1450 1451 1451 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 1452 1452 if (!pd || pd->uobject->context != file->ucontext) { ··· 1498 1498 goto err_idr; 1499 1499 } 1500 1500 1501 - down(&file->mutex); 1501 + mutex_lock(&file->mutex); 1502 1502 list_add_tail(&uobj->list, &file->ucontext->ah_list); 1503 - up(&file->mutex); 1503 + mutex_unlock(&file->mutex); 1504 1504 1505 - up(&ib_uverbs_idr_mutex); 1505 + mutex_unlock(&ib_uverbs_idr_mutex); 1506 1506 1507 1507 return in_len; 1508 1508 ··· 1513 1513 ib_destroy_ah(ah); 1514 1514 1515 1515 err_up: 1516 - up(&ib_uverbs_idr_mutex); 1516 + mutex_unlock(&ib_uverbs_idr_mutex); 1517 1517 1518 1518 kfree(uobj); 1519 1519 return ret; ··· 1530 1530 if (copy_from_user(&cmd, buf, sizeof cmd)) 1531 1531 return -EFAULT; 1532 1532 1533 - down(&ib_uverbs_idr_mutex); 1533 + mutex_lock(&ib_uverbs_idr_mutex); 1534 1534 1535 1535 ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle); 1536 1536 if (!ah || ah->uobject->context != file->ucontext) ··· 1544 1544 1545 1545 idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle); 1546 1546 1547 - down(&file->mutex); 1547 + mutex_lock(&file->mutex); 1548 1548 list_del(&uobj->list); 1549 - up(&file->mutex); 1549 + mutex_unlock(&file->mutex); 1550 1550 1551 1551 kfree(uobj); 1552 1552 1553 1553 out: 1554 - up(&ib_uverbs_idr_mutex); 1554 + mutex_unlock(&ib_uverbs_idr_mutex); 1555 1555 1556 1556 return ret ? ret : in_len; 1557 1557 } ··· 1569 1569 if (copy_from_user(&cmd, buf, sizeof cmd)) 1570 1570 return -EFAULT; 1571 1571 1572 - down(&ib_uverbs_idr_mutex); 1572 + mutex_lock(&ib_uverbs_idr_mutex); 1573 1573 1574 1574 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1575 1575 if (!qp || qp->uobject->context != file->ucontext) ··· 1602 1602 kfree(mcast); 1603 1603 1604 1604 out: 1605 - up(&ib_uverbs_idr_mutex); 1605 + mutex_unlock(&ib_uverbs_idr_mutex); 1606 1606 1607 1607 return ret ? 
ret : in_len; 1608 1608 } ··· 1620 1620 if (copy_from_user(&cmd, buf, sizeof cmd)) 1621 1621 return -EFAULT; 1622 1622 1623 - down(&ib_uverbs_idr_mutex); 1623 + mutex_lock(&ib_uverbs_idr_mutex); 1624 1624 1625 1625 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle); 1626 1626 if (!qp || qp->uobject->context != file->ucontext) ··· 1641 1641 } 1642 1642 1643 1643 out: 1644 - up(&ib_uverbs_idr_mutex); 1644 + mutex_unlock(&ib_uverbs_idr_mutex); 1645 1645 1646 1646 return ret ? ret : in_len; 1647 1647 } ··· 1673 1673 if (!uobj) 1674 1674 return -ENOMEM; 1675 1675 1676 - down(&ib_uverbs_idr_mutex); 1676 + mutex_lock(&ib_uverbs_idr_mutex); 1677 1677 1678 1678 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 1679 1679 ··· 1730 1730 goto err_idr; 1731 1731 } 1732 1732 1733 - down(&file->mutex); 1733 + mutex_lock(&file->mutex); 1734 1734 list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list); 1735 - up(&file->mutex); 1735 + mutex_unlock(&file->mutex); 1736 1736 1737 - up(&ib_uverbs_idr_mutex); 1737 + mutex_unlock(&ib_uverbs_idr_mutex); 1738 1738 1739 1739 return in_len; 1740 1740 ··· 1746 1746 atomic_dec(&pd->usecnt); 1747 1747 1748 1748 err_up: 1749 - up(&ib_uverbs_idr_mutex); 1749 + mutex_unlock(&ib_uverbs_idr_mutex); 1750 1750 1751 1751 kfree(uobj); 1752 1752 return ret; ··· 1764 1764 if (copy_from_user(&cmd, buf, sizeof cmd)) 1765 1765 return -EFAULT; 1766 1766 1767 - down(&ib_uverbs_idr_mutex); 1767 + mutex_lock(&ib_uverbs_idr_mutex); 1768 1768 1769 1769 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1770 1770 if (!srq || srq->uobject->context != file->ucontext) { ··· 1778 1778 ret = ib_modify_srq(srq, &attr, cmd.attr_mask); 1779 1779 1780 1780 out: 1781 - up(&ib_uverbs_idr_mutex); 1781 + mutex_unlock(&ib_uverbs_idr_mutex); 1782 1782 1783 1783 return ret ? ret : in_len; 1784 1784 } ··· 1796 1796 if (copy_from_user(&cmd, buf, sizeof cmd)) 1797 1797 return -EFAULT; 1798 1798 1799 - down(&ib_uverbs_idr_mutex); 1799 + mutex_lock(&ib_uverbs_idr_mutex); 1800 1800 1801 1801 memset(&resp, 0, sizeof resp); 1802 1802 ··· 1812 1812 1813 1813 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); 1814 1814 1815 - down(&file->mutex); 1815 + mutex_lock(&file->mutex); 1816 1816 list_del(&uobj->uobject.list); 1817 - up(&file->mutex); 1817 + mutex_unlock(&file->mutex); 1818 1818 1819 1819 ib_uverbs_release_uevent(file, uobj); 1820 1820 ··· 1827 1827 ret = -EFAULT; 1828 1828 1829 1829 out: 1830 - up(&ib_uverbs_idr_mutex); 1830 + mutex_unlock(&ib_uverbs_idr_mutex); 1831 1831 1832 1832 return ret ? ret : in_len; 1833 1833 }
drivers/infiniband/core/uverbs_main.c  +4 -4
··· 66 66 67 67 static struct class *uverbs_class; 68 68 69 - DECLARE_MUTEX(ib_uverbs_idr_mutex); 69 + DEFINE_MUTEX(ib_uverbs_idr_mutex); 70 70 DEFINE_IDR(ib_uverbs_pd_idr); 71 71 DEFINE_IDR(ib_uverbs_mr_idr); 72 72 DEFINE_IDR(ib_uverbs_mw_idr); ··· 180 180 if (!context) 181 181 return 0; 182 182 183 - down(&ib_uverbs_idr_mutex); 183 + mutex_lock(&ib_uverbs_idr_mutex); 184 184 185 185 list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) { 186 186 struct ib_ah *ah = idr_find(&ib_uverbs_ah_idr, uobj->id); ··· 250 250 kfree(uobj); 251 251 } 252 252 253 - up(&ib_uverbs_idr_mutex); 253 + mutex_unlock(&ib_uverbs_idr_mutex); 254 254 255 255 return context->device->dealloc_ucontext(context); 256 256 } ··· 653 653 file->ucontext = NULL; 654 654 file->async_file = NULL; 655 655 kref_init(&file->ref); 656 - init_MUTEX(&file->mutex); 656 + mutex_init(&file->mutex); 657 657 658 658 filp->private_data = file; 659 659
drivers/infiniband/hw/mthca/mthca_av.c  +6 -4
··· 163 163 return 0; 164 164 } 165 165 166 + int mthca_ah_grh_present(struct mthca_ah *ah) 167 + { 168 + return !!(ah->av->g_slid & 0x80); 169 + } 170 + 166 171 int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah, 167 172 struct ib_ud_header *header) 168 173 { ··· 177 172 header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28; 178 173 header->lrh.destination_lid = ah->av->dlid; 179 174 header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f); 180 - if (ah->av->g_slid & 0x80) { 181 - header->grh_present = 1; 175 + if (mthca_ah_grh_present(ah)) { 182 176 header->grh.traffic_class = 183 177 (be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff; 184 178 header->grh.flow_label = ··· 188 184 &header->grh.source_gid); 189 185 memcpy(header->grh.destination_gid.raw, 190 186 ah->av->dgid, 16); 191 - } else { 192 - header->grh_present = 0; 193 187 } 194 188 195 189 return 0;
drivers/infiniband/hw/mthca/mthca_cmd.c  +4 -3
··· 606 606 err = -EINVAL; 607 607 goto out; 608 608 } 609 - for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) { 609 + for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) { 610 610 if (virt != -1) { 611 611 pages[nent * 2] = cpu_to_be64(virt); 612 612 virt += 1 << lg; ··· 727 727 * system pages needed. 728 728 */ 729 729 dev->fw.arbel.fw_pages = 730 - (dev->fw.arbel.fw_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> 731 - (PAGE_SHIFT - 12); 730 + ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >> 731 + (PAGE_SHIFT - 12); 732 732 733 733 mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n", 734 734 (unsigned long long) dev->fw.arbel.clr_int_base, ··· 1445 1445 * pages needed. 1446 1446 */ 1447 1447 *aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12); 1448 + *aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12); 1448 1449 1449 1450 return 0; 1450 1451 }
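The arithmetic changed in the mthca_cmd.c hunks converts page counts that the firmware reports in 4 KiB units into whole system pages. A worked example, assuming 64 KiB system pages (PAGE_SHIFT = 16), written out as a comment:

/*
 * With 64 KiB pages, PAGE_SIZE >> 12 == 16, so for fw_pages = 20:
 *
 *	ALIGN(20, 16)           = 32	round up to a multiple of 16
 *	32 >> (PAGE_SHIFT - 12) = 2	i.e. two 64 KiB system pages
 *
 * The older open-coded form,
 *	(x + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12),
 * computes the same value: (20 + 15) >> 4 == 2.
 */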
drivers/infiniband/hw/mthca/mthca_dev.h  +1
··· 520 520 int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah); 521 521 int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah, 522 522 struct ib_ud_header *header); 523 + int mthca_ah_grh_present(struct mthca_ah *ah); 523 524 524 525 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); 525 526 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
drivers/infiniband/hw/mthca/mthca_eq.c  +15 -13
··· 45 45 enum { 46 46 MTHCA_NUM_ASYNC_EQE = 0x80, 47 47 MTHCA_NUM_CMD_EQE = 0x80, 48 + MTHCA_NUM_SPARE_EQE = 0x80, 48 49 MTHCA_EQ_ENTRY_SIZE = 0x20 49 50 }; 50 51 ··· 278 277 { 279 278 struct mthca_eqe *eqe; 280 279 int disarm_cqn; 281 - int eqes_found = 0; 280 + int eqes_found = 0; 281 + int set_ci = 0; 282 282 283 283 while ((eqe = next_eqe_sw(eq))) { 284 - int set_ci = 0; 285 - 286 284 /* 287 285 * Make sure we read EQ entry contents after we've 288 286 * checked the ownership bit. ··· 345 345 be16_to_cpu(eqe->event.cmd.token), 346 346 eqe->event.cmd.status, 347 347 be64_to_cpu(eqe->event.cmd.out_param)); 348 - /* 349 - * cmd_event() may add more commands. 350 - * The card will think the queue has overflowed if 351 - * we don't tell it we've been processing events. 352 - */ 353 - set_ci = 1; 354 348 break; 355 349 356 350 case MTHCA_EVENT_TYPE_PORT_CHANGE: ··· 379 385 set_eqe_hw(eqe); 380 386 ++eq->cons_index; 381 387 eqes_found = 1; 388 + ++set_ci; 382 389 383 - if (unlikely(set_ci)) { 390 + /* 391 + * The HCA will think the queue has overflowed if we 392 + * don't tell it we've been processing events. We 393 + * create our EQs with MTHCA_NUM_SPARE_EQE extra 394 + * entries, so we must update our consumer index at 395 + * least that often. 396 + */ 397 + if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) { 384 398 /* 385 399 * Conditional on hca_type is OK here because 386 400 * this is a rare case, not the fast path. ··· 864 862 intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ? 865 863 128 : dev->eq_table.inta_pin; 866 864 867 - err = mthca_create_eq(dev, dev->limits.num_cqs, 865 + err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE, 868 866 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr, 869 867 &dev->eq_table.eq[MTHCA_EQ_COMP]); 870 868 if (err) 871 869 goto err_out_unmap; 872 870 873 - err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE, 871 + err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE, 874 872 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr, 875 873 &dev->eq_table.eq[MTHCA_EQ_ASYNC]); 876 874 if (err) 877 875 goto err_out_comp; 878 876 879 - err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE, 877 + err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE, 880 878 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr, 881 879 &dev->eq_table.eq[MTHCA_EQ_CMD]); 882 880 if (err)
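The mthca_eq.c change sizes each EQ with MTHCA_NUM_SPARE_EQE extra entries and updates the consumer index at least once per that many processed events, instead of only after command-interface events. A condensed sketch of the new loop shape; the real mthca_eq_int() does more, and set_eq_ci() is assumed to be the driver's existing doorbell helper:

int set_ci = 0;

while ((eqe = next_eqe_sw(eq))) {
	/* ... decode and dispatch the event ... */

	set_eqe_hw(eqe);
	++eq->cons_index;

	/*
	 * Report progress to the HCA before the spare entries are used
	 * up; otherwise it would see the queue as overflowed.
	 */
	if (unlikely(++set_ci >= MTHCA_NUM_SPARE_EQE)) {
		set_eq_ci(dev, eq, eq->cons_index);
		set_ci = 0;
	}
}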
drivers/infiniband/hw/mthca/mthca_provider.c  +78 -54
··· 33 33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34 34 * SOFTWARE. 35 35 * 36 - * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $ 36 + * $Id: mthca_provider.c 4859 2006-01-09 21:55:10Z roland $ 37 37 */ 38 38 39 39 #include <rdma/ib_smi.h> ··· 45 45 #include "mthca_user.h" 46 46 #include "mthca_memfree.h" 47 47 48 + static void init_query_mad(struct ib_smp *mad) 49 + { 50 + mad->base_version = 1; 51 + mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 52 + mad->class_version = 1; 53 + mad->method = IB_MGMT_METHOD_GET; 54 + } 55 + 48 56 static int mthca_query_device(struct ib_device *ibdev, 49 57 struct ib_device_attr *props) 50 58 { ··· 63 55 64 56 u8 status; 65 57 66 - in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); 58 + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 67 59 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 68 60 if (!in_mad || !out_mad) 69 61 goto out; ··· 72 64 73 65 props->fw_ver = mdev->fw_ver; 74 66 75 - memset(in_mad, 0, sizeof *in_mad); 76 - in_mad->base_version = 1; 77 - in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 78 - in_mad->class_version = 1; 79 - in_mad->method = IB_MGMT_METHOD_GET; 80 - in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 67 + init_query_mad(in_mad); 68 + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 81 69 82 70 err = mthca_MAD_IFC(mdev, 1, 1, 83 71 1, NULL, NULL, in_mad, out_mad, ··· 91 87 props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); 92 88 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); 93 89 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 94 - memcpy(&props->node_guid, out_mad->data + 12, 8); 95 90 96 91 props->max_mr_size = ~0ull; 97 92 props->page_size_cap = mdev->limits.page_size_cap; ··· 131 128 int err = -ENOMEM; 132 129 u8 status; 133 130 134 - in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); 131 + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 135 132 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 136 133 if (!in_mad || !out_mad) 137 134 goto out; 138 135 139 136 memset(props, 0, sizeof *props); 140 137 141 - memset(in_mad, 0, sizeof *in_mad); 142 - in_mad->base_version = 1; 143 - in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 144 - in_mad->class_version = 1; 145 - in_mad->method = IB_MGMT_METHOD_GET; 146 - in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 147 - in_mad->attr_mod = cpu_to_be32(port); 138 + init_query_mad(in_mad); 139 + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 140 + in_mad->attr_mod = cpu_to_be32(port); 148 141 149 142 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 150 143 port, NULL, NULL, in_mad, out_mad, ··· 219 220 int err = -ENOMEM; 220 221 u8 status; 221 222 222 - in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); 223 + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 223 224 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 224 225 if (!in_mad || !out_mad) 225 226 goto out; 226 227 227 - memset(in_mad, 0, sizeof *in_mad); 228 - in_mad->base_version = 1; 229 - in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 230 - in_mad->class_version = 1; 231 - in_mad->method = IB_MGMT_METHOD_GET; 232 - in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; 233 - in_mad->attr_mod = cpu_to_be32(index / 32); 228 + init_query_mad(in_mad); 229 + in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; 230 + in_mad->attr_mod = cpu_to_be32(index / 32); 234 231 235 232 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 236 233 port, NULL, NULL, in_mad, out_mad, ··· 254 259 int err = -ENOMEM; 255 260 u8 status; 256 261 257 - in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); 262 + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 258 
263 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 259 264 if (!in_mad || !out_mad) 260 265 goto out; 261 266 262 - memset(in_mad, 0, sizeof *in_mad); 263 - in_mad->base_version = 1; 264 - in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 265 - in_mad->class_version = 1; 266 - in_mad->method = IB_MGMT_METHOD_GET; 267 - in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 268 - in_mad->attr_mod = cpu_to_be32(port); 267 + init_query_mad(in_mad); 268 + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 269 + in_mad->attr_mod = cpu_to_be32(port); 269 270 270 271 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 271 272 port, NULL, NULL, in_mad, out_mad, ··· 275 284 276 285 memcpy(gid->raw, out_mad->data + 8, 8); 277 286 278 - memset(in_mad, 0, sizeof *in_mad); 279 - in_mad->base_version = 1; 280 - in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 281 - in_mad->class_version = 1; 282 - in_mad->method = IB_MGMT_METHOD_GET; 283 - in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; 284 - in_mad->attr_mod = cpu_to_be32(index / 8); 287 + init_query_mad(in_mad); 288 + in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; 289 + in_mad->attr_mod = cpu_to_be32(index / 8); 285 290 286 291 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 287 292 port, NULL, NULL, in_mad, out_mad, ··· 445 458 if (pd->uobject) { 446 459 context = to_mucontext(pd->uobject->context); 447 460 448 - if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) 449 - return ERR_PTR(-EFAULT); 461 + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { 462 + err = -EFAULT; 463 + goto err_free; 464 + } 450 465 451 466 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, 452 467 context->db_tab, ucmd.db_index, ··· 524 535 if (pd->uobject) { 525 536 context = to_mucontext(pd->uobject->context); 526 537 527 - if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) 538 + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { 539 + kfree(qp); 528 540 return ERR_PTR(-EFAULT); 541 + } 529 542 530 543 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, 531 544 context->db_tab, ··· 774 783 if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) 775 784 return ERR_PTR(-EINVAL); 776 785 777 - if (num_phys_buf > 1 && 778 - ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) 779 - return ERR_PTR(-EINVAL); 780 - 781 786 mask = 0; 782 787 total_size = 0; 783 788 for (i = 0; i < num_phys_buf; ++i) { 784 - if (i != 0 && buffer_list[i].addr & ~PAGE_MASK) 785 - return ERR_PTR(-EINVAL); 786 - if (i != 0 && i != num_phys_buf - 1 && 787 - (buffer_list[i].size & ~PAGE_MASK)) 788 - return ERR_PTR(-EINVAL); 789 + if (i != 0) 790 + mask |= buffer_list[i].addr; 791 + if (i != num_phys_buf - 1) 792 + mask |= buffer_list[i].addr + buffer_list[i].size; 789 793 790 794 total_size += buffer_list[i].size; 791 - if (i > 0) 792 - mask |= buffer_list[i].addr; 793 795 } 796 + 797 + if (mask & ~PAGE_MASK) 798 + return ERR_PTR(-EINVAL); 794 799 795 800 /* Find largest page shift we can use to cover buffers */ 796 801 for (shift = PAGE_SHIFT; shift < 31; ++shift) ··· 1057 1070 &class_device_attr_board_id 1058 1071 }; 1059 1072 1073 + static int mthca_init_node_data(struct mthca_dev *dev) 1074 + { 1075 + struct ib_smp *in_mad = NULL; 1076 + struct ib_smp *out_mad = NULL; 1077 + int err = -ENOMEM; 1078 + u8 status; 1079 + 1080 + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 1081 + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 1082 + if (!in_mad || !out_mad) 1083 + goto out; 1084 + 1085 + init_query_mad(in_mad); 1086 + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 1087 + 1088 + err = mthca_MAD_IFC(dev, 1, 1, 1089 
+ 1, NULL, NULL, in_mad, out_mad, 1090 + &status); 1091 + if (err) 1092 + goto out; 1093 + if (status) { 1094 + err = -EINVAL; 1095 + goto out; 1096 + } 1097 + 1098 + memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); 1099 + 1100 + out: 1101 + kfree(in_mad); 1102 + kfree(out_mad); 1103 + return err; 1104 + } 1105 + 1060 1106 int mthca_register_device(struct mthca_dev *dev) 1061 1107 { 1062 1108 int ret; 1063 1109 int i; 1110 + 1111 + ret = mthca_init_node_data(dev); 1112 + if (ret) 1113 + return ret; 1064 1114 1065 1115 strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX); 1066 1116 dev->ib_dev.owner = THIS_MODULE;
drivers/infiniband/hw/mthca/mthca_qp.c  +1 -1
··· 1434 1434 u16 pkey; 1435 1435 1436 1436 ib_ud_header_init(256, /* assume a MAD */ 1437 - sqp->ud_header.grh_present, 1437 + mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 1438 1438 &sqp->ud_header); 1439 1439 1440 1440 err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
drivers/infiniband/ulp/ipoib/ipoib.h  +3 -3
··· 45 45 #include <linux/config.h> 46 46 #include <linux/kref.h> 47 47 #include <linux/if_infiniband.h> 48 + #include <linux/mutex.h> 48 49 49 50 #include <net/neighbour.h> 50 51 51 52 #include <asm/atomic.h> 52 - #include <asm/semaphore.h> 53 53 54 54 #include <rdma/ib_verbs.h> 55 55 #include <rdma/ib_pack.h> ··· 123 123 124 124 unsigned long flags; 125 125 126 - struct semaphore mcast_mutex; 127 - struct semaphore vlan_mutex; 126 + struct mutex mcast_mutex; 127 + struct mutex vlan_mutex; 128 128 129 129 struct rb_root path_tree; 130 130 struct list_head path_list;
drivers/infiniband/ulp/ipoib/ipoib_ib.c  +10 -21
··· 52 52 53 53 #define IPOIB_OP_RECV (1ul << 31) 54 54 55 - static DECLARE_MUTEX(pkey_sem); 55 + static DEFINE_MUTEX(pkey_mutex); 56 56 57 57 struct ipoib_ah *ipoib_create_ah(struct net_device *dev, 58 58 struct ib_pd *pd, struct ib_ah_attr *attr) ··· 445 445 446 446 /* Shutdown the P_Key thread if still active */ 447 447 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { 448 - down(&pkey_sem); 448 + mutex_lock(&pkey_mutex); 449 449 set_bit(IPOIB_PKEY_STOP, &priv->flags); 450 450 cancel_delayed_work(&priv->pkey_task); 451 - up(&pkey_sem); 451 + mutex_unlock(&pkey_mutex); 452 452 flush_workqueue(ipoib_workqueue); 453 453 } 454 454 455 455 ipoib_mcast_stop_thread(dev, 1); 456 - 457 - /* 458 - * Flush the multicast groups first so we stop any multicast joins. The 459 - * completion thread may have already died and we may deadlock waiting 460 - * for the completion thread to finish some multicast joins. 461 - */ 462 456 ipoib_mcast_dev_flush(dev); 463 - 464 - /* Delete broadcast and local addresses since they will be recreated */ 465 - ipoib_mcast_dev_down(dev); 466 457 467 458 ipoib_flush_paths(dev); 468 459 ··· 599 608 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 600 609 ipoib_ib_dev_up(dev); 601 610 602 - down(&priv->vlan_mutex); 611 + mutex_lock(&priv->vlan_mutex); 603 612 604 613 /* Flush any child interfaces too */ 605 614 list_for_each_entry(cpriv, &priv->child_intfs, list) 606 615 ipoib_ib_dev_flush(&cpriv->dev); 607 616 608 - up(&priv->vlan_mutex); 617 + mutex_unlock(&priv->vlan_mutex); 609 618 } 610 619 611 620 void ipoib_ib_dev_cleanup(struct net_device *dev) ··· 615 624 ipoib_dbg(priv, "cleaning up ib_dev\n"); 616 625 617 626 ipoib_mcast_stop_thread(dev, 1); 618 - 619 - /* Delete the broadcast address and the local address */ 620 - ipoib_mcast_dev_down(dev); 627 + ipoib_mcast_dev_flush(dev); 621 628 622 629 ipoib_transport_dev_cleanup(dev); 623 630 } ··· 651 662 if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) 652 663 ipoib_open(dev); 653 664 else { 654 - down(&pkey_sem); 665 + mutex_lock(&pkey_mutex); 655 666 if (!test_bit(IPOIB_PKEY_STOP, &priv->flags)) 656 667 queue_delayed_work(ipoib_workqueue, 657 668 &priv->pkey_task, 658 669 HZ); 659 - up(&pkey_sem); 670 + mutex_unlock(&pkey_mutex); 660 671 } 661 672 } 662 673 ··· 670 681 671 682 /* P_Key value not assigned yet - start polling */ 672 683 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { 673 - down(&pkey_sem); 684 + mutex_lock(&pkey_mutex); 674 685 clear_bit(IPOIB_PKEY_STOP, &priv->flags); 675 686 queue_delayed_work(ipoib_workqueue, 676 687 &priv->pkey_task, 677 688 HZ); 678 - up(&pkey_sem); 689 + mutex_unlock(&pkey_mutex); 679 690 return 1; 680 691 } 681 692
drivers/infiniband/ulp/ipoib/ipoib_main.c  +6 -6
··· 105 105 struct ipoib_dev_priv *cpriv; 106 106 107 107 /* Bring up any child interfaces too */ 108 - down(&priv->vlan_mutex); 108 + mutex_lock(&priv->vlan_mutex); 109 109 list_for_each_entry(cpriv, &priv->child_intfs, list) { 110 110 int flags; 111 111 ··· 115 115 116 116 dev_change_flags(cpriv->dev, flags | IFF_UP); 117 117 } 118 - up(&priv->vlan_mutex); 118 + mutex_unlock(&priv->vlan_mutex); 119 119 } 120 120 121 121 netif_start_queue(dev); ··· 140 140 struct ipoib_dev_priv *cpriv; 141 141 142 142 /* Bring down any child interfaces too */ 143 - down(&priv->vlan_mutex); 143 + mutex_lock(&priv->vlan_mutex); 144 144 list_for_each_entry(cpriv, &priv->child_intfs, list) { 145 145 int flags; 146 146 ··· 150 150 151 151 dev_change_flags(cpriv->dev, flags & ~IFF_UP); 152 152 } 153 - up(&priv->vlan_mutex); 153 + mutex_unlock(&priv->vlan_mutex); 154 154 } 155 155 156 156 return 0; ··· 892 892 spin_lock_init(&priv->lock); 893 893 spin_lock_init(&priv->tx_lock); 894 894 895 - init_MUTEX(&priv->mcast_mutex); 896 - init_MUTEX(&priv->vlan_mutex); 895 + mutex_init(&priv->mcast_mutex); 896 + mutex_init(&priv->vlan_mutex); 897 897 898 898 INIT_LIST_HEAD(&priv->path_list); 899 899 INIT_LIST_HEAD(&priv->child_intfs);
drivers/infiniband/ulp/ipoib/ipoib_multicast.c  +33 -72
··· 55 55 "Enable multicast debug tracing if > 0"); 56 56 #endif 57 57 58 - static DECLARE_MUTEX(mcast_mutex); 58 + static DEFINE_MUTEX(mcast_mutex); 59 59 60 60 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ 61 61 struct ipoib_mcast { ··· 97 97 struct ipoib_dev_priv *priv = netdev_priv(dev); 98 98 struct ipoib_neigh *neigh, *tmp; 99 99 unsigned long flags; 100 - LIST_HEAD(ah_list); 101 - struct ipoib_ah *ah, *tah; 102 100 103 101 ipoib_dbg_mcast(netdev_priv(dev), 104 102 "deleting multicast group " IPOIB_GID_FMT "\n", ··· 105 107 spin_lock_irqsave(&priv->lock, flags); 106 108 107 109 list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) { 110 + /* 111 + * It's safe to call ipoib_put_ah() inside priv->lock 112 + * here, because we know that mcast->ah will always 113 + * hold one more reference, so ipoib_put_ah() will 114 + * never do more than decrement the ref count. 115 + */ 108 116 if (neigh->ah) 109 - list_add_tail(&neigh->ah->list, &ah_list); 117 + ipoib_put_ah(neigh->ah); 110 118 *to_ipoib_neigh(neigh->neighbour) = NULL; 111 119 neigh->neighbour->ops->destructor = NULL; 112 120 kfree(neigh); 113 121 } 114 122 115 123 spin_unlock_irqrestore(&priv->lock, flags); 116 - 117 - list_for_each_entry_safe(ah, tah, &ah_list, list) 118 - ipoib_put_ah(ah); 119 124 120 125 if (mcast->ah) 121 126 ipoib_put_ah(mcast->ah); ··· 385 384 386 385 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) { 387 386 mcast->backoff = 1; 388 - down(&mcast_mutex); 387 + mutex_lock(&mcast_mutex); 389 388 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 390 389 queue_work(ipoib_workqueue, &priv->mcast_task); 391 - up(&mcast_mutex); 390 + mutex_unlock(&mcast_mutex); 392 391 complete(&mcast->done); 393 392 return; 394 393 } ··· 418 417 419 418 mcast->query = NULL; 420 419 421 - down(&mcast_mutex); 420 + mutex_lock(&mcast_mutex); 422 421 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { 423 422 if (status == -ETIMEDOUT) 424 423 queue_work(ipoib_workqueue, &priv->mcast_task); ··· 427 426 mcast->backoff * HZ); 428 427 } else 429 428 complete(&mcast->done); 430 - up(&mcast_mutex); 429 + mutex_unlock(&mcast_mutex); 431 430 432 431 return; 433 432 } ··· 482 481 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) 483 482 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; 484 483 485 - down(&mcast_mutex); 484 + mutex_lock(&mcast_mutex); 486 485 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 487 486 queue_delayed_work(ipoib_workqueue, 488 487 &priv->mcast_task, 489 488 mcast->backoff * HZ); 490 - up(&mcast_mutex); 489 + mutex_unlock(&mcast_mutex); 491 490 } else 492 491 mcast->query_id = ret; 493 492 } ··· 520 519 priv->broadcast = ipoib_mcast_alloc(dev, 1); 521 520 if (!priv->broadcast) { 522 521 ipoib_warn(priv, "failed to allocate broadcast group\n"); 523 - down(&mcast_mutex); 522 + mutex_lock(&mcast_mutex); 524 523 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 525 524 queue_delayed_work(ipoib_workqueue, 526 525 &priv->mcast_task, HZ); 527 - up(&mcast_mutex); 526 + mutex_unlock(&mcast_mutex); 528 527 return; 529 528 } 530 529 ··· 580 579 581 580 ipoib_dbg_mcast(priv, "starting multicast thread\n"); 582 581 583 - down(&mcast_mutex); 582 + mutex_lock(&mcast_mutex); 584 583 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) 585 584 queue_work(ipoib_workqueue, &priv->mcast_task); 586 - up(&mcast_mutex); 585 + mutex_unlock(&mcast_mutex); 587 586 588 587 return 0; 589 588 } ··· 595 594 596 595 ipoib_dbg_mcast(priv, "stopping multicast thread\n"); 597 596 598 - down(&mcast_mutex); 597 + 
mutex_lock(&mcast_mutex); 599 598 clear_bit(IPOIB_MCAST_RUN, &priv->flags); 600 599 cancel_delayed_work(&priv->mcast_task); 601 - up(&mcast_mutex); 600 + mutex_unlock(&mcast_mutex); 602 601 603 602 if (flush) 604 603 flush_workqueue(ipoib_workqueue); ··· 742 741 { 743 742 struct ipoib_dev_priv *priv = netdev_priv(dev); 744 743 LIST_HEAD(remove_list); 745 - struct ipoib_mcast *mcast, *tmcast, *nmcast; 744 + struct ipoib_mcast *mcast, *tmcast; 746 745 unsigned long flags; 747 746 748 747 ipoib_dbg_mcast(priv, "flushing multicast list\n"); 749 748 750 749 spin_lock_irqsave(&priv->lock, flags); 750 + 751 751 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { 752 - nmcast = ipoib_mcast_alloc(dev, 0); 753 - if (nmcast) { 754 - nmcast->flags = 755 - mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY); 756 - 757 - nmcast->mcmember.mgid = mcast->mcmember.mgid; 758 - 759 - /* Add the new group in before the to-be-destroyed group */ 760 - list_add_tail(&nmcast->list, &mcast->list); 761 - list_del_init(&mcast->list); 762 - 763 - rb_replace_node(&mcast->rb_node, &nmcast->rb_node, 764 - &priv->multicast_tree); 765 - 766 - list_add_tail(&mcast->list, &remove_list); 767 - } else { 768 - ipoib_warn(priv, "could not reallocate multicast group " 769 - IPOIB_GID_FMT "\n", 770 - IPOIB_GID_ARG(mcast->mcmember.mgid)); 771 - } 752 + list_del(&mcast->list); 753 + rb_erase(&mcast->rb_node, &priv->multicast_tree); 754 + list_add_tail(&mcast->list, &remove_list); 772 755 } 773 756 774 757 if (priv->broadcast) { 775 - nmcast = ipoib_mcast_alloc(dev, 0); 776 - if (nmcast) { 777 - nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid; 778 - 779 - rb_replace_node(&priv->broadcast->rb_node, 780 - &nmcast->rb_node, 781 - &priv->multicast_tree); 782 - 783 - list_add_tail(&priv->broadcast->list, &remove_list); 784 - } 785 - 786 - priv->broadcast = nmcast; 758 + rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree); 759 + list_add_tail(&priv->broadcast->list, &remove_list); 760 + priv->broadcast = NULL; 787 761 } 788 762 789 763 spin_unlock_irqrestore(&priv->lock, flags); ··· 766 790 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { 767 791 ipoib_mcast_leave(dev, mcast); 768 792 ipoib_mcast_free(mcast); 769 - } 770 - } 771 - 772 - void ipoib_mcast_dev_down(struct net_device *dev) 773 - { 774 - struct ipoib_dev_priv *priv = netdev_priv(dev); 775 - unsigned long flags; 776 - 777 - /* Delete broadcast since it will be recreated */ 778 - if (priv->broadcast) { 779 - ipoib_dbg_mcast(priv, "deleting broadcast group\n"); 780 - 781 - spin_lock_irqsave(&priv->lock, flags); 782 - rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree); 783 - spin_unlock_irqrestore(&priv->lock, flags); 784 - ipoib_mcast_leave(dev, priv->broadcast); 785 - ipoib_mcast_free(priv->broadcast); 786 - priv->broadcast = NULL; 787 793 } 788 794 } 789 795 ··· 782 824 783 825 ipoib_mcast_stop_thread(dev, 0); 784 826 785 - spin_lock_irqsave(&priv->lock, flags); 827 + spin_lock_irqsave(&dev->xmit_lock, flags); 828 + spin_lock(&priv->lock); 786 829 787 830 /* 788 831 * Unfortunately, the networking core only gives us a list of all of ··· 855 896 list_add_tail(&mcast->list, &remove_list); 856 897 } 857 898 } 858 - spin_unlock_irqrestore(&priv->lock, flags); 899 + 900 + spin_unlock(&priv->lock); 901 + spin_unlock_irqrestore(&dev->xmit_lock, flags); 859 902 860 903 /* We have to cancel outside of the spinlock */ 861 904 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
drivers/infiniband/ulp/ipoib/ipoib_verbs.c  +4 -4
··· 65 65 } 66 66 67 67 /* attach QP to multicast group */ 68 - down(&priv->mcast_mutex); 68 + mutex_lock(&priv->mcast_mutex); 69 69 ret = ib_attach_mcast(priv->qp, mgid, mlid); 70 - up(&priv->mcast_mutex); 70 + mutex_unlock(&priv->mcast_mutex); 71 71 if (ret) 72 72 ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret); 73 73 ··· 81 81 struct ipoib_dev_priv *priv = netdev_priv(dev); 82 82 int ret; 83 83 84 - down(&priv->mcast_mutex); 84 + mutex_lock(&priv->mcast_mutex); 85 85 ret = ib_detach_mcast(priv->qp, mgid, mlid); 86 - up(&priv->mcast_mutex); 86 + mutex_unlock(&priv->mcast_mutex); 87 87 if (ret) 88 88 ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret); 89 89
drivers/infiniband/ulp/ipoib/ipoib_vlan.c  +5 -5
··· 63 63 64 64 ppriv = netdev_priv(pdev); 65 65 66 - down(&ppriv->vlan_mutex); 66 + mutex_lock(&ppriv->vlan_mutex); 67 67 68 68 /* 69 69 * First ensure this isn't a duplicate. We check the parent device and ··· 124 124 125 125 list_add_tail(&priv->list, &ppriv->child_intfs); 126 126 127 - up(&ppriv->vlan_mutex); 127 + mutex_unlock(&ppriv->vlan_mutex); 128 128 129 129 return 0; 130 130 ··· 139 139 free_netdev(priv->dev); 140 140 141 141 err: 142 - up(&ppriv->vlan_mutex); 142 + mutex_unlock(&ppriv->vlan_mutex); 143 143 return result; 144 144 } 145 145 ··· 153 153 154 154 ppriv = netdev_priv(pdev); 155 155 156 - down(&ppriv->vlan_mutex); 156 + mutex_lock(&ppriv->vlan_mutex); 157 157 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 158 158 if (priv->pkey == pkey) { 159 159 unregister_netdev(priv->dev); ··· 167 167 break; 168 168 } 169 169 } 170 - up(&ppriv->vlan_mutex); 170 + mutex_unlock(&ppriv->vlan_mutex); 171 171 172 172 return ret; 173 173 }
drivers/infiniband/ulp/srp/ib_srp.c  +4 -19
··· 1516 1516 1517 1517 static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL); 1518 1518 1519 - static struct srp_host *srp_add_port(struct ib_device *device, 1520 - __be64 node_guid, u8 port) 1519 + static struct srp_host *srp_add_port(struct ib_device *device, u8 port) 1521 1520 { 1522 1521 struct srp_host *host; 1523 1522 ··· 1531 1532 host->port = port; 1532 1533 1533 1534 host->initiator_port_id[7] = port; 1534 - memcpy(host->initiator_port_id + 8, &node_guid, 8); 1535 + memcpy(host->initiator_port_id + 8, &device->node_guid, 8); 1535 1536 1536 1537 host->pd = ib_alloc_pd(device); 1537 1538 if (IS_ERR(host->pd)) ··· 1579 1580 { 1580 1581 struct list_head *dev_list; 1581 1582 struct srp_host *host; 1582 - struct ib_device_attr *dev_attr; 1583 1583 int s, e, p; 1584 - 1585 - dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); 1586 - if (!dev_attr) 1587 - return; 1588 - 1589 - if (ib_query_device(device, dev_attr)) { 1590 - printk(KERN_WARNING PFX "Couldn't query node GUID for %s.\n", 1591 - device->name); 1592 - goto out; 1593 - } 1594 1584 1595 1585 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); 1596 1586 if (!dev_list) 1597 - goto out; 1587 + return; 1598 1588 1599 1589 INIT_LIST_HEAD(dev_list); 1600 1590 ··· 1596 1608 } 1597 1609 1598 1610 for (p = s; p <= e; ++p) { 1599 - host = srp_add_port(device, dev_attr->node_guid, p); 1611 + host = srp_add_port(device, p); 1600 1612 if (host) 1601 1613 list_add_tail(&host->list, dev_list); 1602 1614 } 1603 1615 1604 1616 ib_set_client_data(device, &srp_client, dev_list); 1605 - 1606 - out: 1607 - kfree(dev_attr); 1608 1617 } 1609 1618 1610 1619 static void srp_remove_one(struct ib_device *device)
include/rdma/ib_verbs.h  +1 -1
··· 88 88 89 89 struct ib_device_attr { 90 90 u64 fw_ver; 91 - __be64 node_guid; 92 91 __be64 sys_image_guid; 93 92 u64 max_mr_size; 94 93 u64 page_size_cap; ··· 950 951 u64 uverbs_cmd_mask; 951 952 int uverbs_abi_ver; 952 953 954 + __be64 node_guid; 953 955 u8 node_type; 954 956 u8 phys_port_cnt; 955 957 };