Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA: Allow ib_clients to fail when add() is called

When a client is added, it isn't allowed to fail, but all the clients have
various failure paths within their add routines.

This creates the very fringe condition where the client was added, failed
during add and didn't set the client_data. The core code will then still
call other client_data centric ops like remove(), rename(), get_nl_info(),
and get_net_dev_by_params() with NULL client_data - which is confusing and
unexpected.

If the add() callback fails, then do not call any more client ops for the
device, even remove.

Remove all the now redundant checks for NULL client_data in ops callbacks.

Update all the add() callbacks to return error codes
appropriately. EOPNOTSUPP is used for cases where the ULP does not support
the ib_device - eg because it only works with IB.

Link: https://lore.kernel.org/r/20200421172440.387069-1-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Acked-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

+142 -124
+14 -10
drivers/infiniband/core/cm.c
··· 81 81 EXPORT_SYMBOL(ibcm_reject_msg); 82 82 83 83 struct cm_id_private; 84 - static void cm_add_one(struct ib_device *device); 84 + static int cm_add_one(struct ib_device *device); 85 85 static void cm_remove_one(struct ib_device *device, void *client_data); 86 86 static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, 87 87 struct ib_cm_sidr_rep_param *param); ··· 4382 4382 4383 4383 } 4384 4384 4385 - static void cm_add_one(struct ib_device *ib_device) 4385 + static int cm_add_one(struct ib_device *ib_device) 4386 4386 { 4387 4387 struct cm_device *cm_dev; 4388 4388 struct cm_port *port; ··· 4401 4401 cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt), 4402 4402 GFP_KERNEL); 4403 4403 if (!cm_dev) 4404 - return; 4404 + return -ENOMEM; 4405 4405 4406 4406 cm_dev->ib_device = ib_device; 4407 4407 cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay; ··· 4413 4413 continue; 4414 4414 4415 4415 port = kzalloc(sizeof *port, GFP_KERNEL); 4416 - if (!port) 4416 + if (!port) { 4417 + ret = -ENOMEM; 4417 4418 goto error1; 4419 + } 4418 4420 4419 4421 cm_dev->port[i-1] = port; 4420 4422 port->cm_dev = cm_dev; ··· 4437 4435 cm_recv_handler, 4438 4436 port, 4439 4437 0); 4440 - if (IS_ERR(port->mad_agent)) 4438 + if (IS_ERR(port->mad_agent)) { 4439 + ret = PTR_ERR(port->mad_agent); 4441 4440 goto error2; 4441 + } 4442 4442 4443 4443 ret = ib_modify_port(ib_device, i, 0, &port_modify); 4444 4444 if (ret) ··· 4449 4445 count++; 4450 4446 } 4451 4447 4452 - if (!count) 4448 + if (!count) { 4449 + ret = -EOPNOTSUPP; 4453 4450 goto free; 4451 + } 4454 4452 4455 4453 ib_set_client_data(ib_device, &cm_client, cm_dev); 4456 4454 4457 4455 write_lock_irqsave(&cm.device_lock, flags); 4458 4456 list_add_tail(&cm_dev->list, &cm.device_list); 4459 4457 write_unlock_irqrestore(&cm.device_lock, flags); 4460 - return; 4458 + return 0; 4461 4459 4462 4460 error3: 4463 4461 ib_unregister_mad_agent(port->mad_agent); ··· 4481 4475 } 4482 4476 free: 4483 
4477 kfree(cm_dev); 4478 + return ret; 4484 4479 } 4485 4480 4486 4481 static void cm_remove_one(struct ib_device *ib_device, void *client_data) ··· 4495 4488 }; 4496 4489 unsigned long flags; 4497 4490 int i; 4498 - 4499 - if (!cm_dev) 4500 - return; 4501 4491 4502 4492 write_lock_irqsave(&cm.device_lock, flags); 4503 4493 list_del(&cm_dev->list);
+12 -11
drivers/infiniband/core/cma.c
··· 153 153 } 154 154 EXPORT_SYMBOL(rdma_res_to_id); 155 155 156 - static void cma_add_one(struct ib_device *device); 156 + static int cma_add_one(struct ib_device *device); 157 157 static void cma_remove_one(struct ib_device *device, void *client_data); 158 158 159 159 static struct ib_client cma_client = { ··· 4638 4638 .notifier_call = cma_netdev_callback 4639 4639 }; 4640 4640 4641 - static void cma_add_one(struct ib_device *device) 4641 + static int cma_add_one(struct ib_device *device) 4642 4642 { 4643 4643 struct cma_device *cma_dev; 4644 4644 struct rdma_id_private *id_priv; 4645 4645 unsigned int i; 4646 4646 unsigned long supported_gids = 0; 4647 + int ret; 4647 4648 4648 4649 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); 4649 4650 if (!cma_dev) 4650 - return; 4651 + return -ENOMEM; 4651 4652 4652 4653 cma_dev->device = device; 4653 4654 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, 4654 4655 sizeof(*cma_dev->default_gid_type), 4655 4656 GFP_KERNEL); 4656 - if (!cma_dev->default_gid_type) 4657 + if (!cma_dev->default_gid_type) { 4658 + ret = -ENOMEM; 4657 4659 goto free_cma_dev; 4660 + } 4658 4661 4659 4662 cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, 4660 4663 sizeof(*cma_dev->default_roce_tos), 4661 4664 GFP_KERNEL); 4662 - if (!cma_dev->default_roce_tos) 4665 + if (!cma_dev->default_roce_tos) { 4666 + ret = -ENOMEM; 4663 4667 goto free_gid_type; 4668 + } 4664 4669 4665 4670 rdma_for_each_port (device, i) { 4666 4671 supported_gids = roce_gid_type_mask_support(device, i); ··· 4691 4686 mutex_unlock(&lock); 4692 4687 4693 4688 trace_cm_add_one(device); 4694 - return; 4689 + return 0; 4695 4690 4696 4691 free_gid_type: 4697 4692 kfree(cma_dev->default_gid_type); 4698 4693 4699 4694 free_cma_dev: 4700 4695 kfree(cma_dev); 4701 - 4702 - return; 4696 + return ret; 4703 4697 } 4704 4698 4705 4699 static int cma_remove_id_dev(struct rdma_id_private *id_priv) ··· 4759 4755 struct cma_device *cma_dev = client_data; 4760 4756 4761 
4757 trace_cm_remove_one(device); 4762 - 4763 - if (!cma_dev) 4764 - return; 4765 4758 4766 4759 mutex_lock(&lock); 4767 4760 list_del(&cma_dev->list);
+14 -2
drivers/infiniband/core/device.c
··· 677 677 if (ret) 678 678 goto out; 679 679 downgrade_write(&device->client_data_rwsem); 680 - if (client->add) 681 - client->add(device); 680 + if (client->add) { 681 + if (client->add(device)) { 682 + /* 683 + * If a client fails to add then the error code is 684 + * ignored, but we won't call any more ops on this 685 + * client. 686 + */ 687 + xa_erase(&device->client_data, client->client_id); 688 + up_read(&device->client_data_rwsem); 689 + ib_device_put(device); 690 + ib_client_put(client); 691 + return 0; 692 + } 693 + } 682 694 683 695 /* Readers shall not see a client until add has been completed */ 684 696 xa_set_mark(&device->client_data, client->client_id,
+13 -4
drivers/infiniband/core/mad.c
··· 3076 3076 return 0; 3077 3077 } 3078 3078 3079 - static void ib_mad_init_device(struct ib_device *device) 3079 + static int ib_mad_init_device(struct ib_device *device) 3080 3080 { 3081 3081 int start, i; 3082 + unsigned int count = 0; 3083 + int ret; 3082 3084 3083 3085 start = rdma_start_port(device); 3084 3086 ··· 3088 3086 if (!rdma_cap_ib_mad(device, i)) 3089 3087 continue; 3090 3088 3091 - if (ib_mad_port_open(device, i)) { 3089 + ret = ib_mad_port_open(device, i); 3090 + if (ret) { 3092 3091 dev_err(&device->dev, "Couldn't open port %d\n", i); 3093 3092 goto error; 3094 3093 } 3095 - if (ib_agent_port_open(device, i)) { 3094 + ret = ib_agent_port_open(device, i); 3095 + if (ret) { 3096 3096 dev_err(&device->dev, 3097 3097 "Couldn't open port %d for agents\n", i); 3098 3098 goto error_agent; 3099 3099 } 3100 + count++; 3100 3101 } 3101 - return; 3102 + if (!count) 3103 + return -EOPNOTSUPP; 3104 + 3105 + return 0; 3102 3106 3103 3107 error_agent: 3104 3108 if (ib_mad_port_close(device, i)) ··· 3121 3113 if (ib_mad_port_close(device, i)) 3122 3114 dev_err(&device->dev, "Couldn't close port %d\n", i); 3123 3115 } 3116 + return ret; 3124 3117 } 3125 3118 3126 3119 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
+5 -7
drivers/infiniband/core/multicast.c
··· 42 42 #include <rdma/ib_cache.h> 43 43 #include "sa.h" 44 44 45 - static void mcast_add_one(struct ib_device *device); 45 + static int mcast_add_one(struct ib_device *device); 46 46 static void mcast_remove_one(struct ib_device *device, void *client_data); 47 47 48 48 static struct ib_client mcast_client = { ··· 815 815 } 816 816 } 817 817 818 - static void mcast_add_one(struct ib_device *device) 818 + static int mcast_add_one(struct ib_device *device) 819 819 { 820 820 struct mcast_device *dev; 821 821 struct mcast_port *port; ··· 825 825 dev = kmalloc(struct_size(dev, port, device->phys_port_cnt), 826 826 GFP_KERNEL); 827 827 if (!dev) 828 - return; 828 + return -ENOMEM; 829 829 830 830 dev->start_port = rdma_start_port(device); 831 831 dev->end_port = rdma_end_port(device); ··· 845 845 846 846 if (!count) { 847 847 kfree(dev); 848 - return; 848 + return -EOPNOTSUPP; 849 849 } 850 850 851 851 dev->device = device; ··· 853 853 854 854 INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler); 855 855 ib_register_event_handler(&dev->event_handler); 856 + return 0; 856 857 } 857 858 858 859 static void mcast_remove_one(struct ib_device *device, void *client_data) ··· 861 860 struct mcast_device *dev = client_data; 862 861 struct mcast_port *port; 863 862 int i; 864 - 865 - if (!dev) 866 - return; 867 863 868 864 ib_unregister_event_handler(&dev->event_handler); 869 865 flush_workqueue(mcast_wq);
+12 -10
drivers/infiniband/core/sa_query.c
··· 174 174 }; 175 175 176 176 177 - static void ib_sa_add_one(struct ib_device *device); 177 + static int ib_sa_add_one(struct ib_device *device); 178 178 static void ib_sa_remove_one(struct ib_device *device, void *client_data); 179 179 180 180 static struct ib_client sa_client = { ··· 2322 2322 } 2323 2323 } 2324 2324 2325 - static void ib_sa_add_one(struct ib_device *device) 2325 + static int ib_sa_add_one(struct ib_device *device) 2326 2326 { 2327 2327 struct ib_sa_device *sa_dev; 2328 2328 int s, e, i; 2329 2329 int count = 0; 2330 + int ret; 2330 2331 2331 2332 s = rdma_start_port(device); 2332 2333 e = rdma_end_port(device); 2333 2334 2334 2335 sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL); 2335 2336 if (!sa_dev) 2336 - return; 2337 + return -ENOMEM; 2337 2338 2338 2339 sa_dev->start_port = s; 2339 2340 sa_dev->end_port = e; ··· 2354 2353 ib_register_mad_agent(device, i + s, IB_QPT_GSI, 2355 2354 NULL, 0, send_handler, 2356 2355 recv_handler, sa_dev, 0); 2357 - if (IS_ERR(sa_dev->port[i].agent)) 2356 + if (IS_ERR(sa_dev->port[i].agent)) { 2357 + ret = PTR_ERR(sa_dev->port[i].agent); 2358 2358 goto err; 2359 + } 2359 2360 2360 2361 INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); 2361 2362 INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work, ··· 2366 2363 count++; 2367 2364 } 2368 2365 2369 - if (!count) 2366 + if (!count) { 2367 + ret = -EOPNOTSUPP; 2370 2368 goto free; 2369 + } 2371 2370 2372 2371 ib_set_client_data(device, &sa_client, sa_dev); 2373 2372 ··· 2388 2383 update_sm_ah(&sa_dev->port[i].update_task); 2389 2384 } 2390 2385 2391 - return; 2386 + return 0; 2392 2387 2393 2388 err: 2394 2389 while (--i >= 0) { ··· 2397 2392 } 2398 2393 free: 2399 2394 kfree(sa_dev); 2400 - return; 2395 + return ret; 2401 2396 } 2402 2397 2403 2398 static void ib_sa_remove_one(struct ib_device *device, void *client_data) 2404 2399 { 2405 2400 struct ib_sa_device *sa_dev = client_data; 2406 2401 int i; 2407 - 2408 - if (!sa_dev) 2409 - return; 
2410 2402 2411 2403 ib_unregister_event_handler(&sa_dev->event_handler); 2412 2404 flush_workqueue(ib_wq);
+12 -10
drivers/infiniband/core/user_mad.c
··· 142 142 143 143 static DEFINE_IDA(umad_ida); 144 144 145 - static void ib_umad_add_one(struct ib_device *device); 145 + static int ib_umad_add_one(struct ib_device *device); 146 146 static void ib_umad_remove_one(struct ib_device *device, void *client_data); 147 147 148 148 static void ib_umad_dev_free(struct kref *kref) ··· 1352 1352 put_device(&port->dev); 1353 1353 } 1354 1354 1355 - static void ib_umad_add_one(struct ib_device *device) 1355 + static int ib_umad_add_one(struct ib_device *device) 1356 1356 { 1357 1357 struct ib_umad_device *umad_dev; 1358 1358 int s, e, i; 1359 1359 int count = 0; 1360 + int ret; 1360 1361 1361 1362 s = rdma_start_port(device); 1362 1363 e = rdma_end_port(device); 1363 1364 1364 1365 umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL); 1365 1366 if (!umad_dev) 1366 - return; 1367 + return -ENOMEM; 1367 1368 1368 1369 kref_init(&umad_dev->kref); 1369 1370 for (i = s; i <= e; ++i) { 1370 1371 if (!rdma_cap_ib_mad(device, i)) 1371 1372 continue; 1372 1373 1373 - if (ib_umad_init_port(device, i, umad_dev, 1374 - &umad_dev->ports[i - s])) 1374 + ret = ib_umad_init_port(device, i, umad_dev, 1375 + &umad_dev->ports[i - s]); 1376 + if (ret) 1375 1377 goto err; 1376 1378 1377 1379 count++; 1378 1380 } 1379 1381 1380 - if (!count) 1382 + if (!count) { 1383 + ret = -EOPNOTSUPP; 1381 1384 goto free; 1385 + } 1382 1386 1383 1387 ib_set_client_data(device, &umad_client, umad_dev); 1384 1388 1385 - return; 1389 + return 0; 1386 1390 1387 1391 err: 1388 1392 while (--i >= s) { ··· 1398 1394 free: 1399 1395 /* balances kref_init */ 1400 1396 ib_umad_dev_put(umad_dev); 1397 + return ret; 1401 1398 } 1402 1399 1403 1400 static void ib_umad_remove_one(struct ib_device *device, void *client_data) 1404 1401 { 1405 1402 struct ib_umad_device *umad_dev = client_data; 1406 1403 unsigned int i; 1407 - 1408 - if (!umad_dev) 1409 - return; 1410 1404 1411 1405 rdma_for_each_port (device, i) { 1412 1406 if (rdma_cap_ib_mad(device, i))
+12 -12
drivers/infiniband/core/uverbs_main.c
··· 75 75 static struct class *uverbs_class; 76 76 77 77 static DEFINE_IDA(uverbs_ida); 78 - static void ib_uverbs_add_one(struct ib_device *device); 78 + static int ib_uverbs_add_one(struct ib_device *device); 79 79 static void ib_uverbs_remove_one(struct ib_device *device, void *client_data); 80 80 81 81 /* ··· 1091 1091 return 0; 1092 1092 } 1093 1093 1094 - static void ib_uverbs_add_one(struct ib_device *device) 1094 + static int ib_uverbs_add_one(struct ib_device *device) 1095 1095 { 1096 1096 int devnum; 1097 1097 dev_t base; ··· 1099 1099 int ret; 1100 1100 1101 1101 if (!device->ops.alloc_ucontext) 1102 - return; 1102 + return -EOPNOTSUPP; 1103 1103 1104 1104 uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL); 1105 1105 if (!uverbs_dev) 1106 - return; 1106 + return -ENOMEM; 1107 1107 1108 1108 ret = init_srcu_struct(&uverbs_dev->disassociate_srcu); 1109 1109 if (ret) { 1110 1110 kfree(uverbs_dev); 1111 - return; 1111 + return -ENOMEM; 1112 1112 } 1113 1113 1114 1114 device_initialize(&uverbs_dev->dev); ··· 1128 1128 1129 1129 devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1, 1130 1130 GFP_KERNEL); 1131 - if (devnum < 0) 1131 + if (devnum < 0) { 1132 + ret = -ENOMEM; 1132 1133 goto err; 1134 + } 1133 1135 uverbs_dev->devnum = devnum; 1134 1136 if (devnum >= IB_UVERBS_NUM_FIXED_MINOR) 1135 1137 base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR; 1136 1138 else 1137 1139 base = IB_UVERBS_BASE_DEV + devnum; 1138 1140 1139 - if (ib_uverbs_create_uapi(device, uverbs_dev)) 1141 + ret = ib_uverbs_create_uapi(device, uverbs_dev); 1142 + if (ret) 1140 1143 goto err_uapi; 1141 1144 1142 1145 uverbs_dev->dev.devt = base; ··· 1154 1151 goto err_uapi; 1155 1152 1156 1153 ib_set_client_data(device, &uverbs_client, uverbs_dev); 1157 - return; 1154 + return 0; 1158 1155 1159 1156 err_uapi: 1160 1157 ida_free(&uverbs_ida, devnum); ··· 1163 1160 ib_uverbs_comp_dev(uverbs_dev); 1164 1161 wait_for_completion(&uverbs_dev->comp); 1165 1162 
put_device(&uverbs_dev->dev); 1166 - return; 1163 + return ret; 1167 1164 } 1168 1165 1169 1166 static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, ··· 1205 1202 { 1206 1203 struct ib_uverbs_device *uverbs_dev = client_data; 1207 1204 int wait_clients = 1; 1208 - 1209 - if (!uverbs_dev) 1210 - return; 1211 1205 1212 1206 cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev); 1213 1207 ida_free(&uverbs_ida, uverbs_dev->devnum);
+5 -10
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 86 86 87 87 struct ib_sa_client ipoib_sa_client; 88 88 89 - static void ipoib_add_one(struct ib_device *device); 89 + static int ipoib_add_one(struct ib_device *device); 90 90 static void ipoib_remove_one(struct ib_device *device, void *client_data); 91 91 static void ipoib_neigh_reclaim(struct rcu_head *rp); 92 92 static struct net_device *ipoib_get_net_dev_by_params( ··· 477 477 478 478 ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index); 479 479 if (ret) 480 - return NULL; 481 - 482 - if (!dev_list) 483 480 return NULL; 484 481 485 482 /* See if we can find a unique device matching the L2 parameters */ ··· 2511 2514 return ERR_PTR(-ENOMEM); 2512 2515 } 2513 2516 2514 - static void ipoib_add_one(struct ib_device *device) 2517 + static int ipoib_add_one(struct ib_device *device) 2515 2518 { 2516 2519 struct list_head *dev_list; 2517 2520 struct net_device *dev; ··· 2521 2524 2522 2525 dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL); 2523 2526 if (!dev_list) 2524 - return; 2527 + return -ENOMEM; 2525 2528 2526 2529 INIT_LIST_HEAD(dev_list); 2527 2530 ··· 2538 2541 2539 2542 if (!count) { 2540 2543 kfree(dev_list); 2541 - return; 2544 + return -EOPNOTSUPP; 2542 2545 } 2543 2546 2544 2547 ib_set_client_data(device, &ipoib_client, dev_list); 2548 + return 0; 2545 2549 } 2546 2550 2547 2551 static void ipoib_remove_one(struct ib_device *device, void *client_data) 2548 2552 { 2549 2553 struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv; 2550 2554 struct list_head *dev_list = client_data; 2551 - 2552 - if (!dev_list) 2553 - return; 2554 2555 2555 2556 list_for_each_entry_safe(priv, tmp, dev_list, list) { 2556 2557 LIST_HEAD(head);
+5 -7
drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
··· 113 113 struct mutex lock; 114 114 }; 115 115 116 - static void opa_vnic_vema_add_one(struct ib_device *device); 116 + static int opa_vnic_vema_add_one(struct ib_device *device); 117 117 static void opa_vnic_vema_rem_one(struct ib_device *device, 118 118 void *client_data); 119 119 ··· 989 989 * 990 990 * Allocate the vnic control port and initialize it. 991 991 */ 992 - static void opa_vnic_vema_add_one(struct ib_device *device) 992 + static int opa_vnic_vema_add_one(struct ib_device *device) 993 993 { 994 994 struct opa_vnic_ctrl_port *cport; 995 995 int rc, size = sizeof(*cport); 996 996 997 997 if (!rdma_cap_opa_vnic(device)) 998 - return; 998 + return -EOPNOTSUPP; 999 999 1000 1000 size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port); 1001 1001 cport = kzalloc(size, GFP_KERNEL); 1002 1002 if (!cport) 1003 - return; 1003 + return -ENOMEM; 1004 1004 1005 1005 cport->num_ports = device->phys_port_cnt; 1006 1006 cport->ibdev = device; ··· 1012 1012 1013 1013 ib_set_client_data(device, &opa_vnic_client, cport); 1014 1014 opa_vnic_ctrl_config_dev(cport, true); 1015 + return 0; 1015 1016 } 1016 1017 1017 1018 /** ··· 1026 1025 void *client_data) 1027 1026 { 1028 1027 struct opa_vnic_ctrl_port *cport = client_data; 1029 - 1030 - if (!cport) 1031 - return; 1032 1028 1033 1029 c_info("removing VNIC client\n"); 1034 1030 opa_vnic_ctrl_config_dev(cport, false);
+10 -11
drivers/infiniband/ulp/srp/ib_srp.c
··· 146 146 MODULE_PARM_DESC(ch_count, 147 147 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA."); 148 148 149 - static void srp_add_one(struct ib_device *device); 149 + static int srp_add_one(struct ib_device *device); 150 150 static void srp_remove_one(struct ib_device *device, void *client_data); 151 151 static void srp_rename_dev(struct ib_device *device, void *client_data); 152 152 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc); ··· 4132 4132 } 4133 4133 } 4134 4134 4135 - static void srp_add_one(struct ib_device *device) 4135 + static int srp_add_one(struct ib_device *device) 4136 4136 { 4137 4137 struct srp_device *srp_dev; 4138 4138 struct ib_device_attr *attr = &device->attrs; ··· 4144 4144 4145 4145 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL); 4146 4146 if (!srp_dev) 4147 - return; 4147 + return -ENOMEM; 4148 4148 4149 4149 /* 4150 4150 * Use the smallest page size supported by the HCA, down to a ··· 4197 4197 4198 4198 srp_dev->dev = device; 4199 4199 srp_dev->pd = ib_alloc_pd(device, flags); 4200 - if (IS_ERR(srp_dev->pd)) 4201 - goto free_dev; 4200 + if (IS_ERR(srp_dev->pd)) { 4201 + int ret = PTR_ERR(srp_dev->pd); 4202 + 4203 + kfree(srp_dev); 4204 + return ret; 4205 + } 4202 4206 4203 4207 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) { 4204 4208 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey; ··· 4216 4212 } 4217 4213 4218 4214 ib_set_client_data(device, &srp_client, srp_dev); 4219 - return; 4220 - 4221 - free_dev: 4222 - kfree(srp_dev); 4215 + return 0; 4223 4216 } 4224 4217 4225 4218 static void srp_remove_one(struct ib_device *device, void *client_data) ··· 4226 4225 struct srp_target_port *target; 4227 4226 4228 4227 srp_dev = client_data; 4229 - if (!srp_dev) 4230 - 
return; 4231 4228 4232 4229 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { 4233 4230 device_unregister(&host->dev);
+10 -15
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 3101 3101 * srpt_add_one - InfiniBand device addition callback function 3102 3102 * @device: Describes a HCA. 3103 3103 */ 3104 - static void srpt_add_one(struct ib_device *device) 3104 + static int srpt_add_one(struct ib_device *device) 3105 3105 { 3106 3106 struct srpt_device *sdev; 3107 3107 struct srpt_port *sport; ··· 3112 3112 sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt), 3113 3113 GFP_KERNEL); 3114 3114 if (!sdev) 3115 - goto err; 3115 + return -ENOMEM; 3116 3116 3117 3117 sdev->device = device; 3118 3118 mutex_init(&sdev->sdev_mutex); 3119 3119 3120 3120 sdev->pd = ib_alloc_pd(device, 0); 3121 - if (IS_ERR(sdev->pd)) 3121 + if (IS_ERR(sdev->pd)) { 3122 + ret = PTR_ERR(sdev->pd); 3122 3123 goto free_dev; 3124 + } 3123 3125 3124 3126 sdev->lkey = sdev->pd->local_dma_lkey; 3125 3127 ··· 3137 3135 if (IS_ERR(sdev->cm_id)) { 3138 3136 pr_info("ib_create_cm_id() failed: %ld\n", 3139 3137 PTR_ERR(sdev->cm_id)); 3138 + ret = PTR_ERR(sdev->cm_id); 3140 3139 sdev->cm_id = NULL; 3141 3140 if (!rdma_cm_id) 3142 3141 goto err_ring; ··· 3182 3179 mutex_init(&sport->port_gid_id.mutex); 3183 3180 INIT_LIST_HEAD(&sport->port_gid_id.tpg_list); 3184 3181 3185 - if (srpt_refresh_port(sport)) { 3182 + ret = srpt_refresh_port(sport); 3183 + if (ret) { 3186 3184 pr_err("MAD registration failed for %s-%d.\n", 3187 3185 dev_name(&sdev->device->dev), i); 3188 3186 goto err_event; ··· 3194 3190 list_add_tail(&sdev->list, &srpt_dev_list); 3195 3191 spin_unlock(&srpt_dev_lock); 3196 3192 3197 - out: 3198 3193 ib_set_client_data(device, &srpt_client, sdev); 3199 3194 pr_debug("added %s.\n", dev_name(&device->dev)); 3200 - return; 3195 + return 0; 3201 3196 3202 3197 err_event: 3203 3198 ib_unregister_event_handler(&sdev->event_handler); ··· 3208 3205 ib_dealloc_pd(sdev->pd); 3209 3206 free_dev: 3210 3207 kfree(sdev); 3211 - err: 3212 - sdev = NULL; 3213 3208 pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev)); 3214 - goto out; 3209 + return ret; 3215 3210 
} 3216 3211 3217 3212 /** ··· 3221 3220 { 3222 3221 struct srpt_device *sdev = client_data; 3223 3222 int i; 3224 - 3225 - if (!sdev) { 3226 - pr_info("%s(%s): nothing to do.\n", __func__, 3227 - dev_name(&device->dev)); 3228 - return; 3229 - } 3230 3223 3231 3224 srpt_unregister_mad_agent(sdev); 3232 3225
+1 -1
include/rdma/ib_verbs.h
··· 2722 2722 struct ib_client_nl_info; 2723 2723 struct ib_client { 2724 2724 const char *name; 2725 - void (*add) (struct ib_device *); 2725 + int (*add)(struct ib_device *ibdev); 2726 2726 void (*remove)(struct ib_device *, void *client_data); 2727 2727 void (*rename)(struct ib_device *dev, void *client_data); 2728 2728 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
+13 -8
net/rds/ib.c
··· 127 127 queue_work(rds_wq, &rds_ibdev->free_work); 128 128 } 129 129 130 - static void rds_ib_add_one(struct ib_device *device) 130 + static int rds_ib_add_one(struct ib_device *device) 131 131 { 132 132 struct rds_ib_device *rds_ibdev; 133 133 bool has_fr, has_fmr; 134 + int ret; 134 135 135 136 /* Only handle IB (no iWARP) devices */ 136 137 if (device->node_type != RDMA_NODE_IB_CA) 137 - return; 138 + return -EOPNOTSUPP; 138 139 139 140 rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL, 140 141 ibdev_to_node(device)); 141 142 if (!rds_ibdev) 142 - return; 143 + return -ENOMEM; 143 144 144 145 spin_lock_init(&rds_ibdev->spinlock); 145 146 refcount_set(&rds_ibdev->refcount, 1); ··· 183 182 if (!rds_ibdev->vector_load) { 184 183 pr_err("RDS/IB: %s failed to allocate vector memory\n", 185 184 __func__); 185 + ret = -ENOMEM; 186 186 goto put_dev; 187 187 } 188 188 189 189 rds_ibdev->dev = device; 190 190 rds_ibdev->pd = ib_alloc_pd(device, 0); 191 191 if (IS_ERR(rds_ibdev->pd)) { 192 + ret = PTR_ERR(rds_ibdev->pd); 192 193 rds_ibdev->pd = NULL; 193 194 goto put_dev; 194 195 } ··· 198 195 device->dma_device, 199 196 sizeof(struct rds_header), 200 197 L1_CACHE_BYTES, 0); 201 - if (!rds_ibdev->rid_hdrs_pool) 198 + if (!rds_ibdev->rid_hdrs_pool) { 199 + ret = -ENOMEM; 202 200 goto put_dev; 201 + } 203 202 204 203 rds_ibdev->mr_1m_pool = 205 204 rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL); 206 205 if (IS_ERR(rds_ibdev->mr_1m_pool)) { 206 + ret = PTR_ERR(rds_ibdev->mr_1m_pool); 207 207 rds_ibdev->mr_1m_pool = NULL; 208 208 goto put_dev; 209 209 } ··· 214 208 rds_ibdev->mr_8k_pool = 215 209 rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL); 216 210 if (IS_ERR(rds_ibdev->mr_8k_pool)) { 211 + ret = PTR_ERR(rds_ibdev->mr_8k_pool); 217 212 rds_ibdev->mr_8k_pool = NULL; 218 213 goto put_dev; 219 214 } ··· 234 227 refcount_inc(&rds_ibdev->refcount); 235 228 236 229 ib_set_client_data(device, &rds_ib_client, rds_ibdev); 237 - 
refcount_inc(&rds_ibdev->refcount); 238 230 239 231 rds_ib_nodev_connect(); 232 + return 0; 240 233 241 234 put_dev: 242 235 rds_ib_dev_put(rds_ibdev); 236 + return ret; 243 237 } 244 238 245 239 /* ··· 281 273 static void rds_ib_remove_one(struct ib_device *device, void *client_data) 282 274 { 283 275 struct rds_ib_device *rds_ibdev = client_data; 284 - 285 - if (!rds_ibdev) 286 - return; 287 276 288 277 rds_ib_dev_shutdown(rds_ibdev); 289 278
+4 -6
net/smc/smc_ib.c
··· 547 547 static struct ib_client smc_ib_client; 548 548 549 549 /* callback function for ib_register_client() */ 550 - static void smc_ib_add_dev(struct ib_device *ibdev) 550 + static int smc_ib_add_dev(struct ib_device *ibdev) 551 551 { 552 552 struct smc_ib_device *smcibdev; 553 553 u8 port_cnt; 554 554 int i; 555 555 556 556 if (ibdev->node_type != RDMA_NODE_IB_CA) 557 - return; 557 + return -EOPNOTSUPP; 558 558 559 559 smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL); 560 560 if (!smcibdev) 561 - return; 561 + return -ENOMEM; 562 562 563 563 smcibdev->ibdev = ibdev; 564 564 INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work); ··· 583 583 smcibdev->pnetid[i]); 584 584 } 585 585 schedule_work(&smcibdev->port_event_work); 586 + return 0; 586 587 } 587 588 588 589 /* callback function for ib_unregister_client() */ ··· 591 590 { 592 591 struct smc_ib_device *smcibdev = client_data; 593 592 594 - if (!smcibdev || smcibdev->ibdev != ibdev) 595 - return; 596 - ib_set_client_data(ibdev, &smc_ib_client, NULL); 597 593 spin_lock(&smc_ib_devices.lock); 598 594 list_del_init(&smcibdev->list); /* remove from smc_ib_devices */ 599 595 spin_unlock(&smc_ib_devices.lock);