Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/cma: Split apart the multiple uses of the same list heads

Two list heads in the rdma_id_private are being used for multiple
purposes, to save a few bytes of memory. Give the different purposes
different names and union the memory that is clearly exclusive.

list splits into device_item and listen_any_item. device_item is threaded
onto the cma_device's list and listen_any_item goes onto the
listen_any_list. IDs doing any listen cannot have devices.

listen_list splits into listen_item and listen_list. listen_list is on the
parent listen any rdma_id_private and listen_item is on child listen that
is bound to a specific cma_dev.

Which name should be used in which case depends on the state and other
factors of the rdma_id_private. Remap all the confusing references to make
sense with the new names, so at least there is some hope of matching the
necessary preconditions with each access.

Link: https://lore.kernel.org/r/0-v1-a5ead4a0c19d+c3a-cma_list_head_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

+27 -18
+18 -16
drivers/infiniband/core/cma.c
··· 453 453 id_priv->id.device = cma_dev->device; 454 454 id_priv->id.route.addr.dev_addr.transport = 455 455 rdma_node_get_transport(cma_dev->device->node_type); 456 - list_add_tail(&id_priv->list, &cma_dev->id_list); 456 + list_add_tail(&id_priv->device_item, &cma_dev->id_list); 457 457 458 458 trace_cm_id_attach(id_priv, cma_dev->device); 459 459 } ··· 470 470 static void cma_release_dev(struct rdma_id_private *id_priv) 471 471 { 472 472 mutex_lock(&lock); 473 - list_del(&id_priv->list); 473 + list_del_init(&id_priv->device_item); 474 474 cma_dev_put(id_priv->cma_dev); 475 475 id_priv->cma_dev = NULL; 476 476 id_priv->id.device = NULL; ··· 854 854 init_completion(&id_priv->comp); 855 855 refcount_set(&id_priv->refcount, 1); 856 856 mutex_init(&id_priv->handler_mutex); 857 + INIT_LIST_HEAD(&id_priv->device_item); 857 858 INIT_LIST_HEAD(&id_priv->listen_list); 858 859 INIT_LIST_HEAD(&id_priv->mc_list); 859 860 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); ··· 1648 1647 return id_priv; 1649 1648 list_for_each_entry(id_priv_dev, 1650 1649 &id_priv->listen_list, 1651 - listen_list) { 1650 + listen_item) { 1652 1651 if (id_priv_dev->id.device == cm_id->device && 1653 1652 cma_match_net_dev(&id_priv_dev->id, 1654 1653 net_dev, req)) ··· 1757 1756 * Remove from listen_any_list to prevent added devices from spawning 1758 1757 * additional listen requests. 
1759 1758 */ 1760 - list_del(&id_priv->list); 1759 + list_del_init(&id_priv->listen_any_item); 1761 1760 1762 1761 while (!list_empty(&id_priv->listen_list)) { 1763 - dev_id_priv = list_entry(id_priv->listen_list.next, 1764 - struct rdma_id_private, listen_list); 1762 + dev_id_priv = 1763 + list_first_entry(&id_priv->listen_list, 1764 + struct rdma_id_private, listen_item); 1765 1765 /* sync with device removal to avoid duplicate destruction */ 1766 - list_del_init(&dev_id_priv->list); 1767 - list_del(&dev_id_priv->listen_list); 1766 + list_del_init(&dev_id_priv->device_item); 1767 + list_del_init(&dev_id_priv->listen_item); 1768 1768 mutex_unlock(&lock); 1769 1769 1770 1770 rdma_destroy_id(&dev_id_priv->id); ··· 2566 2564 ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); 2567 2565 if (ret) 2568 2566 goto err_listen; 2569 - list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); 2567 + list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list); 2570 2568 return 0; 2571 2569 err_listen: 2572 2570 /* Caller must destroy this after releasing lock */ ··· 2582 2580 int ret; 2583 2581 2584 2582 mutex_lock(&lock); 2585 - list_add_tail(&id_priv->list, &listen_any_list); 2583 + list_add_tail(&id_priv->listen_any_item, &listen_any_list); 2586 2584 list_for_each_entry(cma_dev, &dev_list, list) { 2587 2585 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); 2588 2586 if (ret) { 2589 2587 /* Prevent racing with cma_process_remove() */ 2590 2588 if (to_destroy) 2591 - list_del_init(&to_destroy->list); 2589 + list_del_init(&to_destroy->device_item); 2592 2590 goto err_listen; 2593 2591 } 2594 2592 } ··· 4897 4895 4898 4896 mutex_lock(&lock); 4899 4897 list_for_each_entry(cma_dev, &dev_list, list) 4900 - list_for_each_entry(id_priv, &cma_dev->id_list, list) { 4898 + list_for_each_entry(id_priv, &cma_dev->id_list, device_item) { 4901 4899 ret = cma_netdev_change(ndev, id_priv); 4902 4900 if (ret) 4903 4901 goto out; ··· 4957 4955 mutex_lock(&lock); 4958 4956 
while (!list_empty(&cma_dev->id_list)) { 4959 4957 struct rdma_id_private *id_priv = list_first_entry( 4960 - &cma_dev->id_list, struct rdma_id_private, list); 4958 + &cma_dev->id_list, struct rdma_id_private, device_item); 4961 4959 4962 - list_del(&id_priv->listen_list); 4963 - list_del_init(&id_priv->list); 4960 + list_del_init(&id_priv->listen_item); 4961 + list_del_init(&id_priv->device_item); 4964 4962 cma_id_get(id_priv); 4965 4963 mutex_unlock(&lock); 4966 4964 ··· 5037 5035 5038 5036 mutex_lock(&lock); 5039 5037 list_add_tail(&cma_dev->list, &dev_list); 5040 - list_for_each_entry(id_priv, &listen_any_list, list) { 5038 + list_for_each_entry(id_priv, &listen_any_list, listen_any_item) { 5041 5039 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); 5042 5040 if (ret) 5043 5041 goto free_listen;
+9 -2
drivers/infiniband/core/cma_priv.h
··· 55 55 56 56 struct rdma_bind_list *bind_list; 57 57 struct hlist_node node; 58 - struct list_head list; /* listen_any_list or cma_device.list */ 59 - struct list_head listen_list; /* per device listens */ 58 + union { 59 + struct list_head device_item; /* On cma_device->id_list */ 60 + struct list_head listen_any_item; /* On listen_any_list */ 61 + }; 62 + union { 63 + /* On rdma_id_private->listen_list */ 64 + struct list_head listen_item; 65 + struct list_head listen_list; 66 + }; 60 67 struct cma_device *cma_dev; 61 68 struct list_head mc_list; 62 69