Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/core: Add support to optional-counters binding configuration

Whenever a new counter is created, save the user-requested
optional-counters binding configuration inside it. For manual
configuration, the setting is requested directly by the user; for
automatic configuration, it depends on whether automatic binding was
enabled with or without optional-counters binding.

This argument will later be used by the driver to determine whether to
bind the optional counters as well when binding this counter to a QP.

It indicates that when binding counters to a QP we also want the
currently enabled link optional-counters to be bound.

Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Link: https://patch.msgid.link/82f1c357606a16932979ef9a5910122675c74a3a.1741875070.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>

authored by

Patrisious Haddad and committed by
Leon Romanovsky
da371107 7e53b31a

+41 -12
+19 -9
drivers/infiniband/core/counters.c
··· 12 12 13 13 static int __counter_set_mode(struct rdma_port_counter *port_counter, 14 14 enum rdma_nl_counter_mode new_mode, 15 - enum rdma_nl_counter_mask new_mask) 15 + enum rdma_nl_counter_mask new_mask, 16 + bool bind_opcnt) 16 17 { 17 18 if (new_mode == RDMA_COUNTER_MODE_AUTO) { 18 19 if (new_mask & (~ALL_AUTO_MODE_MASKS)) ··· 24 23 25 24 port_counter->mode.mode = new_mode; 26 25 port_counter->mode.mask = new_mask; 26 + port_counter->mode.bind_opcnt = bind_opcnt; 27 27 return 0; 28 28 } 29 29 ··· 43 41 */ 44 42 int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port, 45 43 enum rdma_nl_counter_mask mask, 44 + bool bind_opcnt, 46 45 struct netlink_ext_ack *extack) 47 46 { 48 47 struct rdma_port_counter *port_counter; ··· 62 59 RDMA_COUNTER_MODE_NONE; 63 60 64 61 if (port_counter->mode.mode == mode && 65 - port_counter->mode.mask == mask) { 62 + port_counter->mode.mask == mask && 63 + port_counter->mode.bind_opcnt == bind_opcnt) { 66 64 ret = 0; 67 65 goto out; 68 66 } 69 67 70 - ret = __counter_set_mode(port_counter, mode, mask); 68 + ret = __counter_set_mode(port_counter, mode, mask, bind_opcnt); 71 69 72 70 out: 73 71 mutex_unlock(&port_counter->lock); ··· 144 140 145 141 static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port, 146 142 struct ib_qp *qp, 147 - enum rdma_nl_counter_mode mode) 143 + enum rdma_nl_counter_mode mode, 144 + bool bind_opcnt) 148 145 { 149 146 struct rdma_port_counter *port_counter; 150 147 struct rdma_counter *counter; ··· 173 168 switch (mode) { 174 169 case RDMA_COUNTER_MODE_MANUAL: 175 170 ret = __counter_set_mode(port_counter, RDMA_COUNTER_MODE_MANUAL, 176 - 0); 171 + 0, bind_opcnt); 177 172 if (ret) { 178 173 mutex_unlock(&port_counter->lock); 179 174 goto err_mode; ··· 192 187 mutex_unlock(&port_counter->lock); 193 188 194 189 counter->mode.mode = mode; 190 + counter->mode.bind_opcnt = bind_opcnt; 195 191 kref_init(&counter->kref); 196 192 mutex_init(&counter->lock); 197 193 ··· 221 215 
port_counter->num_counters--; 222 216 if (!port_counter->num_counters && 223 217 (port_counter->mode.mode == RDMA_COUNTER_MODE_MANUAL)) 224 - __counter_set_mode(port_counter, RDMA_COUNTER_MODE_NONE, 0); 218 + __counter_set_mode(port_counter, RDMA_COUNTER_MODE_NONE, 0, 219 + false); 225 220 226 221 mutex_unlock(&port_counter->lock); 227 222 ··· 354 347 return ret; 355 348 } 356 349 } else { 357 - counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_AUTO); 350 + counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_AUTO, 351 + port_counter->mode.bind_opcnt); 358 352 if (!counter) 359 353 return -ENOMEM; 360 354 } ··· 568 560 goto err; 569 561 } 570 562 571 - counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_MANUAL); 563 + counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_MANUAL, true); 572 564 if (!counter) { 573 565 ret = -ENOMEM; 574 566 goto err; ··· 623 615 624 616 int rdma_counter_get_mode(struct ib_device *dev, u32 port, 625 617 enum rdma_nl_counter_mode *mode, 626 - enum rdma_nl_counter_mask *mask) 618 + enum rdma_nl_counter_mask *mask, 619 + bool *opcnt) 627 620 { 628 621 struct rdma_port_counter *port_counter; 629 622 630 623 port_counter = &dev->port_data[port].port_counter; 631 624 *mode = port_counter->mode.mode; 632 625 *mask = port_counter->mode.mask; 626 + *opcnt = port_counter->mode.bind_opcnt; 633 627 634 628 return 0; 635 629 }
+16 -2
drivers/infiniband/core/nldev.c
··· 171 171 [RDMA_NLDEV_ATTR_PARENT_NAME] = { .type = NLA_NUL_STRING }, 172 172 [RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE] = { .type = NLA_U8 }, 173 173 [RDMA_NLDEV_ATTR_EVENT_TYPE] = { .type = NLA_U8 }, 174 + [RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED] = { .type = NLA_U8 }, 174 175 }; 175 176 176 177 static int put_driver_name_print_type(struct sk_buff *msg, const char *name, ··· 2029 2028 struct ib_device *device, u32 port) 2030 2029 { 2031 2030 u32 mode, mask = 0, qpn, cntn = 0; 2031 + bool opcnt = false; 2032 2032 int ret; 2033 2033 2034 2034 /* Currently only counter for QP is supported */ ··· 2037 2035 nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) 2038 2036 return -EINVAL; 2039 2037 2038 + if (tb[RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED]) 2039 + opcnt = !!nla_get_u8( 2040 + tb[RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED]); 2041 + 2040 2042 mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]); 2041 2043 if (mode == RDMA_COUNTER_MODE_AUTO) { 2042 2044 if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]) 2043 2045 mask = nla_get_u32( 2044 2046 tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]); 2045 - return rdma_counter_set_auto_mode(device, port, mask, extack); 2047 + return rdma_counter_set_auto_mode(device, port, mask, opcnt, 2048 + extack); 2046 2049 } 2047 2050 2048 2051 if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) ··· 2365 2358 struct ib_device *device; 2366 2359 struct sk_buff *msg; 2367 2360 u32 index, port; 2361 + bool opcnt; 2368 2362 int ret; 2369 2363 2370 2364 if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) ··· 2401 2393 goto err_msg; 2402 2394 } 2403 2395 2404 - ret = rdma_counter_get_mode(device, port, &mode, &mask); 2396 + ret = rdma_counter_get_mode(device, port, &mode, &mask, &opcnt); 2405 2397 if (ret) 2406 2398 goto err_msg; 2407 2399 ··· 2414 2406 2415 2407 if ((mode == RDMA_COUNTER_MODE_AUTO) && 2416 2408 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { 2409 + ret = -EMSGSIZE; 2410 + goto err_msg; 2411 + } 2412 + 2413 + if ((mode == RDMA_COUNTER_MODE_AUTO) 
&& 2414 + nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED, opcnt)) { 2417 2415 ret = -EMSGSIZE; 2418 2416 goto err_msg; 2419 2417 }
+4 -1
include/rdma/rdma_counter.h
··· 23 23 enum rdma_nl_counter_mode mode; 24 24 enum rdma_nl_counter_mask mask; 25 25 struct auto_mode_param param; 26 + bool bind_opcnt; 26 27 }; 27 28 28 29 struct rdma_port_counter { ··· 48 47 void rdma_counter_release(struct ib_device *dev); 49 48 int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port, 50 49 enum rdma_nl_counter_mask mask, 50 + bool bind_opcnt, 51 51 struct netlink_ext_ack *extack); 52 52 int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port); 53 53 int rdma_counter_unbind_qp(struct ib_qp *qp, bool force); ··· 63 61 u32 qp_num, u32 counter_id); 64 62 int rdma_counter_get_mode(struct ib_device *dev, u32 port, 65 63 enum rdma_nl_counter_mode *mode, 66 - enum rdma_nl_counter_mask *mask); 64 + enum rdma_nl_counter_mask *mask, 65 + bool *opcnt); 67 66 68 67 int rdma_counter_modify(struct ib_device *dev, u32 port, 69 68 unsigned int index, bool enable);
+2
include/uapi/rdma/rdma_netlink.h
··· 580 580 RDMA_NLDEV_ATTR_EVENT_TYPE, /* u8 */ 581 581 582 582 RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, /* u8 */ 583 + 584 + RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED, /* u8 */ 583 585 /* 584 586 * Always the end 585 587 */