Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-updates-2021-08-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 updates 2021-08-11

This series provides misc updates to mlx5.
For more information please see tag log below.

Please pull and let me know if there are any problems.

mlx5-updates-2021-08-11

Misc. cleanup for mlx5.

1) Typos and use of netdev_warn()
2) smatch cleanup
3) Minor fix to inner TTC table creation
4) Dynamic capability cache allocation
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+190 -107
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 877 877 ent->ret = -ETIMEDOUT; 878 878 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n", 879 879 ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); 880 - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); 880 + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); 881 881 882 882 out: 883 883 cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */ ··· 994 994 MLX5_SET(mbox_out, ent->out, status, status); 995 995 MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); 996 996 997 - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); 997 + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); 998 998 return; 999 999 } 1000 1000 ··· 1008 1008 poll_timeout(ent); 1009 1009 /* make sure we read the descriptor after ownership is SW */ 1010 1010 rmb(); 1011 - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT)); 1011 + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT)); 1012 1012 } 1013 1013 } 1014 1014 ··· 1068 1068 mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); 1069 1069 1070 1070 ent->ret = -ETIMEDOUT; 1071 - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); 1071 + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); 1072 1072 } 1073 1073 1074 1074 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
··· 520 520 e->out_dev = attr.out_dev; 521 521 e->route_dev_ifindex = attr.route_dev->ifindex; 522 522 523 - /* It's importent to add the neigh to the hash table before checking 523 + /* It's important to add the neigh to the hash table before checking 524 524 * the neigh validity state. So if we'll get a notification, in case the 525 525 * neigh changes it's validity state, we would find the relevant neigh 526 526 * in the hash.
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
··· 126 126 /* Create a separate SQ, so that when the buff pool is disabled, we could 127 127 * close this SQ safely and stop receiving CQEs. In other case, e.g., if 128 128 * the XDPSQ was used instead, we might run into trouble when the buff pool 129 - * is disabled and then reenabled, but the SQ continues receiving CQEs 129 + * is disabled and then re-enabled, but the SQ continues receiving CQEs 130 130 * from the old buff pool. 131 131 */ 132 132 err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
··· 33 33 #include "en.h" 34 34 35 35 /* mlx5e global resources should be placed in this file. 36 - * Global resources are common to all the netdevices crated on the same nic. 36 + * Global resources are common to all the netdevices created on the same nic. 37 37 */ 38 38 39 39 void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
··· 1255 1255 return 0; 1256 1256 1257 1257 mlx5e_set_inner_ttc_params(priv, &ttc_params); 1258 - priv->fs.inner_ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); 1258 + priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev, 1259 + &ttc_params); 1259 1260 if (IS_ERR(priv->fs.inner_ttc)) 1260 1261 return PTR_ERR(priv->fs.inner_ttc); 1261 1262 return 0;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
··· 146 146 */ 147 147 refcount_t refcnt; 148 148 149 - /* Save the last reported time offloaded trafic pass over one of the 149 + /* Save the last reported time offloaded traffic pass over one of the 150 150 * neigh hash entry flows. Use it to periodically update the neigh 151 151 * 'used' value and avoid neigh deleting by the kernel. 152 152 */
+8 -5
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 97 97 [MARK_TO_REG] = mark_to_reg_ct, 98 98 [LABELS_TO_REG] = labels_to_reg_ct, 99 99 [FTEID_TO_REG] = fteid_to_reg_ct, 100 - /* For NIC rules we store the retore metadata directly 100 + /* For NIC rules we store the restore metadata directly 101 101 * into reg_b that is passed to SW since we don't 102 102 * jump between steering domains. 103 103 */ ··· 2448 2448 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3; 2449 2449 } 2450 2450 } 2451 - /* Currenlty supported only for MPLS over UDP */ 2451 + /* Currently supported only for MPLS over UDP */ 2452 2452 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && 2453 2453 !netif_is_bareudp(filter_dev)) { 2454 2454 NL_SET_ERR_MSG_MOD(extack, ··· 2702 2702 if (s_mask && a_mask) { 2703 2703 NL_SET_ERR_MSG_MOD(extack, 2704 2704 "can't set and add to the same HW field"); 2705 - printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field); 2705 + netdev_warn(priv->netdev, 2706 + "mlx5: can't set and add to the same HW field (%x)\n", 2707 + f->field); 2706 2708 return -EOPNOTSUPP; 2707 2709 } 2708 2710 ··· 2743 2741 if (first < next_z && next_z < last) { 2744 2742 NL_SET_ERR_MSG_MOD(extack, 2745 2743 "rewrite of few sub-fields isn't supported"); 2746 - printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", 2747 - mask); 2744 + netdev_warn(priv->netdev, 2745 + "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", 2746 + mask); 2748 2747 return -EOPNOTSUPP; 2749 2748 } 2750 2749
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1492 1492 /** 1493 1493 * mlx5_eswitch_enable - Enable eswitch 1494 1494 * @esw: Pointer to eswitch 1495 - * @num_vfs: Enable eswitch swich for given number of VFs. 1495 + * @num_vfs: Enable eswitch switch for given number of VFs. 1496 1496 * Caller must pass num_vfs > 0 when enabling eswitch for 1497 1497 * vf vports. 1498 1498 * mlx5_eswitch_enable() returns 0 on success or error code on failure.
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/events.c
··· 27 27 static int forward_event(struct notifier_block *, unsigned long, void *); 28 28 29 29 static struct mlx5_nb events_nbs_ref[] = { 30 - /* Events to be proccessed by mlx5_core */ 30 + /* Events to be processed by mlx5_core */ 31 31 {.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY }, 32 32 {.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT }, 33 33 {.nb.notifier_call = port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT },
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
··· 1516 1516 mutex_lock(&fpga_xfrm->lock); 1517 1517 1518 1518 if (!fpga_xfrm->sa_ctx) 1519 - /* Unbounded xfrm, chane only sw attrs */ 1519 + /* Unbounded xfrm, change only sw attrs */ 1520 1520 goto change_sw_xfrm_attrs; 1521 1521 1522 1522 /* copy original hw sa */
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 2343 2343 2344 2344 #define FLOW_TABLE_BIT_SZ 1 2345 2345 #define GET_FLOW_TABLE_CAP(dev, offset) \ 2346 - ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \ 2346 + ((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \ 2347 2347 offset / 32)) >> \ 2348 2348 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) 2349 2349 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) ··· 2493 2493 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); 2494 2494 2495 2495 /* If this a prio with chains, and we can jump from one chain 2496 - * (namepsace) to another, so we accumulate the levels 2496 + * (namespace) to another, so we accumulate the levels 2497 2497 */ 2498 2498 if (prio->node.type == FS_TYPE_PRIO_CHAINS) 2499 2499 acc_level = acc_level_ns;
+1 -5
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 170 170 171 171 /* The reset only needs to be issued by one PF. The health buffer is 172 172 * shared between all functions, and will be cleared during a reset. 173 - * Check again to avoid a redundant 2nd reset. If the fatal erros was 173 + * Check again to avoid a redundant 2nd reset. If the fatal errors was 174 174 * PCI related a reset won't help. 175 175 */ 176 176 fatal_error = mlx5_health_check_fatal_sensors(dev); ··· 213 213 mutex_lock(&dev->intf_state_mutex); 214 214 if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 215 215 goto unlock;/* a previous error is still being handled */ 216 - if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) { 217 - dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 218 - goto unlock; 219 - } 220 216 221 217 enter_error_state(dev, force); 222 218 unlock:
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
··· 749 749 } else { 750 750 ptp_event.type = PTP_CLOCK_EXTTS; 751 751 } 752 - /* TODOL clock->ptp can be NULL if ptp_clock_register failes */ 752 + /* TODOL clock->ptp can be NULL if ptp_clock_register fails */ 753 753 ptp_clock_event(clock->ptp, &ptp_event); 754 754 break; 755 755 case PTP_PF_PEROUT:
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
··· 40 40 41 41 struct mlx5_vxlan { 42 42 struct mlx5_core_dev *mdev; 43 - /* max_num_ports is usuallly 4, 16 buckets is more than enough */ 43 + /* max_num_ports is usually 4, 16 buckets is more than enough */ 44 44 DECLARE_HASHTABLE(htable, 4); 45 45 struct mutex sync_lock; /* sync add/del port HW operations */ 46 46 };
+67 -13
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 389 389 390 390 switch (cap_mode) { 391 391 case HCA_CAP_OPMOD_GET_MAX: 392 - memcpy(dev->caps.hca_max[cap_type], hca_caps, 392 + memcpy(dev->caps.hca[cap_type]->max, hca_caps, 393 393 MLX5_UN_SZ_BYTES(hca_cap_union)); 394 394 break; 395 395 case HCA_CAP_OPMOD_GET_CUR: 396 - memcpy(dev->caps.hca_cur[cap_type], hca_caps, 396 + memcpy(dev->caps.hca[cap_type]->cur, hca_caps, 397 397 MLX5_UN_SZ_BYTES(hca_cap_union)); 398 398 break; 399 399 default: ··· 469 469 return err; 470 470 471 471 set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); 472 - memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP], 472 + memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur, 473 473 MLX5_ST_SZ_BYTES(odp_cap)); 474 474 475 475 #define ODP_CAP_SET_MAX(dev, field) \ ··· 514 514 515 515 set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, 516 516 capability); 517 - memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL], 517 + memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL]->cur, 518 518 MLX5_ST_SZ_BYTES(cmd_hca_cap)); 519 519 520 520 mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", ··· 596 596 return 0; 597 597 598 598 set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); 599 - memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE], 599 + memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ROCE]->cur, 600 600 MLX5_ST_SZ_BYTES(roce_cap)); 601 601 MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1); 602 602 ··· 748 748 static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev, 749 749 const struct pci_device_id *id) 750 750 { 751 - struct mlx5_priv *priv = &dev->priv; 752 751 int err = 0; 753 752 754 753 mutex_init(&dev->pci_status_mutex); 755 754 pci_set_drvdata(dev->pdev, dev); 756 755 757 756 dev->bar_addr = pci_resource_start(pdev, 0); 758 - priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); 759 757 760 758 err = mlx5_pci_enable_device(dev); 761 759 if (err) { ··· 1247 1249 int err = 0; 1248 1250 1249 1251 
mutex_lock(&dev->intf_state_mutex); 1250 - if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { 1251 - mlx5_core_warn(dev, "interface is up, NOP\n"); 1252 - goto out; 1253 - } 1254 - /* remove any previous indication of internal error */ 1255 1252 dev->state = MLX5_DEVICE_STATE_UP; 1256 1253 1257 1254 err = mlx5_function_setup(dev, true); ··· 1287 1294 mlx5_function_teardown(dev, true); 1288 1295 err_function: 1289 1296 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 1290 - out: 1291 1297 mutex_unlock(&dev->intf_state_mutex); 1292 1298 return err; 1293 1299 } ··· 1373 1381 mutex_unlock(&dev->intf_state_mutex); 1374 1382 } 1375 1383 1384 + static const int types[] = { 1385 + MLX5_CAP_GENERAL, 1386 + MLX5_CAP_GENERAL_2, 1387 + MLX5_CAP_ETHERNET_OFFLOADS, 1388 + MLX5_CAP_IPOIB_ENHANCED_OFFLOADS, 1389 + MLX5_CAP_ODP, 1390 + MLX5_CAP_ATOMIC, 1391 + MLX5_CAP_ROCE, 1392 + MLX5_CAP_IPOIB_OFFLOADS, 1393 + MLX5_CAP_FLOW_TABLE, 1394 + MLX5_CAP_ESWITCH_FLOW_TABLE, 1395 + MLX5_CAP_ESWITCH, 1396 + MLX5_CAP_VECTOR_CALC, 1397 + MLX5_CAP_QOS, 1398 + MLX5_CAP_DEBUG, 1399 + MLX5_CAP_DEV_MEM, 1400 + MLX5_CAP_DEV_EVENT, 1401 + MLX5_CAP_TLS, 1402 + MLX5_CAP_VDPA_EMULATION, 1403 + MLX5_CAP_IPSEC, 1404 + }; 1405 + 1406 + static void mlx5_hca_caps_free(struct mlx5_core_dev *dev) 1407 + { 1408 + int type; 1409 + int i; 1410 + 1411 + for (i = 0; i < ARRAY_SIZE(types); i++) { 1412 + type = types[i]; 1413 + kfree(dev->caps.hca[type]); 1414 + } 1415 + } 1416 + 1417 + static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev) 1418 + { 1419 + struct mlx5_hca_cap *cap; 1420 + int type; 1421 + int i; 1422 + 1423 + for (i = 0; i < ARRAY_SIZE(types); i++) { 1424 + cap = kzalloc(sizeof(*cap), GFP_KERNEL); 1425 + if (!cap) 1426 + goto err; 1427 + type = types[i]; 1428 + dev->caps.hca[type] = cap; 1429 + } 1430 + 1431 + return 0; 1432 + 1433 + err: 1434 + mlx5_hca_caps_free(dev); 1435 + return -ENOMEM; 1436 + } 1437 + 1376 1438 int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) 1377 1439 
{ 1378 1440 struct mlx5_priv *priv = &dev->priv; ··· 1446 1400 mutex_init(&priv->pgdir_mutex); 1447 1401 INIT_LIST_HEAD(&priv->pgdir_list); 1448 1402 1403 + priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); 1449 1404 priv->dbg_root = debugfs_create_dir(dev_name(dev->device), 1450 1405 mlx5_debugfs_root); 1451 1406 INIT_LIST_HEAD(&priv->traps); ··· 1463 1416 if (err) 1464 1417 goto err_adev_init; 1465 1418 1419 + err = mlx5_hca_caps_alloc(dev); 1420 + if (err) 1421 + goto err_hca_caps; 1422 + 1466 1423 return 0; 1467 1424 1425 + err_hca_caps: 1426 + mlx5_adev_cleanup(dev); 1468 1427 err_adev_init: 1469 1428 mlx5_pagealloc_cleanup(dev); 1470 1429 err_pagealloc_init: ··· 1489 1436 { 1490 1437 struct mlx5_priv *priv = &dev->priv; 1491 1438 1439 + mlx5_hca_caps_free(dev); 1492 1440 mlx5_adev_cleanup(dev); 1493 1441 mlx5_pagealloc_cleanup(dev); 1494 1442 mlx5_health_cleanup(dev);
+49 -26
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
··· 18 18 19 19 #define MLX5_SFS_PER_CTRL_IRQ 64 20 20 #define MLX5_IRQ_CTRL_SF_MAX 8 21 - /* min num of vectores for SFs to be enabled */ 21 + /* min num of vectors for SFs to be enabled */ 22 22 #define MLX5_IRQ_VEC_COMP_BASE_SF 2 23 23 24 24 #define MLX5_EQ_SHARE_IRQ_MAX_COMP (8) ··· 28 28 #define MLX5_EQ_REFS_PER_IRQ (2) 29 29 30 30 struct mlx5_irq { 31 - u32 index; 32 31 struct atomic_notifier_head nh; 33 32 cpumask_var_t mask; 34 33 char name[MLX5_MAX_IRQ_NAME]; 35 - struct kref kref; 36 - int irqn; 37 34 struct mlx5_irq_pool *pool; 35 + int refcount; 36 + u32 index; 37 + int irqn; 38 38 }; 39 39 40 40 struct mlx5_irq_pool { ··· 138 138 return ret; 139 139 } 140 140 141 - static void irq_release(struct kref *kref) 141 + static void irq_release(struct mlx5_irq *irq) 142 142 { 143 - struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref); 144 143 struct mlx5_irq_pool *pool = irq->pool; 145 144 146 145 xa_erase(&pool->irqs, irq->index); ··· 158 159 struct mlx5_irq_pool *pool = irq->pool; 159 160 160 161 mutex_lock(&pool->lock); 161 - kref_put(&irq->kref, irq_release); 162 + irq->refcount--; 163 + if (!irq->refcount) 164 + irq_release(irq); 162 165 mutex_unlock(&pool->lock); 166 + } 167 + 168 + static int irq_get_locked(struct mlx5_irq *irq) 169 + { 170 + lockdep_assert_held(&irq->pool->lock); 171 + if (WARN_ON_ONCE(!irq->refcount)) 172 + return 0; 173 + irq->refcount++; 174 + return 1; 175 + } 176 + 177 + static int irq_get(struct mlx5_irq *irq) 178 + { 179 + int err; 180 + 181 + mutex_lock(&irq->pool->lock); 182 + err = irq_get_locked(irq); 183 + mutex_unlock(&irq->pool->lock); 184 + return err; 163 185 } 164 186 165 187 static irqreturn_t irq_int_handler(int irq, void *nh) ··· 234 214 err = -ENOMEM; 235 215 goto err_cpumask; 236 216 } 237 - kref_init(&irq->kref); 217 + irq->refcount = 1; 238 218 irq->index = i; 239 219 err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL)); 240 220 if (err) { ··· 255 235 256 236 int mlx5_irq_attach_nb(struct 
mlx5_irq *irq, struct notifier_block *nb) 257 237 { 258 - int err; 238 + int ret; 259 239 260 - err = kref_get_unless_zero(&irq->kref); 261 - if (WARN_ON_ONCE(!err)) 240 + ret = irq_get(irq); 241 + if (!ret) 262 242 /* Something very bad happens here, we are enabling EQ 263 243 * on non-existing IRQ. 264 244 */ 265 245 return -ENOENT; 266 - err = atomic_notifier_chain_register(&irq->nh, nb); 267 - if (err) 246 + ret = atomic_notifier_chain_register(&irq->nh, nb); 247 + if (ret) 268 248 irq_put(irq); 269 - return err; 249 + return ret; 270 250 } 271 251 272 252 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb) ··· 321 301 xa_for_each_range(&pool->irqs, index, iter, start, end) { 322 302 if (!cpumask_equal(iter->mask, affinity)) 323 303 continue; 324 - if (kref_read(&iter->kref) < pool->min_threshold) 304 + if (iter->refcount < pool->min_threshold) 325 305 return iter; 326 - if (!irq || kref_read(&iter->kref) < 327 - kref_read(&irq->kref)) 306 + if (!irq || iter->refcount < irq->refcount) 328 307 irq = iter; 329 308 } 330 309 return irq; ··· 338 319 mutex_lock(&pool->lock); 339 320 least_loaded_irq = irq_pool_find_least_loaded(pool, affinity); 340 321 if (least_loaded_irq && 341 - kref_read(&least_loaded_irq->kref) < pool->min_threshold) 322 + least_loaded_irq->refcount < pool->min_threshold) 342 323 goto out; 343 324 new_irq = irq_pool_create_irq(pool, affinity); 344 325 if (IS_ERR(new_irq)) { ··· 356 337 least_loaded_irq = new_irq; 357 338 goto unlock; 358 339 out: 359 - kref_get(&least_loaded_irq->kref); 360 - if (kref_read(&least_loaded_irq->kref) > pool->max_threshold) 340 + irq_get_locked(least_loaded_irq); 341 + if (least_loaded_irq->refcount > pool->max_threshold) 361 342 mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n", 362 343 least_loaded_irq->irqn, pool->name, 363 - kref_read(&least_loaded_irq->kref) / MLX5_EQ_REFS_PER_IRQ); 344 + least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ); 364 345 unlock: 365 
346 mutex_unlock(&pool->lock); 366 347 return least_loaded_irq; ··· 376 357 mutex_lock(&pool->lock); 377 358 irq = xa_load(&pool->irqs, vecidx); 378 359 if (irq) { 379 - kref_get(&irq->kref); 360 + irq_get_locked(irq); 380 361 goto unlock; 381 362 } 382 363 irq = irq_request(pool, vecidx); ··· 443 424 return irq; 444 425 mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n", 445 426 irq->irqn, cpumask_pr_args(affinity), 446 - kref_read(&irq->kref) / MLX5_EQ_REFS_PER_IRQ); 427 + irq->refcount / MLX5_EQ_REFS_PER_IRQ); 447 428 return irq; 448 429 } 449 430 ··· 475 456 struct mlx5_irq *irq; 476 457 unsigned long index; 477 458 459 + /* There are cases in which we are destrying the irq_table before 460 + * freeing all the IRQs, fast teardown for example. Hence, free the irqs 461 + * which might not have been freed. 462 + */ 478 463 xa_for_each(&pool->irqs, index, irq) 479 - irq_release(&irq->kref); 464 + irq_release(irq); 480 465 xa_destroy(&pool->irqs); 481 466 kvfree(pool); 482 467 } ··· 502 479 if (!mlx5_sf_max_functions(dev)) 503 480 return 0; 504 481 if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) { 505 - mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n"); 482 + mlx5_core_dbg(dev, "Not enught IRQs for SFs. SF may run at lower performance\n"); 506 483 return 0; 507 484 } 508 485 ··· 620 597 return; 621 598 622 599 /* There are cases where IRQs still will be in used when we reaching 623 - * to here. Hence, making sure all the irqs are realeased. 600 + * to here. Hence, making sure all the irqs are released. 624 601 */ 625 602 irq_pools_destroy(table); 626 603 pci_free_irq_vectors(dev->pdev);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
··· 39 39 struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev); 40 40 struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); 41 41 42 - return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum); 42 + return sysfs_emit(buf, "%u\n", sf_dev->sfnum); 43 43 } 44 44 static DEVICE_ATTR_RO(sfnum); 45 45
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
··· 476 476 return; 477 477 478 478 /* Balances with refcount_set; drop the reference so that new user cmd cannot start 479 - * and new vhca event handler cannnot run. 479 + * and new vhca event handler cannot run. 480 480 */ 481 481 mlx5_sf_table_put(table); 482 482 wait_for_completion(&table->disable_complete);
+37 -34
include/linux/mlx5/device.h
··· 1038 1038 struct mlx5_mkey_seg { 1039 1039 /* This is a two bit field occupying bits 31-30. 1040 1040 * bit 31 is always 0, 1041 - * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation 1041 + * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have translation 1042 1042 */ 1043 1043 u8 status; 1044 1044 u8 pcie_control; ··· 1157 1157 HCA_CAP_OPMOD_GET_CUR = 1, 1158 1158 }; 1159 1159 1160 + /* Any new cap addition must update mlx5_hca_caps_alloc() to allocate 1161 + * capability memory. 1162 + */ 1160 1163 enum mlx5_cap_type { 1161 1164 MLX5_CAP_GENERAL = 0, 1162 1165 MLX5_CAP_ETHERNET_OFFLOADS, ··· 1216 1213 1217 1214 /* GET Dev Caps macros */ 1218 1215 #define MLX5_CAP_GEN(mdev, cap) \ 1219 - MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) 1216 + MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap) 1220 1217 1221 1218 #define MLX5_CAP_GEN_64(mdev, cap) \ 1222 - MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) 1219 + MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap) 1223 1220 1224 1221 #define MLX5_CAP_GEN_MAX(mdev, cap) \ 1225 - MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) 1222 + MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap) 1226 1223 1227 1224 #define MLX5_CAP_GEN_2(mdev, cap) \ 1228 - MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap) 1225 + MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap) 1229 1226 1230 1227 #define MLX5_CAP_GEN_2_64(mdev, cap) \ 1231 - MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap) 1228 + MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap) 1232 1229 1233 1230 #define MLX5_CAP_GEN_2_MAX(mdev, cap) \ 1234 - MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_max[MLX5_CAP_GENERAL_2], cap) 1231 + MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap) 1235 1232 1236 1233 #define MLX5_CAP_ETH(mdev, cap) \ 
1237 1234 MLX5_GET(per_protocol_networking_offload_caps,\ 1238 - mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) 1235 + mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap) 1239 1236 1240 1237 #define MLX5_CAP_ETH_MAX(mdev, cap) \ 1241 1238 MLX5_GET(per_protocol_networking_offload_caps,\ 1242 - mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) 1239 + mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap) 1243 1240 1244 1241 #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \ 1245 1242 MLX5_GET(per_protocol_networking_offload_caps,\ 1246 - mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap) 1243 + mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap) 1247 1244 1248 1245 #define MLX5_CAP_ROCE(mdev, cap) \ 1249 - MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap) 1246 + MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap) 1250 1247 1251 1248 #define MLX5_CAP_ROCE_MAX(mdev, cap) \ 1252 - MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap) 1249 + MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap) 1253 1250 1254 1251 #define MLX5_CAP_ATOMIC(mdev, cap) \ 1255 - MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap) 1252 + MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap) 1256 1253 1257 1254 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ 1258 - MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap) 1255 + MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap) 1259 1256 1260 1257 #define MLX5_CAP_FLOWTABLE(mdev, cap) \ 1261 - MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) 1258 + MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) 1262 1259 1263 1260 #define MLX5_CAP64_FLOWTABLE(mdev, cap) \ 1264 - MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) 1261 + MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) 1265 1262 1266 1263 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ 1267 - 
MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) 1264 + MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap) 1268 1265 1269 1266 #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ 1270 1267 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) ··· 1304 1301 1305 1302 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ 1306 1303 MLX5_GET(flow_table_eswitch_cap, \ 1307 - mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) 1304 + mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) 1308 1305 1309 1306 #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ 1310 1307 MLX5_GET(flow_table_eswitch_cap, \ 1311 - mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) 1308 + mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap) 1312 1309 1313 1310 #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ 1314 1311 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) ··· 1330 1327 1331 1328 #define MLX5_CAP_ESW(mdev, cap) \ 1332 1329 MLX5_GET(e_switch_cap, \ 1333 - mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) 1330 + mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap) 1334 1331 1335 1332 #define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \ 1336 1333 MLX5_GET64(flow_table_eswitch_cap, \ 1337 - (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) 1334 + (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) 1338 1335 1339 1336 #define MLX5_CAP_ESW_MAX(mdev, cap) \ 1340 1337 MLX5_GET(e_switch_cap, \ 1341 - mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap) 1338 + mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap) 1342 1339 1343 1340 #define MLX5_CAP_ODP(mdev, cap)\ 1344 - MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) 1341 + MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap) 1345 1342 1346 1343 #define MLX5_CAP_ODP_MAX(mdev, cap)\ 1347 - MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap) 1344 + MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap) 1348 1345 1349 1346 #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ 1350 1347 
MLX5_GET(vector_calc_cap, \ 1351 - mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) 1348 + mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap) 1352 1349 1353 1350 #define MLX5_CAP_QOS(mdev, cap)\ 1354 - MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap) 1351 + MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap) 1355 1352 1356 1353 #define MLX5_CAP_DEBUG(mdev, cap)\ 1357 - MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap) 1354 + MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap) 1358 1355 1359 1356 #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ 1360 1357 MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) ··· 1390 1387 MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) 1391 1388 1392 1389 #define MLX5_CAP_DEV_MEM(mdev, cap)\ 1393 - MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) 1390 + MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap) 1394 1391 1395 1392 #define MLX5_CAP64_DEV_MEM(mdev, cap)\ 1396 - MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) 1393 + MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap) 1397 1394 1398 1395 #define MLX5_CAP_TLS(mdev, cap) \ 1399 - MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap) 1396 + MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap) 1400 1397 1401 1398 #define MLX5_CAP_DEV_EVENT(mdev, cap)\ 1402 - MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) 1399 + MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap) 1403 1400 1404 1401 #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\ 1405 1402 MLX5_GET(virtio_emulation_cap, \ 1406 - (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) 1403 + (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap) 1407 1404 1408 1405 #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\ 1409 1406 MLX5_GET64(virtio_emulation_cap, \ 1410 - (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) 1407 + 
(mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap) 1411 1408 1412 1409 #define MLX5_CAP_IPSEC(mdev, cap)\ 1413 - MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap) 1410 + MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap) 1414 1411 1415 1412 enum { 1416 1413 MLX5_CMD_STAT_OK = 0x0,
+9 -6
include/linux/mlx5/driver.h
··· 581 581 /* end: qp staff */ 582 582 583 583 /* start: alloc staff */ 584 - /* protect buffer alocation according to numa node */ 584 + /* protect buffer allocation according to numa node */ 585 585 struct mutex alloc_mutex; 586 586 int numa_node; 587 587 ··· 623 623 }; 624 624 625 625 enum mlx5_device_state { 626 - MLX5_DEVICE_STATE_UNINITIALIZED, 627 - MLX5_DEVICE_STATE_UP, 626 + MLX5_DEVICE_STATE_UP = 1, 628 627 MLX5_DEVICE_STATE_INTERNAL_ERROR, 629 628 }; 630 629 ··· 729 730 } mr_cache[MAX_MR_CACHE_ENTRIES]; 730 731 }; 731 732 733 + struct mlx5_hca_cap { 734 + u32 cur[MLX5_UN_SZ_DW(hca_cap_union)]; 735 + u32 max[MLX5_UN_SZ_DW(hca_cap_union)]; 736 + }; 737 + 732 738 struct mlx5_core_dev { 733 739 struct device *device; 734 740 enum mlx5_coredev_type coredev_type; ··· 745 741 char board_id[MLX5_BOARD_ID_LEN]; 746 742 struct mlx5_cmd cmd; 747 743 struct { 748 - u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; 749 - u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; 744 + struct mlx5_hca_cap *hca[MLX5_CAP_NUM]; 750 745 u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; 751 746 u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)]; 752 747 u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; ··· 1114 1111 } 1115 1112 1116 1113 /* Async-atomic event notifier used by mlx5 core to forward FW 1117 - * evetns recived from event queue to mlx5 consumers. 1114 + * evetns received from event queue to mlx5 consumers. 1118 1115 * Optimise event queue dipatching. 1119 1116 */ 1120 1117 int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);