Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlxsw-refactor-reference-counting-code'

Petr Machata says:

====================
mlxsw: Refactor reference counting code

Amit Cohen writes:

This set converts all reference counters defined as 'unsigned int' to
refcount_t type. The reference counting of LAGs can be simplified, so first
refactor the related code and then change the type of the reference
counter.

Patch set overview:
Patches #1-#4 are preparations for the LAG refactor
Patch #5 refactors the LAG code and changes the type of the reference counter
Patch #6 converts the remaining reference counters in the mlxsw driver
====================

Link: https://lore.kernel.org/r/cover.1706293430.git.petrm@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+130 -121
+8 -8
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
··· 95 95 */ 96 96 has_trap:1, 97 97 has_police:1; 98 - unsigned int ref_count; 98 + refcount_t ref_count; 99 99 struct mlxsw_afa_set *next; /* Pointer to the next set. */ 100 100 struct mlxsw_afa_set *prev; /* Pointer to the previous set, 101 101 * note that set may have multiple ··· 120 120 struct rhash_head ht_node; 121 121 struct mlxsw_afa_fwd_entry_ht_key ht_key; 122 122 u32 kvdl_index; 123 - unsigned int ref_count; 123 + refcount_t ref_count; 124 124 }; 125 125 126 126 static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = { ··· 282 282 /* Need to initialize the set to pass by default */ 283 283 mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0); 284 284 set->ht_key.is_first = is_first; 285 - set->ref_count = 1; 285 + refcount_set(&set->ref_count, 1); 286 286 return set; 287 287 } 288 288 ··· 330 330 static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa, 331 331 struct mlxsw_afa_set *set) 332 332 { 333 - if (--set->ref_count) 333 + if (!refcount_dec_and_test(&set->ref_count)) 334 334 return; 335 335 if (set->shared) 336 336 mlxsw_afa_set_unshare(mlxsw_afa, set); ··· 350 350 set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key, 351 351 mlxsw_afa_set_ht_params); 352 352 if (set) { 353 - set->ref_count++; 353 + refcount_inc(&set->ref_count); 354 354 mlxsw_afa_set_put(mlxsw_afa, orig_set); 355 355 } else { 356 356 set = orig_set; ··· 564 564 if (!fwd_entry) 565 565 return ERR_PTR(-ENOMEM); 566 566 fwd_entry->ht_key.local_port = local_port; 567 - fwd_entry->ref_count = 1; 567 + refcount_set(&fwd_entry->ref_count, 1); 568 568 569 569 err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht, 570 570 &fwd_entry->ht_node, ··· 607 607 fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key, 608 608 mlxsw_afa_fwd_entry_ht_params); 609 609 if (fwd_entry) { 610 - fwd_entry->ref_count++; 610 + refcount_inc(&fwd_entry->ref_count); 611 611 return fwd_entry; 612 612 } 613 613 return 
mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port); ··· 616 616 static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa, 617 617 struct mlxsw_afa_fwd_entry *fwd_entry) 618 618 { 619 - if (--fwd_entry->ref_count) 619 + if (!refcount_dec_and_test(&fwd_entry->ref_count)) 620 620 return; 621 621 mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry); 622 622 }
+5 -4
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
··· 5 5 #include <linux/slab.h> 6 6 #include <linux/list.h> 7 7 #include <linux/errno.h> 8 + #include <linux/refcount.h> 8 9 9 10 #include "item.h" 10 11 #include "core_acl_flex_keys.h" ··· 108 107 109 108 struct mlxsw_afk_key_info { 110 109 struct list_head list; 111 - unsigned int ref_count; 110 + refcount_t ref_count; 112 111 unsigned int blocks_count; 113 112 int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value 114 113 * is index inside "blocks" ··· 335 334 if (err) 336 335 goto err_picker; 337 336 list_add(&key_info->list, &mlxsw_afk->key_info_list); 338 - key_info->ref_count = 1; 337 + refcount_set(&key_info->ref_count, 1); 339 338 return key_info; 340 339 341 340 err_picker: ··· 357 356 358 357 key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage); 359 358 if (key_info) { 360 - key_info->ref_count++; 359 + refcount_inc(&key_info->ref_count); 361 360 return key_info; 362 361 } 363 362 return mlxsw_afk_key_info_create(mlxsw_afk, elusage); ··· 366 365 367 366 void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info) 368 367 { 369 - if (--key_info->ref_count) 368 + if (!refcount_dec_and_test(&key_info->ref_count)) 370 369 return; 371 370 mlxsw_afk_key_info_destroy(key_info); 372 371 }
+87 -73
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 2695 2695 static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp) 2696 2696 { 2697 2697 char sgcr_pl[MLXSW_REG_SGCR_LEN]; 2698 - u16 max_lag; 2699 2698 int err; 2700 2699 2701 2700 if (mlxsw_core_lag_mode(mlxsw_sp->core) != 2702 2701 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) 2703 2702 return 0; 2704 2703 2705 - err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); 2706 - if (err) 2707 - return err; 2708 - 2709 2704 /* In DDD mode, which we by default use, each LAG entry is 8 PGT 2710 2705 * entries. The LAG table address needs to be 8-aligned, but that ought 2711 2706 * to be the case, since the LAG table is allocated first. 2712 2707 */ 2713 2708 err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base, 2714 - max_lag * 8); 2709 + mlxsw_sp->max_lag * 8); 2715 2710 if (err) 2716 2711 return err; 2717 2712 if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) { ··· 2723 2728 2724 2729 err_mid_alloc_range: 2725 2730 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, 2726 - max_lag * 8); 2731 + mlxsw_sp->max_lag * 8); 2727 2732 return err; 2728 2733 } 2729 2734 2730 2735 static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp) 2731 2736 { 2732 - u16 max_lag; 2733 - int err; 2734 - 2735 2737 if (mlxsw_core_lag_mode(mlxsw_sp->core) != 2736 2738 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) 2737 2739 return; 2738 2740 2739 - err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); 2740 - if (err) 2741 - return; 2742 - 2743 2741 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, 2744 - max_lag * 8); 2742 + mlxsw_sp->max_lag * 8); 2745 2743 } 2746 2744 2747 2745 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe 2748 2746 2747 + struct mlxsw_sp_lag { 2748 + struct net_device *dev; 2749 + refcount_t ref_count; 2750 + u16 lag_id; 2751 + }; 2752 + 2749 2753 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) 2750 2754 { 2751 2755 char slcr_pl[MLXSW_REG_SLCR_LEN]; 2752 - u16 max_lag; 2753 2756 u32 seed; 2754 2757 int err; 2755 2758 ··· 2766 2773 if 
(err) 2767 2774 return err; 2768 2775 2769 - err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); 2776 + err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag); 2770 2777 if (err) 2771 2778 return err; 2772 2779 ··· 2777 2784 if (err) 2778 2785 return err; 2779 2786 2780 - mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper), 2787 + mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag), 2781 2788 GFP_KERNEL); 2782 2789 if (!mlxsw_sp->lags) { 2783 2790 err = -ENOMEM; ··· 4262 4269 } 4263 4270 } 4264 4271 4265 - static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4272 + static struct mlxsw_sp_lag * 4273 + mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, 4274 + struct netlink_ext_ack *extack) 4266 4275 { 4267 4276 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4277 + struct mlxsw_sp_lag *lag; 4278 + u16 lag_id; 4279 + int i, err; 4268 4280 4281 + for (i = 0; i < mlxsw_sp->max_lag; i++) { 4282 + if (!mlxsw_sp->lags[i].dev) 4283 + break; 4284 + } 4285 + 4286 + if (i == mlxsw_sp->max_lag) { 4287 + NL_SET_ERR_MSG_MOD(extack, 4288 + "Exceeded number of supported LAG devices"); 4289 + return ERR_PTR(-EBUSY); 4290 + } 4291 + 4292 + lag_id = i; 4269 4293 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); 4270 - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4294 + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4295 + if (err) 4296 + return ERR_PTR(err); 4297 + 4298 + lag = &mlxsw_sp->lags[lag_id]; 4299 + lag->lag_id = lag_id; 4300 + lag->dev = lag_dev; 4301 + refcount_set(&lag->ref_count, 1); 4302 + 4303 + return lag; 4271 4304 } 4272 4305 4273 - static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 4306 + static int 4307 + mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag) 4274 4308 { 4275 4309 char sldr_pl[MLXSW_REG_SLDR_LEN]; 4276 4310 4277 - mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); 4311 + lag->dev = NULL; 4312 + 4313 + 
mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id); 4278 4314 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); 4279 4315 } 4280 4316 ··· 4351 4329 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); 4352 4330 } 4353 4331 4354 - static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, 4355 - struct net_device *lag_dev, 4356 - u16 *p_lag_id) 4332 + static struct mlxsw_sp_lag * 4333 + mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev) 4357 4334 { 4358 - struct mlxsw_sp_upper *lag; 4359 - int free_lag_id = -1; 4360 - u16 max_lag; 4361 - int err, i; 4335 + int i; 4362 4336 4363 - err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); 4364 - if (err) 4365 - return err; 4337 + for (i = 0; i < mlxsw_sp->max_lag; i++) { 4338 + if (!mlxsw_sp->lags[i].dev) 4339 + continue; 4366 4340 4367 - for (i = 0; i < max_lag; i++) { 4368 - lag = mlxsw_sp_lag_get(mlxsw_sp, i); 4369 - if (lag->ref_count) { 4370 - if (lag->dev == lag_dev) { 4371 - *p_lag_id = i; 4372 - return 0; 4373 - } 4374 - } else if (free_lag_id < 0) { 4375 - free_lag_id = i; 4376 - } 4341 + if (mlxsw_sp->lags[i].dev == lag_dev) 4342 + return &mlxsw_sp->lags[i]; 4377 4343 } 4378 - if (free_lag_id < 0) 4379 - return -EBUSY; 4380 - *p_lag_id = free_lag_id; 4381 - return 0; 4344 + 4345 + return NULL; 4346 + } 4347 + 4348 + static struct mlxsw_sp_lag * 4349 + mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, 4350 + struct netlink_ext_ack *extack) 4351 + { 4352 + struct mlxsw_sp_lag *lag; 4353 + 4354 + lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev); 4355 + if (lag) { 4356 + refcount_inc(&lag->ref_count); 4357 + return lag; 4358 + } 4359 + 4360 + return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack); 4361 + } 4362 + 4363 + static void 4364 + mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag) 4365 + { 4366 + if (!refcount_dec_and_test(&lag->ref_count)) 4367 + return; 4368 + 4369 + mlxsw_sp_lag_destroy(mlxsw_sp, lag); 4382 4370 } 
4383 4371 4384 4372 static bool ··· 4397 4365 struct netdev_lag_upper_info *lag_upper_info, 4398 4366 struct netlink_ext_ack *extack) 4399 4367 { 4400 - u16 lag_id; 4401 - 4402 - if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { 4403 - NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); 4404 - return false; 4405 - } 4406 4368 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 4407 4369 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 4408 4370 return false; ··· 4508 4482 struct netlink_ext_ack *extack) 4509 4483 { 4510 4484 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4511 - struct mlxsw_sp_upper *lag; 4485 + struct mlxsw_sp_lag *lag; 4512 4486 u16 lag_id; 4513 4487 u8 port_index; 4514 4488 int err; 4515 4489 4516 - err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); 4517 - if (err) 4518 - return err; 4519 - lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4520 - if (!lag->ref_count) { 4521 - err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); 4522 - if (err) 4523 - return err; 4524 - lag->dev = lag_dev; 4525 - } 4490 + lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack); 4491 + if (IS_ERR(lag)) 4492 + return PTR_ERR(lag); 4526 4493 4494 + lag_id = lag->lag_id; 4527 4495 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); 4528 4496 if (err) 4529 4497 return err; ··· 4535 4515 mlxsw_sp_port->local_port); 4536 4516 mlxsw_sp_port->lag_id = lag_id; 4537 4517 mlxsw_sp_port->lagged = 1; 4538 - lag->ref_count++; 4539 4518 4540 4519 err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port); 4541 4520 if (err) ··· 4561 4542 err_router_join: 4562 4543 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port); 4563 4544 err_fid_port_join_lag: 4564 - lag->ref_count--; 4565 4545 mlxsw_sp_port->lagged = 0; 4566 4546 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4567 4547 mlxsw_sp_port->local_port); ··· 4568 4550 err_col_port_add: 4569 4551 mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev); 4570 4552 
err_lag_uppers_bridge_join: 4571 - if (!lag->ref_count) 4572 - mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4553 + mlxsw_sp_lag_put(mlxsw_sp, lag); 4573 4554 return err; 4574 4555 } 4575 4556 ··· 4577 4560 { 4578 4561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 4579 4562 u16 lag_id = mlxsw_sp_port->lag_id; 4580 - struct mlxsw_sp_upper *lag; 4563 + struct mlxsw_sp_lag *lag; 4581 4564 4582 4565 if (!mlxsw_sp_port->lagged) 4583 4566 return; 4584 - lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); 4585 - WARN_ON(lag->ref_count == 0); 4567 + lag = &mlxsw_sp->lags[lag_id]; 4586 4568 4587 4569 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 4588 4570 ··· 4595 4579 4596 4580 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port); 4597 4581 4598 - if (lag->ref_count == 1) 4599 - mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 4582 + mlxsw_sp_lag_put(mlxsw_sp, lag); 4600 4583 4601 4584 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, 4602 4585 mlxsw_sp_port->local_port); 4603 4586 mlxsw_sp_port->lagged = 0; 4604 - lag->ref_count--; 4605 4587 4606 4588 /* Make sure untagged frames are allowed to ingress */ 4607 4589 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
+3 -12
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
··· 78 78 enum mlxsw_sp_l3proto; 79 79 union mlxsw_sp_l3addr; 80 80 81 - struct mlxsw_sp_upper { 82 - struct net_device *dev; 83 - unsigned int ref_count; 84 - }; 85 - 86 81 enum mlxsw_sp_rif_type { 87 82 MLXSW_SP_RIF_TYPE_SUBPORT, 88 83 MLXSW_SP_RIF_TYPE_VLAN, ··· 131 136 struct mlxsw_sp_qdisc_state; 132 137 struct mlxsw_sp_mall_entry; 133 138 struct mlxsw_sp_pgt; 139 + struct mlxsw_sp_lag; 134 140 135 141 struct mlxsw_sp_port_mapping { 136 142 u8 module; ··· 160 164 const struct mlxsw_bus_info *bus_info; 161 165 unsigned char base_mac[ETH_ALEN]; 162 166 const unsigned char *mac_mask; 163 - struct mlxsw_sp_upper *lags; 167 + struct mlxsw_sp_lag *lags; 168 + u16 max_lag; 164 169 struct mlxsw_sp_port_mapping *port_mapping; 165 170 struct mlxsw_sp_port_mapping_events port_mapping_events; 166 171 struct rhashtable sample_trigger_ht; ··· 253 256 int (*init)(struct mlxsw_sp *mlxsw_sp); 254 257 void (*fini)(struct mlxsw_sp *mlxsw_sp); 255 258 }; 256 - 257 - static inline struct mlxsw_sp_upper * 258 - mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id) 259 - { 260 - return &mlxsw_sp->lags[lag_id]; 261 - } 262 259 263 260 struct mlxsw_sp_port_pcpu_stats { 264 261 u64 rx_packets;
+6 -5
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
··· 9 9 #include <linux/rhashtable.h> 10 10 #include <linux/netdevice.h> 11 11 #include <linux/mutex.h> 12 + #include <linux/refcount.h> 12 13 #include <net/net_namespace.h> 13 14 #include <net/tc_act/tc_vlan.h> 14 15 ··· 56 55 struct rhash_head ht_node; /* Member of acl HT */ 57 56 struct mlxsw_sp_acl_ruleset_ht_key ht_key; 58 57 struct rhashtable rule_ht; 59 - unsigned int ref_count; 58 + refcount_t ref_count; 60 59 unsigned int min_prio; 61 60 unsigned int max_prio; 62 61 unsigned long priv[]; ··· 100 99 mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset) 101 100 { 102 101 /* We hold a reference on ruleset ourselves */ 103 - return ruleset->ref_count == 2; 102 + return refcount_read(&ruleset->ref_count) == 2; 104 103 } 105 104 106 105 int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, ··· 177 176 ruleset = kzalloc(alloc_size, GFP_KERNEL); 178 177 if (!ruleset) 179 178 return ERR_PTR(-ENOMEM); 180 - ruleset->ref_count = 1; 179 + refcount_set(&ruleset->ref_count, 1); 181 180 ruleset->ht_key.block = block; 182 181 ruleset->ht_key.chain_index = chain_index; 183 182 ruleset->ht_key.ops = ops; ··· 223 222 224 223 static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset) 225 224 { 226 - ruleset->ref_count++; 225 + refcount_inc(&ruleset->ref_count); 227 226 } 228 227 229 228 static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, 230 229 struct mlxsw_sp_acl_ruleset *ruleset) 231 230 { 232 - if (--ruleset->ref_count) 231 + if (!refcount_dec_and_test(&ruleset->ref_count)) 233 232 return; 234 233 mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); 235 234 }
+9 -8
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
··· 9 9 #include <linux/rhashtable.h> 10 10 #include <linux/netdevice.h> 11 11 #include <linux/mutex.h> 12 + #include <linux/refcount.h> 12 13 #include <net/devlink.h> 13 14 #include <trace/events/mlxsw.h> 14 15 ··· 156 155 struct mlxsw_sp_acl_tcam_rehash_ctx ctx; 157 156 } rehash; 158 157 struct mlxsw_sp *mlxsw_sp; 159 - unsigned int ref_count; 158 + refcount_t ref_count; 160 159 }; 161 160 162 161 struct mlxsw_sp_acl_tcam_vchunk; ··· 177 176 unsigned int priority; /* Priority within the vregion and group */ 178 177 struct mlxsw_sp_acl_tcam_vgroup *vgroup; 179 178 struct mlxsw_sp_acl_tcam_vregion *vregion; 180 - unsigned int ref_count; 179 + refcount_t ref_count; 181 180 }; 182 181 183 182 struct mlxsw_sp_acl_tcam_entry { ··· 770 769 vregion->tcam = tcam; 771 770 vregion->mlxsw_sp = mlxsw_sp; 772 771 vregion->vgroup = vgroup; 773 - vregion->ref_count = 1; 772 + refcount_set(&vregion->ref_count, 1); 774 773 775 774 vregion->key_info = mlxsw_afk_key_info_get(afk, elusage); 776 775 if (IS_ERR(vregion->key_info)) { ··· 857 856 */ 858 857 return ERR_PTR(-EOPNOTSUPP); 859 858 } 860 - vregion->ref_count++; 859 + refcount_inc(&vregion->ref_count); 861 860 return vregion; 862 861 } 863 862 ··· 872 871 mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp, 873 872 struct mlxsw_sp_acl_tcam_vregion *vregion) 874 873 { 875 - if (--vregion->ref_count) 874 + if (!refcount_dec_and_test(&vregion->ref_count)) 876 875 return; 877 876 mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion); 878 877 } ··· 925 924 INIT_LIST_HEAD(&vchunk->ventry_list); 926 925 vchunk->priority = priority; 927 926 vchunk->vgroup = vgroup; 928 - vchunk->ref_count = 1; 927 + refcount_set(&vchunk->ref_count, 1); 929 928 930 929 vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup, 931 930 priority, elusage); ··· 1009 1008 if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info, 1010 1009 elusage))) 1011 1010 return ERR_PTR(-EINVAL); 1012 - vchunk->ref_count++; 1011 + 
refcount_inc(&vchunk->ref_count); 1013 1012 return vchunk; 1014 1013 } 1015 1014 return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup, ··· 1020 1019 mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp, 1021 1020 struct mlxsw_sp_acl_tcam_vchunk *vchunk) 1022 1021 { 1023 - if (--vchunk->ref_count) 1022 + if (!refcount_dec_and_test(&vchunk->ref_count)) 1024 1023 return; 1025 1024 mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk); 1026 1025 }
+8 -7
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 501 501 502 502 struct mlxsw_sp_lpm_tree { 503 503 u8 id; /* tree ID */ 504 - unsigned int ref_count; 504 + refcount_t ref_count; 505 505 enum mlxsw_sp_l3proto proto; 506 506 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; 507 507 struct mlxsw_sp_prefix_usage prefix_usage; ··· 578 578 579 579 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) { 580 580 lpm_tree = &mlxsw_sp->router->lpm.trees[i]; 581 - if (lpm_tree->ref_count == 0) 581 + if (refcount_read(&lpm_tree->ref_count) == 0) 582 582 return lpm_tree; 583 583 } 584 584 return NULL; ··· 654 654 sizeof(lpm_tree->prefix_usage)); 655 655 memset(&lpm_tree->prefix_ref_count, 0, 656 656 sizeof(lpm_tree->prefix_ref_count)); 657 - lpm_tree->ref_count = 1; 657 + refcount_set(&lpm_tree->ref_count, 1); 658 658 return lpm_tree; 659 659 660 660 err_left_struct_set: ··· 678 678 679 679 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) { 680 680 lpm_tree = &mlxsw_sp->router->lpm.trees[i]; 681 - if (lpm_tree->ref_count != 0 && 681 + if (refcount_read(&lpm_tree->ref_count) && 682 682 lpm_tree->proto == proto && 683 683 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, 684 684 prefix_usage)) { ··· 691 691 692 692 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree) 693 693 { 694 - lpm_tree->ref_count++; 694 + refcount_inc(&lpm_tree->ref_count); 695 695 } 696 696 697 697 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, 698 698 struct mlxsw_sp_lpm_tree *lpm_tree) 699 699 { 700 - if (--lpm_tree->ref_count == 0) 701 - mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree); 700 + if (!refcount_dec_and_test(&lpm_tree->ref_count)) 701 + return; 702 + mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree); 702 703 } 703 704 704 705 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
+4 -4
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 61 61 struct mlxsw_sp_bridge_device *bridge_device; 62 62 struct list_head list; 63 63 struct list_head vlans_list; 64 - unsigned int ref_count; 64 + refcount_t ref_count; 65 65 u8 stp_state; 66 66 unsigned long flags; 67 67 bool mrouter; ··· 495 495 BR_MCAST_FLOOD; 496 496 INIT_LIST_HEAD(&bridge_port->vlans_list); 497 497 list_add(&bridge_port->list, &bridge_device->ports_list); 498 - bridge_port->ref_count = 1; 498 + refcount_set(&bridge_port->ref_count, 1); 499 499 500 500 err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev, 501 501 NULL, NULL, NULL, false, extack); ··· 531 531 532 532 bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev); 533 533 if (bridge_port) { 534 - bridge_port->ref_count++; 534 + refcount_inc(&bridge_port->ref_count); 535 535 return bridge_port; 536 536 } 537 537 ··· 558 558 { 559 559 struct mlxsw_sp_bridge_device *bridge_device; 560 560 561 - if (--bridge_port->ref_count != 0) 561 + if (!refcount_dec_and_test(&bridge_port->ref_count)) 562 562 return; 563 563 bridge_device = bridge_port->bridge_device; 564 564 mlxsw_sp_bridge_port_destroy(bridge_port);