Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'take-devlink-lock-on-mlx4-and-mlx5-callbacks'

Moshe Shemesh says:

====================
Take devlink lock on mlx4 and mlx5 callbacks

Prepare the mlx4 and mlx5 drivers to have all devlink callbacks called
with the devlink instance locked. Change the mlx4 driver to use the
devl_ API where needed so that the devlink reload callbacks run locked.
Change the mlx5 driver to use the devl_ API where needed so that the
devlink reload and devlink health callbacks run locked.

As mlx5 is the only driver that needed changes to enable calling health
callbacks with the devlink instance locked, this patchset also removes
the DEVLINK_NL_FLAG_NO_LOCK flag from the devlink health callbacks.

This patchset will be followed by a patchset that will remove the
DEVLINK_NL_FLAG_NO_LOCK flag from devlink entirely and will remove devlink_mutex.
====================

Link: https://lore.kernel.org/r/1659023630-32006-1-git-send-email-moshe@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+227 -133
+5
drivers/net/ethernet/mellanox/mlx4/catas.c
··· 204 204 205 205 static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist) 206 206 { 207 + struct mlx4_dev *dev = persist->dev; 208 + struct devlink *devlink; 207 209 int err = 0; 208 210 209 211 mlx4_enter_error_state(persist); 212 + devlink = priv_to_devlink(mlx4_priv(dev)); 213 + devl_lock(devlink); 210 214 mutex_lock(&persist->interface_state_mutex); 211 215 if (persist->interface_state & MLX4_INTERFACE_STATE_UP && 212 216 !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) { ··· 219 215 err); 220 216 } 221 217 mutex_unlock(&persist->interface_state_mutex); 218 + devl_unlock(devlink); 222 219 } 223 220 224 221 static void dump_err_buf(struct mlx4_dev *dev)
+10 -10
drivers/net/ethernet/mellanox/mlx4/crdump.c
··· 226 226 227 227 /* Create cr-space region */ 228 228 crdump->region_crspace = 229 - devlink_region_create(devlink, 230 - &region_cr_space_ops, 231 - MAX_NUM_OF_DUMPS_TO_STORE, 232 - pci_resource_len(pdev, 0)); 229 + devl_region_create(devlink, 230 + &region_cr_space_ops, 231 + MAX_NUM_OF_DUMPS_TO_STORE, 232 + pci_resource_len(pdev, 0)); 233 233 if (IS_ERR(crdump->region_crspace)) 234 234 mlx4_warn(dev, "crdump: create devlink region %s err %ld\n", 235 235 region_cr_space_str, ··· 237 237 238 238 /* Create fw-health region */ 239 239 crdump->region_fw_health = 240 - devlink_region_create(devlink, 241 - &region_fw_health_ops, 242 - MAX_NUM_OF_DUMPS_TO_STORE, 243 - HEALTH_BUFFER_SIZE); 240 + devl_region_create(devlink, 241 + &region_fw_health_ops, 242 + MAX_NUM_OF_DUMPS_TO_STORE, 243 + HEALTH_BUFFER_SIZE); 244 244 if (IS_ERR(crdump->region_fw_health)) 245 245 mlx4_warn(dev, "crdump: create devlink region %s err %ld\n", 246 246 region_fw_health_str, ··· 253 253 { 254 254 struct mlx4_fw_crdump *crdump = &dev->persist->crdump; 255 255 256 - devlink_region_destroy(crdump->region_fw_health); 257 - devlink_region_destroy(crdump->region_crspace); 256 + devl_region_destroy(crdump->region_fw_health); 257 + devl_region_destroy(crdump->region_crspace); 258 258 }
+39 -5
drivers/net/ethernet/mellanox/mlx4/main.c
··· 3033 3033 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 3034 3034 int err; 3035 3035 3036 - err = devlink_port_register(devlink, &info->devlink_port, port); 3036 + err = devl_port_register(devlink, &info->devlink_port, port); 3037 3037 if (err) 3038 3038 return err; 3039 3039 ··· 3071 3071 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 3072 3072 if (err) { 3073 3073 mlx4_err(dev, "Failed to create file for port %d\n", port); 3074 - devlink_port_unregister(&info->devlink_port); 3074 + devl_port_unregister(&info->devlink_port); 3075 3075 info->port = -1; 3076 3076 return err; 3077 3077 } ··· 3093 3093 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 3094 3094 device_remove_file(&info->dev->persist->pdev->dev, 3095 3095 &info->port_attr); 3096 - devlink_port_unregister(&info->devlink_port); 3096 + devl_port_unregister(&info->devlink_port); 3097 3097 info->port = -1; 3098 3098 return err; 3099 3099 } ··· 3109 3109 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 3110 3110 device_remove_file(&info->dev->persist->pdev->dev, 3111 3111 &info->port_mtu_attr); 3112 - devlink_port_unregister(&info->devlink_port); 3112 + devl_port_unregister(&info->devlink_port); 3113 3113 3114 3114 #ifdef CONFIG_RFS_ACCEL 3115 3115 free_irq_cpu_rmap(info->rmap); ··· 3333 3333 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3334 3334 int reset_flow) 3335 3335 { 3336 + struct devlink *devlink = priv_to_devlink(priv); 3336 3337 struct mlx4_dev *dev; 3337 3338 unsigned sum = 0; 3338 3339 int err; ··· 3342 3341 struct mlx4_dev_cap *dev_cap = NULL; 3343 3342 int existing_vfs = 0; 3344 3343 3344 + devl_assert_locked(devlink); 3345 3345 dev = &priv->dev; 3346 3346 3347 3347 INIT_LIST_HEAD(&priv->ctx_list); ··· 3958 3956 NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported"); 3959 3957 return -EOPNOTSUPP; 3960 3958 } 3959 + devl_lock(devlink); 3961 3960 if (persist->num_vfs) 3962 3961 mlx4_warn(persist->dev, "Reload 
performed on PF, will cause reset on operating Virtual Functions\n"); 3963 3962 mlx4_restart_one_down(persist->pdev); 3963 + devl_unlock(devlink); 3964 3964 return 0; 3965 3965 } 3966 3966 ··· 3975 3971 struct mlx4_dev_persistent *persist = dev->persist; 3976 3972 int err; 3977 3973 3974 + devl_lock(devlink); 3978 3975 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); 3979 3976 err = mlx4_restart_one_up(persist->pdev, true, devlink); 3977 + devl_unlock(devlink); 3980 3978 if (err) 3981 3979 mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n", 3982 3980 err); ··· 4005 3999 devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev); 4006 4000 if (!devlink) 4007 4001 return -ENOMEM; 4002 + devl_lock(devlink); 4008 4003 priv = devlink_priv(devlink); 4009 4004 4010 4005 dev = &priv->dev; ··· 4033 4026 4034 4027 pci_save_state(pdev); 4035 4028 devlink_set_features(devlink, DEVLINK_F_RELOAD); 4029 + devl_unlock(devlink); 4036 4030 devlink_register(devlink); 4037 4031 return 0; 4038 4032 ··· 4043 4035 err_devlink_unregister: 4044 4036 kfree(dev->persist); 4045 4037 err_devlink_free: 4038 + devl_unlock(devlink); 4046 4039 devlink_free(devlink); 4047 4040 return ret; 4048 4041 } ··· 4065 4056 struct mlx4_dev *dev = persist->dev; 4066 4057 struct mlx4_priv *priv = mlx4_priv(dev); 4067 4058 int pci_dev_data; 4059 + struct devlink *devlink; 4068 4060 int p, i; 4069 4061 4062 + devlink = priv_to_devlink(priv); 4063 + devl_assert_locked(devlink); 4070 4064 if (priv->removed) 4071 4065 return; 4072 4066 ··· 4149 4137 4150 4138 devlink_unregister(devlink); 4151 4139 4140 + devl_lock(devlink); 4152 4141 if (mlx4_is_slave(dev)) 4153 4142 persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; 4154 4143 ··· 4185 4172 devlink_params_unregister(devlink, mlx4_devlink_params, 4186 4173 ARRAY_SIZE(mlx4_devlink_params)); 4187 4174 kfree(dev->persist); 4175 + devl_unlock(devlink); 4188 4176 devlink_free(devlink); 4189 4177 } 4190 4178 ··· 4306 4292 
pci_channel_state_t state) 4307 4293 { 4308 4294 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4295 + struct mlx4_dev *dev = persist->dev; 4296 + struct devlink *devlink; 4309 4297 4310 4298 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); 4311 4299 mlx4_enter_error_state(persist); 4312 4300 4301 + devlink = priv_to_devlink(mlx4_priv(dev)); 4302 + devl_lock(devlink); 4313 4303 mutex_lock(&persist->interface_state_mutex); 4314 4304 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 4315 4305 mlx4_unload_one(pdev); 4316 4306 4317 4307 mutex_unlock(&persist->interface_state_mutex); 4308 + devl_unlock(devlink); 4318 4309 if (state == pci_channel_io_perm_failure) 4319 4310 return PCI_ERS_RESULT_DISCONNECT; 4320 4311 ··· 4352 4333 struct mlx4_dev *dev = persist->dev; 4353 4334 struct mlx4_priv *priv = mlx4_priv(dev); 4354 4335 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 4336 + struct devlink *devlink; 4355 4337 int total_vfs; 4356 4338 int err; 4357 4339 ··· 4360 4340 total_vfs = dev->persist->num_vfs; 4361 4341 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 4362 4342 4343 + devlink = priv_to_devlink(priv); 4344 + devl_lock(devlink); 4363 4345 mutex_lock(&persist->interface_state_mutex); 4364 4346 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 4365 4347 err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, ··· 4380 4358 } 4381 4359 end: 4382 4360 mutex_unlock(&persist->interface_state_mutex); 4383 - 4361 + devl_unlock(devlink); 4384 4362 } 4385 4363 4386 4364 static void mlx4_shutdown(struct pci_dev *pdev) 4387 4365 { 4388 4366 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4389 4367 struct mlx4_dev *dev = persist->dev; 4368 + struct devlink *devlink; 4390 4369 4391 4370 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 4371 + devlink = priv_to_devlink(mlx4_priv(dev)); 4372 + devl_lock(devlink); 4392 4373 mutex_lock(&persist->interface_state_mutex); 4393 4374 if 
(persist->interface_state & MLX4_INTERFACE_STATE_UP) 4394 4375 mlx4_unload_one(pdev); 4395 4376 mutex_unlock(&persist->interface_state_mutex); 4377 + devl_unlock(devlink); 4396 4378 mlx4_pci_disable_device(dev); 4397 4379 } 4398 4380 ··· 4411 4385 struct pci_dev *pdev = to_pci_dev(dev_d); 4412 4386 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4413 4387 struct mlx4_dev *dev = persist->dev; 4388 + struct devlink *devlink; 4414 4389 4415 4390 mlx4_err(dev, "suspend was called\n"); 4391 + devlink = priv_to_devlink(mlx4_priv(dev)); 4392 + devl_lock(devlink); 4416 4393 mutex_lock(&persist->interface_state_mutex); 4417 4394 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 4418 4395 mlx4_unload_one(pdev); 4419 4396 mutex_unlock(&persist->interface_state_mutex); 4397 + devl_unlock(devlink); 4420 4398 4421 4399 return 0; 4422 4400 } ··· 4432 4402 struct mlx4_dev *dev = persist->dev; 4433 4403 struct mlx4_priv *priv = mlx4_priv(dev); 4434 4404 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 4405 + struct devlink *devlink; 4435 4406 int total_vfs; 4436 4407 int ret = 0; 4437 4408 ··· 4440 4409 total_vfs = dev->persist->num_vfs; 4441 4410 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 4442 4411 4412 + devlink = priv_to_devlink(priv); 4413 + devl_lock(devlink); 4443 4414 mutex_lock(&persist->interface_state_mutex); 4444 4415 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 4445 4416 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, ··· 4455 4422 } 4456 4423 } 4457 4424 mutex_unlock(&persist->interface_state_mutex); 4425 + devl_unlock(devlink); 4458 4426 4459 4427 return ret; 4460 4428 }
+4 -15
drivers/net/ethernet/mellanox/mlx5/core/dev.c
··· 335 335 336 336 int mlx5_attach_device(struct mlx5_core_dev *dev) 337 337 { 338 - struct devlink *devlink = priv_to_devlink(dev); 339 338 struct mlx5_priv *priv = &dev->priv; 340 339 struct auxiliary_device *adev; 341 340 struct auxiliary_driver *adrv; 342 341 int ret = 0, i; 343 342 344 - devl_lock(devlink); 343 + devl_assert_locked(priv_to_devlink(dev)); 345 344 mutex_lock(&mlx5_intf_mutex); 346 345 priv->flags &= ~MLX5_PRIV_FLAGS_DETACH; 347 346 priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW; ··· 393 394 } 394 395 priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW; 395 396 mutex_unlock(&mlx5_intf_mutex); 396 - devl_unlock(devlink); 397 397 return ret; 398 398 } 399 399 400 400 void mlx5_detach_device(struct mlx5_core_dev *dev) 401 401 { 402 - struct devlink *devlink = priv_to_devlink(dev); 403 402 struct mlx5_priv *priv = &dev->priv; 404 403 struct auxiliary_device *adev; 405 404 struct auxiliary_driver *adrv; 406 405 pm_message_t pm = {}; 407 406 int i; 408 407 409 - devl_lock(devlink); 408 + devl_assert_locked(priv_to_devlink(dev)); 410 409 mutex_lock(&mlx5_intf_mutex); 411 410 priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW; 412 411 for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) { ··· 438 441 priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW; 439 442 priv->flags |= MLX5_PRIV_FLAGS_DETACH; 440 443 mutex_unlock(&mlx5_intf_mutex); 441 - devl_unlock(devlink); 442 444 } 443 445 444 446 int mlx5_register_device(struct mlx5_core_dev *dev) 445 447 { 446 - struct devlink *devlink; 447 448 int ret; 448 449 449 - devlink = priv_to_devlink(dev); 450 - devl_lock(devlink); 450 + devl_assert_locked(priv_to_devlink(dev)); 451 451 mutex_lock(&mlx5_intf_mutex); 452 452 dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; 453 453 ret = mlx5_rescan_drivers_locked(dev); 454 454 mutex_unlock(&mlx5_intf_mutex); 455 - devl_unlock(devlink); 456 455 if (ret) 457 456 mlx5_unregister_device(dev); 458 457 ··· 457 464 458 465 void mlx5_unregister_device(struct 
mlx5_core_dev *dev) 459 466 { 460 - struct devlink *devlink; 461 - 462 - devlink = priv_to_devlink(dev); 463 - devl_lock(devlink); 467 + devl_assert_locked(priv_to_devlink(dev)); 464 468 mutex_lock(&mlx5_intf_mutex); 465 469 dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; 466 470 mlx5_rescan_drivers_locked(dev); 467 471 mutex_unlock(&mlx5_intf_mutex); 468 - devl_unlock(devlink); 469 472 } 470 473 471 474 static int add_drivers(struct mlx5_core_dev *dev)
+40 -19
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
··· 104 104 if (err) 105 105 return err; 106 106 107 - return mlx5_fw_reset_wait_reset_done(dev); 107 + err = mlx5_fw_reset_wait_reset_done(dev); 108 + if (err) 109 + return err; 110 + 111 + mlx5_unload_one_devl_locked(dev); 112 + err = mlx5_health_wait_pci_up(dev); 113 + if (err) 114 + NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset"); 115 + 116 + return err; 108 117 } 109 118 110 119 static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink, ··· 143 134 struct mlx5_core_dev *dev = devlink_priv(devlink); 144 135 struct pci_dev *pdev = dev->pdev; 145 136 bool sf_dev_allocated; 137 + int ret = 0; 146 138 147 139 sf_dev_allocated = mlx5_sf_dev_allocated(dev); 148 140 if (sf_dev_allocated) { ··· 164 154 NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable"); 165 155 } 166 156 157 + devl_lock(devlink); 167 158 switch (action) { 168 159 case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: 169 - mlx5_unload_one(dev); 170 - return 0; 160 + mlx5_unload_one_devl_locked(dev); 161 + break; 171 162 case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: 172 163 if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) 173 - return mlx5_devlink_trigger_fw_live_patch(devlink, extack); 174 - return mlx5_devlink_reload_fw_activate(devlink, extack); 164 + ret = mlx5_devlink_trigger_fw_live_patch(devlink, extack); 165 + else 166 + ret = mlx5_devlink_reload_fw_activate(devlink, extack); 167 + break; 175 168 default: 176 169 /* Unsupported action should not get to this function */ 177 170 WARN_ON(1); 178 - return -EOPNOTSUPP; 171 + ret = -EOPNOTSUPP; 179 172 } 173 + 174 + devl_unlock(devlink); 175 + return ret; 180 176 } 181 177 182 178 static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action, ··· 190 174 struct netlink_ext_ack *extack) 191 175 { 192 176 struct mlx5_core_dev *dev = devlink_priv(devlink); 177 + int ret = 0; 193 178 179 + devl_lock(devlink); 194 180 *actions_performed = BIT(action); 195 181 switch (action) { 196 182 
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: 197 - return mlx5_load_one(dev, false); 183 + ret = mlx5_load_one_devl_locked(dev, false); 184 + break; 198 185 case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: 199 186 if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) 200 187 break; 201 188 /* On fw_activate action, also driver is reloaded and reinit performed */ 202 189 *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); 203 - return mlx5_load_one(dev, false); 190 + ret = mlx5_load_one_devl_locked(dev, false); 191 + break; 204 192 default: 205 193 /* Unsupported action should not get to this function */ 206 194 WARN_ON(1); 207 - return -EOPNOTSUPP; 195 + ret = -EOPNOTSUPP; 208 196 } 209 197 210 - return 0; 198 + devl_unlock(devlink); 199 + return ret; 211 200 } 212 201 213 202 static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id) ··· 849 828 struct mlx5_core_dev *core_dev = devlink_priv(devlink); 850 829 int err; 851 830 852 - err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr, 853 - ARRAY_SIZE(mlx5_trap_groups_arr)); 831 + err = devl_trap_groups_register(devlink, mlx5_trap_groups_arr, 832 + ARRAY_SIZE(mlx5_trap_groups_arr)); 854 833 if (err) 855 834 return err; 856 835 857 - err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr), 858 - &core_dev->priv); 836 + err = devl_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr), 837 + &core_dev->priv); 859 838 if (err) 860 839 goto err_trap_group; 861 840 return 0; 862 841 863 842 err_trap_group: 864 - devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr, 865 - ARRAY_SIZE(mlx5_trap_groups_arr)); 843 + devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr, 844 + ARRAY_SIZE(mlx5_trap_groups_arr)); 866 845 return err; 867 846 } 868 847 869 848 static void mlx5_devlink_traps_unregister(struct devlink *devlink) 870 849 { 871 - devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr)); 872 - 
devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr, 873 - ARRAY_SIZE(mlx5_trap_groups_arr)); 850 + devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr)); 851 + devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr, 852 + ARRAY_SIZE(mlx5_trap_groups_arr)); 874 853 } 875 854 876 855 int mlx5_devlink_register(struct devlink *devlink)
+4 -14
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1300 1300 */ 1301 1301 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) 1302 1302 { 1303 - struct devlink *devlink; 1304 1303 bool toggle_lag; 1305 1304 int ret; 1306 1305 1307 1306 if (!mlx5_esw_allowed(esw)) 1308 1307 return 0; 1309 1308 1309 + devl_assert_locked(priv_to_devlink(esw->dev)); 1310 + 1310 1311 toggle_lag = !mlx5_esw_is_fdb_created(esw); 1311 1312 1312 1313 if (toggle_lag) 1313 1314 mlx5_lag_disable_change(esw->dev); 1314 1315 1315 - devlink = priv_to_devlink(esw->dev); 1316 - devl_lock(devlink); 1317 1316 down_write(&esw->mode_lock); 1318 1317 if (!mlx5_esw_is_fdb_created(esw)) { 1319 1318 ret = mlx5_eswitch_enable_locked(esw, num_vfs); ··· 1326 1327 esw->esw_funcs.num_vfs = num_vfs; 1327 1328 } 1328 1329 up_write(&esw->mode_lock); 1329 - devl_unlock(devlink); 1330 1330 1331 1331 if (toggle_lag) 1332 1332 mlx5_lag_enable_change(esw->dev); ··· 1336 1338 /* When disabling sriov, free driver level resources. */ 1337 1339 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) 1338 1340 { 1339 - struct devlink *devlink; 1340 - 1341 1341 if (!mlx5_esw_allowed(esw)) 1342 1342 return; 1343 1343 1344 - devlink = priv_to_devlink(esw->dev); 1345 - devl_lock(devlink); 1344 + devl_assert_locked(priv_to_devlink(esw->dev)); 1346 1345 down_write(&esw->mode_lock); 1347 1346 /* If driver is unloaded, this function is called twice by remove_one() 1348 1347 * and mlx5_unload(). Prevent the second call. ··· 1368 1373 1369 1374 unlock: 1370 1375 up_write(&esw->mode_lock); 1371 - devl_unlock(devlink); 1372 1376 } 1373 1377 1374 1378 /* Free resources for corresponding eswitch mode. 
It is called by devlink ··· 1401 1407 1402 1408 void mlx5_eswitch_disable(struct mlx5_eswitch *esw) 1403 1409 { 1404 - struct devlink *devlink; 1405 - 1406 1410 if (!mlx5_esw_allowed(esw)) 1407 1411 return; 1408 1412 1413 + devl_assert_locked(priv_to_devlink(esw->dev)); 1409 1414 mlx5_lag_disable_change(esw->dev); 1410 - devlink = priv_to_devlink(esw->dev); 1411 - devl_lock(devlink); 1412 1415 down_write(&esw->mode_lock); 1413 1416 mlx5_eswitch_disable_locked(esw); 1414 1417 up_write(&esw->mode_lock); 1415 - devl_unlock(devlink); 1416 1418 mlx5_lag_enable_change(esw->dev); 1417 1419 } 1418 1420
+3 -7
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
··· 149 149 if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) { 150 150 complete(&fw_reset->done); 151 151 } else { 152 + mlx5_unload_one(dev); 153 + if (mlx5_health_wait_pci_up(dev)) 154 + mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); 152 155 mlx5_load_one(dev, false); 153 156 devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0, 154 157 BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | ··· 186 183 struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, 187 184 reset_reload_work); 188 185 struct mlx5_core_dev *dev = fw_reset->dev; 189 - int err; 190 186 191 187 mlx5_sync_reset_clear_reset_requested(dev, false); 192 188 mlx5_enter_error_state(dev, true); 193 - mlx5_unload_one(dev); 194 - err = mlx5_health_wait_pci_up(dev); 195 - if (err) 196 - mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); 197 - fw_reset->ret = err; 198 189 mlx5_fw_reset_complete_reload(dev); 199 190 } 200 191 ··· 392 395 } 393 396 394 397 mlx5_enter_error_state(dev, true); 395 - mlx5_unload_one(dev); 396 398 done: 397 399 fw_reset->ret = err; 398 400 mlx5_fw_reset_complete_reload(dev);
+4
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 666 666 struct mlx5_fw_reporter_ctx fw_reporter_ctx; 667 667 struct mlx5_core_health *health; 668 668 struct mlx5_core_dev *dev; 669 + struct devlink *devlink; 669 670 struct mlx5_priv *priv; 670 671 671 672 health = container_of(work, struct mlx5_core_health, fatal_report_work); 672 673 priv = container_of(health, struct mlx5_priv, health); 673 674 dev = container_of(priv, struct mlx5_core_dev, priv); 675 + devlink = priv_to_devlink(dev); 674 676 675 677 enter_error_state(dev, false); 676 678 if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) { 679 + devl_lock(devlink); 677 680 if (mlx5_health_try_recover(dev)) 678 681 mlx5_core_err(dev, "health recovery failed\n"); 682 + devl_unlock(devlink); 679 683 return; 680 684 } 681 685 fw_reporter_ctx.err_synd = health->synd;
+34 -4
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1304 1304 1305 1305 int mlx5_init_one(struct mlx5_core_dev *dev) 1306 1306 { 1307 + struct devlink *devlink = priv_to_devlink(dev); 1307 1308 int err = 0; 1308 1309 1310 + devl_lock(devlink); 1309 1311 mutex_lock(&dev->intf_state_mutex); 1310 1312 dev->state = MLX5_DEVICE_STATE_UP; 1311 1313 ··· 1336 1334 goto err_register; 1337 1335 1338 1336 mutex_unlock(&dev->intf_state_mutex); 1337 + devl_unlock(devlink); 1339 1338 return 0; 1340 1339 1341 1340 err_register: ··· 1351 1348 err_function: 1352 1349 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 1353 1350 mutex_unlock(&dev->intf_state_mutex); 1351 + devl_unlock(devlink); 1354 1352 return err; 1355 1353 } 1356 1354 1357 1355 void mlx5_uninit_one(struct mlx5_core_dev *dev) 1358 1356 { 1357 + struct devlink *devlink = priv_to_devlink(dev); 1358 + 1359 + devl_lock(devlink); 1359 1360 mutex_lock(&dev->intf_state_mutex); 1360 1361 1361 1362 mlx5_unregister_device(dev); ··· 1378 1371 mlx5_function_teardown(dev, true); 1379 1372 out: 1380 1373 mutex_unlock(&dev->intf_state_mutex); 1374 + devl_unlock(devlink); 1381 1375 } 1382 1376 1383 - int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery) 1377 + int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery) 1384 1378 { 1385 1379 int err = 0; 1386 1380 u64 timeout; 1387 1381 1382 + devl_assert_locked(priv_to_devlink(dev)); 1388 1383 mutex_lock(&dev->intf_state_mutex); 1389 1384 if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { 1390 1385 mlx5_core_warn(dev, "interface is up, NOP\n"); ··· 1428 1419 return err; 1429 1420 } 1430 1421 1431 - void mlx5_unload_one(struct mlx5_core_dev *dev) 1422 + int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery) 1432 1423 { 1424 + struct devlink *devlink = priv_to_devlink(dev); 1425 + int ret; 1426 + 1427 + devl_lock(devlink); 1428 + ret = mlx5_load_one_devl_locked(dev, recovery); 1429 + devl_unlock(devlink); 1430 + return ret; 1431 + } 1432 + 1433 + void mlx5_unload_one_devl_locked(struct mlx5_core_dev 
*dev) 1434 + { 1435 + devl_assert_locked(priv_to_devlink(dev)); 1433 1436 mutex_lock(&dev->intf_state_mutex); 1434 1437 1435 1438 mlx5_detach_device(dev); ··· 1457 1436 mlx5_function_teardown(dev, false); 1458 1437 out: 1459 1438 mutex_unlock(&dev->intf_state_mutex); 1439 + } 1440 + 1441 + void mlx5_unload_one(struct mlx5_core_dev *dev) 1442 + { 1443 + struct devlink *devlink = priv_to_devlink(dev); 1444 + 1445 + devl_lock(devlink); 1446 + mlx5_unload_one_devl_locked(dev); 1447 + devl_unlock(devlink); 1460 1448 } 1461 1449 1462 1450 static const int types[] = { ··· 1932 1902 void mlx5_disable_device(struct mlx5_core_dev *dev) 1933 1903 { 1934 1904 mlx5_error_sw_reset(dev); 1935 - mlx5_unload_one(dev); 1905 + mlx5_unload_one_devl_locked(dev); 1936 1906 } 1937 1907 1938 1908 int mlx5_recover_device(struct mlx5_core_dev *dev) ··· 1943 1913 return -EIO; 1944 1914 } 1945 1915 1946 - return mlx5_load_one(dev, true); 1916 + return mlx5_load_one_devl_locked(dev, true); 1947 1917 } 1948 1918 1949 1919 static struct pci_driver mlx5_core_driver = {
+2
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 290 290 int mlx5_init_one(struct mlx5_core_dev *dev); 291 291 void mlx5_uninit_one(struct mlx5_core_dev *dev); 292 292 void mlx5_unload_one(struct mlx5_core_dev *dev); 293 + void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev); 293 294 int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery); 295 + int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery); 294 296 295 297 int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out); 296 298
+6
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
··· 154 154 static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs) 155 155 { 156 156 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 157 + struct devlink *devlink = priv_to_devlink(dev); 157 158 int err; 158 159 160 + devl_lock(devlink); 159 161 err = mlx5_device_enable_sriov(dev, num_vfs); 160 162 if (err) { 161 163 mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err); 162 164 return err; 163 165 } 166 + devl_unlock(devlink); 164 167 165 168 err = pci_enable_sriov(pdev, num_vfs); 166 169 if (err) { ··· 176 173 void mlx5_sriov_disable(struct pci_dev *pdev) 177 174 { 178 175 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 176 + struct devlink *devlink = priv_to_devlink(dev); 179 177 int num_vfs = pci_num_vf(dev->pdev); 180 178 181 179 pci_disable_sriov(pdev); 180 + devl_lock(devlink); 182 181 mlx5_device_disable_sriov(dev, num_vfs, true); 182 + devl_unlock(devlink); 183 183 } 184 184 185 185 int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
+76 -59
net/core/devlink.c
··· 699 699 const struct devlink_region_ops *ops; 700 700 const struct devlink_port_region_ops *port_ops; 701 701 }; 702 + struct mutex snapshot_lock; /* protects snapshot_list, 703 + * max_snapshots and cur_snapshots 704 + * consistency. 705 + */ 702 706 struct list_head snapshot_list; 703 707 u32 max_snapshots; 704 708 u32 cur_snapshots; ··· 5898 5894 { 5899 5895 unsigned long count; 5900 5896 void *p; 5897 + int err; 5901 5898 5902 - devl_assert_locked(devlink); 5903 - 5899 + xa_lock(&devlink->snapshot_ids); 5904 5900 p = xa_load(&devlink->snapshot_ids, id); 5905 - if (WARN_ON(!p)) 5906 - return -EINVAL; 5901 + if (WARN_ON(!p)) { 5902 + err = -EINVAL; 5903 + goto unlock; 5904 + } 5907 5905 5908 - if (WARN_ON(!xa_is_value(p))) 5909 - return -EINVAL; 5906 + if (WARN_ON(!xa_is_value(p))) { 5907 + err = -EINVAL; 5908 + goto unlock; 5909 + } 5910 5910 5911 5911 count = xa_to_value(p); 5912 5912 count++; 5913 5913 5914 - return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(count), 5915 - GFP_KERNEL)); 5914 + err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(count), 5915 + GFP_ATOMIC)); 5916 + unlock: 5917 + xa_unlock(&devlink->snapshot_ids); 5918 + return err; 5916 5919 } 5917 5920 5918 5921 /** ··· 5942 5931 unsigned long count; 5943 5932 void *p; 5944 5933 5945 - devl_assert_locked(devlink); 5946 - 5934 + xa_lock(&devlink->snapshot_ids); 5947 5935 p = xa_load(&devlink->snapshot_ids, id); 5948 5936 if (WARN_ON(!p)) 5949 - return; 5937 + goto unlock; 5950 5938 5951 5939 if (WARN_ON(!xa_is_value(p))) 5952 - return; 5940 + goto unlock; 5953 5941 5954 5942 count = xa_to_value(p); 5955 5943 5956 5944 if (count > 1) { 5957 5945 count--; 5958 - xa_store(&devlink->snapshot_ids, id, xa_mk_value(count), 5959 - GFP_KERNEL); 5946 + __xa_store(&devlink->snapshot_ids, id, xa_mk_value(count), 5947 + GFP_ATOMIC); 5960 5948 } else { 5961 5949 /* If this was the last user, we can erase this id */ 5962 - xa_erase(&devlink->snapshot_ids, id); 5950 + 
__xa_erase(&devlink->snapshot_ids, id); 5963 5951 } 5952 + unlock: 5953 + xa_unlock(&devlink->snapshot_ids); 5964 5954 } 5965 5955 5966 5956 /** ··· 5982 5970 */ 5983 5971 static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id) 5984 5972 { 5985 - devl_assert_locked(devlink); 5973 + int err; 5986 5974 5987 - if (xa_load(&devlink->snapshot_ids, id)) 5975 + xa_lock(&devlink->snapshot_ids); 5976 + if (xa_load(&devlink->snapshot_ids, id)) { 5977 + xa_unlock(&devlink->snapshot_ids); 5988 5978 return -EEXIST; 5989 - 5990 - return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(0), 5991 - GFP_KERNEL)); 5979 + } 5980 + err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(0), 5981 + GFP_ATOMIC)); 5982 + xa_unlock(&devlink->snapshot_ids); 5983 + return err; 5992 5984 } 5993 5985 5994 5986 /** ··· 6013 5997 */ 6014 5998 static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id) 6015 5999 { 6016 - devl_assert_locked(devlink); 6017 - 6018 6000 return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1), 6019 6001 xa_limit_32b, GFP_KERNEL); 6020 6002 } ··· 6025 6011 * Multiple snapshots can be created on a region. 6026 6012 * The @snapshot_id should be obtained using the getter function. 6027 6013 * 6028 - * Must be called only while holding the devlink instance lock. 6014 + * Must be called only while holding the region snapshot lock. 
6029 6015 * 6030 6016 * @region: devlink region of the snapshot 6031 6017 * @data: snapshot data ··· 6039 6025 struct devlink_snapshot *snapshot; 6040 6026 int err; 6041 6027 6042 - devl_assert_locked(devlink); 6028 + lockdep_assert_held(&region->snapshot_lock); 6043 6029 6044 6030 /* check if region can hold one more snapshot */ 6045 6031 if (region->cur_snapshots == region->max_snapshots) ··· 6077 6063 { 6078 6064 struct devlink *devlink = region->devlink; 6079 6065 6080 - devl_assert_locked(devlink); 6066 + lockdep_assert_held(&region->snapshot_lock); 6081 6067 6082 6068 devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL); 6083 6069 region->cur_snapshots--; ··· 6256 6242 if (!region) 6257 6243 return -EINVAL; 6258 6244 6245 + mutex_lock(&region->snapshot_lock); 6259 6246 snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id); 6260 - if (!snapshot) 6247 + if (!snapshot) { 6248 + mutex_unlock(&region->snapshot_lock); 6261 6249 return -EINVAL; 6250 + } 6262 6251 6263 6252 devlink_region_snapshot_del(region, snapshot); 6253 + mutex_unlock(&region->snapshot_lock); 6264 6254 return 0; 6265 6255 } 6266 6256 ··· 6312 6294 return -EOPNOTSUPP; 6313 6295 } 6314 6296 6297 + mutex_lock(&region->snapshot_lock); 6298 + 6315 6299 if (region->cur_snapshots == region->max_snapshots) { 6316 6300 NL_SET_ERR_MSG_MOD(info->extack, "The region has reached the maximum number of stored snapshots"); 6317 - return -ENOSPC; 6301 + err = -ENOSPC; 6302 + goto unlock; 6318 6303 } 6319 6304 6320 6305 snapshot_id_attr = info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]; ··· 6326 6305 6327 6306 if (devlink_region_snapshot_get_by_id(region, snapshot_id)) { 6328 6307 NL_SET_ERR_MSG_MOD(info->extack, "The requested snapshot id is already in use"); 6329 - return -EEXIST; 6308 + err = -EEXIST; 6309 + goto unlock; 6330 6310 } 6331 6311 6332 6312 err = __devlink_snapshot_id_insert(devlink, snapshot_id); 6333 6313 if (err) 6334 - return err; 6314 + goto unlock; 6335 6315 } else { 
6336 6316 err = __devlink_region_snapshot_id_get(devlink, &snapshot_id); 6337 6317 if (err) { 6338 6318 NL_SET_ERR_MSG_MOD(info->extack, "Failed to allocate a new snapshot id"); 6339 - return err; 6319 + goto unlock; 6340 6320 } 6341 6321 } 6342 6322 ··· 6375 6353 goto err_notify; 6376 6354 } 6377 6355 6356 + mutex_unlock(&region->snapshot_lock); 6378 6357 return 0; 6379 6358 6380 6359 err_snapshot_create: 6381 6360 region->ops->destructor(data); 6382 6361 err_snapshot_capture: 6383 6362 __devlink_snapshot_id_decrement(devlink, snapshot_id); 6363 + mutex_unlock(&region->snapshot_lock); 6384 6364 return err; 6385 6365 6386 6366 err_notify: 6387 6367 devlink_region_snapshot_del(region, snapshot); 6368 + unlock: 6369 + mutex_unlock(&region->snapshot_lock); 6388 6370 return err; 6389 6371 } 6390 6372 ··· 7757 7731 enum devlink_health_reporter_state prev_health_state; 7758 7732 struct devlink *devlink = reporter->devlink; 7759 7733 unsigned long recover_ts_threshold; 7734 + int ret; 7760 7735 7761 7736 /* write a log message of the current error */ 7762 7737 WARN_ON(!msg); ··· 7791 7764 mutex_unlock(&reporter->dump_lock); 7792 7765 } 7793 7766 7794 - if (reporter->auto_recover) 7795 - return devlink_health_reporter_recover(reporter, 7796 - priv_ctx, NULL); 7767 + if (!reporter->auto_recover) 7768 + return 0; 7797 7769 7798 - return 0; 7770 + devl_lock(devlink); 7771 + ret = devlink_health_reporter_recover(reporter, priv_ctx, NULL); 7772 + devl_unlock(devlink); 7773 + 7774 + return ret; 7799 7775 } 7800 7776 EXPORT_SYMBOL_GPL(devlink_health_report); 7801 7777 ··· 9473 9443 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 9474 9444 .doit = devlink_nl_cmd_health_reporter_get_doit, 9475 9445 .dumpit = devlink_nl_cmd_health_reporter_get_dumpit, 9476 - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | 9477 - DEVLINK_NL_FLAG_NO_LOCK, 9446 + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT, 9478 9447 /* can be retrieved by unprivileged users */ 9479 
9448 }, 9480 9449 { ··· 9481 9452 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 9482 9453 .doit = devlink_nl_cmd_health_reporter_set_doit, 9483 9454 .flags = GENL_ADMIN_PERM, 9484 - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | 9485 - DEVLINK_NL_FLAG_NO_LOCK, 9455 + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT, 9486 9456 }, 9487 9457 { 9488 9458 .cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER, 9489 9459 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 9490 9460 .doit = devlink_nl_cmd_health_reporter_recover_doit, 9491 9461 .flags = GENL_ADMIN_PERM, 9492 - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | 9493 - DEVLINK_NL_FLAG_NO_LOCK, 9462 + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT, 9494 9463 }, 9495 9464 { 9496 9465 .cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE, 9497 9466 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 9498 9467 .doit = devlink_nl_cmd_health_reporter_diagnose_doit, 9499 9468 .flags = GENL_ADMIN_PERM, 9500 - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | 9501 - DEVLINK_NL_FLAG_NO_LOCK, 9469 + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT, 9502 9470 }, 9503 9471 { 9504 9472 .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET, ··· 9509 9483 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 9510 9484 .doit = devlink_nl_cmd_health_reporter_dump_clear_doit, 9511 9485 .flags = GENL_ADMIN_PERM, 9512 - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | 9513 - DEVLINK_NL_FLAG_NO_LOCK, 9486 + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT, 9514 9487 }, 9515 9488 { 9516 9489 .cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST, 9517 9490 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 9518 9491 .doit = devlink_nl_cmd_health_reporter_test_doit, 9519 9492 .flags = GENL_ADMIN_PERM, 9520 - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | 9521 - DEVLINK_NL_FLAG_NO_LOCK, 9493 + .internal_flags = 
DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT, 9522 9494 }, 9523 9495 { 9524 9496 .cmd = DEVLINK_CMD_FLASH_UPDATE, ··· 11324 11300 region->ops = ops; 11325 11301 region->size = region_size; 11326 11302 INIT_LIST_HEAD(&region->snapshot_list); 11303 + mutex_init(&region->snapshot_lock); 11327 11304 list_add_tail(&region->list, &devlink->region_list); 11328 11305 devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW); 11329 11306 ··· 11398 11373 region->port_ops = ops; 11399 11374 region->size = region_size; 11400 11375 INIT_LIST_HEAD(&region->snapshot_list); 11376 + mutex_init(&region->snapshot_lock); 11401 11377 list_add_tail(&region->list, &port->region_list); 11402 11378 devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW); 11403 11379 ··· 11428 11402 devlink_region_snapshot_del(region, snapshot); 11429 11403 11430 11404 list_del(&region->list); 11405 + mutex_destroy(&region->snapshot_lock); 11431 11406 11432 11407 devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL); 11433 11408 kfree(region); ··· 11469 11442 */ 11470 11443 int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id) 11471 11444 { 11472 - int err; 11473 - 11474 - devl_lock(devlink); 11475 - err = __devlink_region_snapshot_id_get(devlink, id); 11476 - devl_unlock(devlink); 11477 - 11478 - return err; 11445 + return __devlink_region_snapshot_id_get(devlink, id); 11479 11446 } 11480 11447 EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get); 11481 11448 ··· 11485 11464 */ 11486 11465 void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id) 11487 11466 { 11488 - devl_lock(devlink); 11489 11467 __devlink_snapshot_id_decrement(devlink, id); 11490 - devl_unlock(devlink); 11491 11468 } 11492 11469 EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put); 11493 11470 ··· 11504 11485 int devlink_region_snapshot_create(struct devlink_region *region, 11505 11486 u8 *data, u32 snapshot_id) 11506 11487 { 11507 - struct devlink *devlink = region->devlink; 11508 11488 int err; 11509 
11489 11510 - devl_lock(devlink); 11490 + mutex_lock(&region->snapshot_lock); 11511 11491 err = __devlink_region_snapshot_create(region, data, snapshot_id); 11512 - devl_unlock(devlink); 11513 - 11492 + mutex_unlock(&region->snapshot_lock); 11514 11493 return err; 11515 11494 } 11516 11495 EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);