Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/mlx4: Avoid flush_scheduled_work() usage

Flushing system-wide workqueues is dangerous and will be forbidden.
Replace system_wq with local cm_wq.

Link: https://lore.kernel.org/r/22f7183b-cc16-5a34-e879-7605f5efc6e6@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Authored by Tetsuo Handa and committed by Jason Gunthorpe.
9cf62d91 549f39a5

+34 -8
+22 -7
drivers/infiniband/hw/mlx4/cm.c
··· 80 80 union ib_gid primary_path_sgid; 81 81 }; 82 82 83 + static struct workqueue_struct *cm_wq; 83 84 84 85 static void set_local_comm_id(struct ib_mad *mad, u32 cm_id) 85 86 { ··· 289 288 /*make sure that there is no schedule inside the scheduled work.*/ 290 289 if (!sriov->is_going_down && !id->scheduled_delete) { 291 290 id->scheduled_delete = 1; 292 - schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT); 291 + queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT); 293 292 } else if (id->scheduled_delete) { 294 293 /* Adjust timeout if already scheduled */ 295 - mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT); 294 + mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT); 296 295 } 297 296 spin_unlock_irqrestore(&sriov->going_down_lock, flags); 298 297 spin_unlock(&sriov->id_map_lock); ··· 371 370 ret = xa_err(item); 372 371 else 373 372 /* If a retry, adjust delayed work */ 374 - mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT); 373 + mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT); 375 374 goto err_or_exists; 376 375 } 377 376 xa_unlock(&sriov->xa_rej_tmout); ··· 394 393 return xa_err(old); 395 394 } 396 395 397 - schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT); 396 + queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT); 398 397 399 398 return 0; 400 399 ··· 501 500 xa_lock(&sriov->xa_rej_tmout); 502 501 xa_for_each(&sriov->xa_rej_tmout, id, item) { 503 502 if (slave < 0 || slave == item->slave) { 504 - mod_delayed_work(system_wq, &item->timeout, 0); 503 + mod_delayed_work(cm_wq, &item->timeout, 0); 505 504 flush_needed = true; 506 505 ++cnt; 507 506 } ··· 509 508 xa_unlock(&sriov->xa_rej_tmout); 510 509 511 510 if (flush_needed) { 512 - flush_scheduled_work(); 511 + flush_workqueue(cm_wq); 513 512 pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n", 514 513 cnt, slave); 515 514 } ··· 541 540 
spin_unlock(&sriov->id_map_lock); 542 541 543 542 if (need_flush) 544 - flush_scheduled_work(); /* make sure all timers were flushed */ 543 + flush_workqueue(cm_wq); /* make sure all timers were flushed */ 545 544 546 545 /* now, remove all leftover entries from databases*/ 547 546 spin_lock(&sriov->id_map_lock); ··· 587 586 } 588 587 589 588 rej_tmout_xa_cleanup(sriov, slave); 589 + } 590 + 591 + int mlx4_ib_cm_init(void) 592 + { 593 + cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0); 594 + if (!cm_wq) 595 + return -ENOMEM; 596 + 597 + return 0; 598 + } 599 + 600 + void mlx4_ib_cm_destroy(void) 601 + { 602 + destroy_workqueue(cm_wq); 590 603 }
+9 -1
drivers/infiniband/hw/mlx4/main.c
··· 3307 3307 if (!wq) 3308 3308 return -ENOMEM; 3309 3309 3310 - err = mlx4_ib_mcg_init(); 3310 + err = mlx4_ib_cm_init(); 3311 3311 if (err) 3312 3312 goto clean_wq; 3313 + 3314 + err = mlx4_ib_mcg_init(); 3315 + if (err) 3316 + goto clean_cm; 3313 3317 3314 3318 err = mlx4_register_interface(&mlx4_ib_interface); 3315 3319 if (err) ··· 3324 3320 clean_mcg: 3325 3321 mlx4_ib_mcg_destroy(); 3326 3322 3323 + clean_cm: 3324 + mlx4_ib_cm_destroy(); 3325 + 3327 3326 clean_wq: 3328 3327 destroy_workqueue(wq); 3329 3328 return err; ··· 3336 3329 { 3337 3330 mlx4_unregister_interface(&mlx4_ib_interface); 3338 3331 mlx4_ib_mcg_destroy(); 3332 + mlx4_ib_cm_destroy(); 3339 3333 destroy_workqueue(wq); 3340 3334 } 3341 3335
+3
drivers/infiniband/hw/mlx4/mlx4_ib.h
··· 937 937 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, 938 938 int *num_of_mtts); 939 939 940 + int mlx4_ib_cm_init(void); 941 + void mlx4_ib_cm_destroy(void); 942 + 940 943 #endif /* MLX4_IB_H */