RDMA: Update workqueue usage

* ib_wq is added, which is used as the common workqueue for infiniband
  instead of the system workqueue. All system workqueue users,
  including flush_scheduled_work() callers, are converted to use and
  flush ib_wq.

* cancel_delayed_work() + flush_scheduled_work() pairs are converted to
  cancel_delayed_work_sync().

* qib_wq is removed and ib_wq is used instead.

This is to prepare for deprecation of flush_scheduled_work().
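
For reference, a minimal sketch of the conversion pattern described above, assuming a hypothetical driver-private work item (my_dev, my_queue_work and my_remove are illustrative names, not symbols from this patch):

#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>      /* declares ib_wq after this patch */

struct my_dev {
        struct work_struct      cleanup_work;
        struct delayed_work     poll_work;
};

static void my_queue_work(struct my_dev *dev)
{
        /* was: schedule_work(&dev->cleanup_work); */
        queue_work(ib_wq, &dev->cleanup_work);

        /* was: schedule_delayed_work(&dev->poll_work, HZ); */
        queue_delayed_work(ib_wq, &dev->poll_work, HZ);
}

static void my_remove(struct my_dev *dev)
{
        /*
         * was: cancel_delayed_work(&dev->poll_work);
         *      flush_scheduled_work();
         * cancel_delayed_work_sync() cancels the work and waits for a
         * running instance, so no separate flush is needed for it.
         */
        cancel_delayed_work_sync(&dev->poll_work);

        /* was: flush_scheduled_work(); */
        flush_workqueue(ib_wq);
}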

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Tejun Heo, committed by Roland Dreier (f0626710, 948579cd)

+39 -50
+2 -2
drivers/infiniband/core/cache.c
···
308     INIT_WORK(&work->work, ib_cache_task);
309     work->device = event->device;
310     work->port_num = event->element.port_num;
311 -   schedule_work(&work->work);
312     }
313     }
314     }
···
368     int p;
369
370     ib_unregister_event_handler(&device->cache.event_handler);
371 -   flush_scheduled_work();
372
373     for (p = 0; p <= end_port(device) - start_port(device); ++p) {
374     kfree(device->cache.pkey_cache[p]);
···
308     INIT_WORK(&work->work, ib_cache_task);
309     work->device = event->device;
310     work->port_num = event->element.port_num;
311 +   queue_work(ib_wq, &work->work);
312     }
313     }
314     }
···
368     int p;
369
370     ib_unregister_event_handler(&device->cache.event_handler);
371 +   flush_workqueue(ib_wq);
372
373     for (p = 0; p <= end_port(device) - start_port(device); ++p) {
374     kfree(device->cache.pkey_cache[p]);
+9 -2
drivers/infiniband/core/device.c
···
38      #include <linux/slab.h>
39      #include <linux/init.h>
40      #include <linux/mutex.h>
41  -   #include <linux/workqueue.h>
42
43      #include "core_priv.h"
44
···
50      struct ib_client *client;
51      void * data;
52      };
53
54      static LIST_HEAD(device_list);
55      static LIST_HEAD(client_list);
···
720     {
721     int ret;
722
723     ret = ib_sysfs_setup();
724     if (ret)
725     printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
···
732     if (ret) {
733     printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
734     ib_sysfs_cleanup();
735     }
736
737     return ret;
···
743     ib_cache_cleanup();
744     ib_sysfs_cleanup();
745     /* Make sure that any pending umem accounting work is done. */
746 -   flush_scheduled_work();
747     }
748
749     module_init(ib_core_init);
···
38      #include <linux/slab.h>
39      #include <linux/init.h>
40      #include <linux/mutex.h>
41
42      #include "core_priv.h"
43
···
51      struct ib_client *client;
52      void * data;
53      };
54  +
55  +   struct workqueue_struct *ib_wq;
56  +   EXPORT_SYMBOL_GPL(ib_wq);
57
58      static LIST_HEAD(device_list);
59      static LIST_HEAD(client_list);
···
718     {
719     int ret;
720
721 +   ib_wq = alloc_workqueue("infiniband", 0, 0);
722 +   if (!ib_wq)
723 +   return -ENOMEM;
724 +
725     ret = ib_sysfs_setup();
726     if (ret)
727     printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
···
726     if (ret) {
727     printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
728     ib_sysfs_cleanup();
729 +   destroy_workqueue(ib_wq);
730     }
731
732     return ret;
···
736     ib_cache_cleanup();
737     ib_sysfs_cleanup();
738     /* Make sure that any pending umem accounting work is done. */
739 +   destroy_workqueue(ib_wq);
740     }
741
742     module_init(ib_core_init);
+1 -1
drivers/infiniband/core/sa_query.c
···
425     port->sm_ah = NULL;
426     spin_unlock_irqrestore(&port->ah_lock, flags);
427
428 -   schedule_work(&sa_dev->port[event->element.port_num -
429     sa_dev->start_port].update_task);
430     }
431     }
···
425     port->sm_ah = NULL;
426     spin_unlock_irqrestore(&port->ah_lock, flags);
427
428 +   queue_work(ib_wq, &sa_dev->port[event->element.port_num -
429     sa_dev->start_port].update_task);
430     }
431     }
+1 -1
drivers/infiniband/core/umem.c
···
262     umem->mm = mm;
263     umem->diff = diff;
264
265 -   schedule_work(&umem->work);
266     return;
267     }
268     } else
···
262     umem->mm = mm;
263     umem->diff = diff;
264
265 +   queue_work(ib_wq, &umem->work);
266     return;
267     }
268     } else
+1 -1
drivers/infiniband/hw/ipath/ipath_driver.c
···
755     */
756     ipath_shutdown_device(dd);
757
758 -   flush_scheduled_work();
759
760     if (dd->verbs_dev)
761     ipath_unregister_ib_device(dd->verbs_dev);
···
755     */
756     ipath_shutdown_device(dd);
757
758 +   flush_workqueue(ib_wq);
759
760     if (dd->verbs_dev)
761     ipath_unregister_ib_device(dd->verbs_dev);
+1 -1
drivers/infiniband/hw/ipath/ipath_user_pages.c
···
220     work->mm = mm;
221     work->num_pages = num_pages;
222
223 -   schedule_work(&work->work);
224     return;
225
226     bail_mm:
···
220     work->mm = mm;
221     work->num_pages = num_pages;
222
223 +   queue_work(ib_wq, &work->work);
224     return;
225
226     bail_mm:
+3 -4
drivers/infiniband/hw/qib/qib_iba7220.c
···
1692     ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
1693     spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1694     wake_up(&ppd->cpspec->autoneg_wait);
1695 -   cancel_delayed_work(&ppd->cpspec->autoneg_work);
1696 -   flush_scheduled_work();
1697
1698     shutdown_7220_relock_poll(ppd->dd);
1699     val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
···
3514
3515     toggle_7220_rclkrls(ppd->dd);
3516     /* 2 msec is minimum length of a poll cycle */
3517 -   schedule_delayed_work(&ppd->cpspec->autoneg_work,
3518 -   msecs_to_jiffies(2));
3519     }
3520
3521     /*
···
1692     ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
1693     spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1694     wake_up(&ppd->cpspec->autoneg_wait);
1695 +   cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
1696
1697     shutdown_7220_relock_poll(ppd->dd);
1698     val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
···
3515
3516     toggle_7220_rclkrls(ppd->dd);
3517     /* 2 msec is minimum length of a poll cycle */
3518 +   queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
3519 +   msecs_to_jiffies(2));
3520     }
3521
3522     /*
+7 -7
drivers/infiniband/hw/qib/qib_iba7322.c
···
2406     ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2407     spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2408     wake_up(&ppd->cpspec->autoneg_wait);
2409 -   cancel_delayed_work(&ppd->cpspec->autoneg_work);
2410     if (ppd->dd->cspec->r1)
2411 -   cancel_delayed_work(&ppd->cpspec->ipg_work);
2412 -   flush_scheduled_work();
2413
2414     ppd->cpspec->chase_end = 0;
2415     if (ppd->cpspec->chase_timer.data) /* if initted */
···
2705     if (!(pins & mask)) {
2706     ++handled;
2707     qd->t_insert = get_jiffies_64();
2708 -   schedule_work(&qd->work);
2709     }
2710     }
2711     }
···
4989     set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
4990     qib_7322_mini_pcs_reset(ppd);
4991     /* 2 msec is minimum length of a poll cycle */
4992 -   schedule_delayed_work(&ppd->cpspec->autoneg_work,
4993 -   msecs_to_jiffies(2));
4994     }
4995
4996     /*
···
5120     ib_free_send_mad(send_buf);
5121     retry:
5122     delay = 2 << ppd->cpspec->ipg_tries;
5123 -   schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
5124     }
5125
5126     /*
···
2406     ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2407     spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2408     wake_up(&ppd->cpspec->autoneg_wait);
2409 +   cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2410     if (ppd->dd->cspec->r1)
2411 +   cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2412
2413     ppd->cpspec->chase_end = 0;
2414     if (ppd->cpspec->chase_timer.data) /* if initted */
···
2706     if (!(pins & mask)) {
2707     ++handled;
2708     qd->t_insert = get_jiffies_64();
2709 +   queue_work(ib_wq, &qd->work);
2710     }
2711     }
2712     }
···
4990     set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
4991     qib_7322_mini_pcs_reset(ppd);
4992     /* 2 msec is minimum length of a poll cycle */
4993 +   queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
4994 +   msecs_to_jiffies(2));
4995     }
4996
4997     /*
···
5121     ib_free_send_mad(send_buf);
5122     retry:
5123     delay = 2 << ppd->cpspec->ipg_tries;
5124 +   queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5125 +   msecs_to_jiffies(delay));
5126     }
5127
5128     /*
+4 -22
drivers/infiniband/hw/qib/qib_init.c
···
80       module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
81       MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
82
83   -   struct workqueue_struct *qib_wq;
84       struct workqueue_struct *qib_cq_wq;
85
86       static void verify_interrupt(unsigned long);
···
1043     if (ret)
1044     goto bail;
1045
1046 -   /*
1047 -   * We create our own workqueue mainly because we want to be
1048 -   * able to flush it when devices are being removed. We can't
1049 -   * use schedule_work()/flush_scheduled_work() because both
1050 -   * unregister_netdev() and linkwatch_event take the rtnl lock,
1051 -   * so flush_scheduled_work() can deadlock during device
1052 -   * removal.
1053 -   */
1054 -   qib_wq = create_workqueue("qib");
1055 -   if (!qib_wq) {
1056 -   ret = -ENOMEM;
1057 -   goto bail_dev;
1058 -   }
1059 -
1060     qib_cq_wq = create_singlethread_workqueue("qib_cq");
1061     if (!qib_cq_wq) {
1062     ret = -ENOMEM;
1063 -   goto bail_wq;
1064     }
1065
1066     /*
···
1076     idr_destroy(&qib_unit_table);
1077     bail_cq_wq:
1078     destroy_workqueue(qib_cq_wq);
1079 -   bail_wq:
1080 -   destroy_workqueue(qib_wq);
1081     bail_dev:
1082     qib_dev_cleanup();
1083     bail:
···
1099
1100     pci_unregister_driver(&qib_driver);
1101
1102 -   destroy_workqueue(qib_wq);
1103     destroy_workqueue(qib_cq_wq);
1104
1105     qib_cpulist_count = 0;
···
1271
1272     if (qib_mini_init || initfail || ret) {
1273     qib_stop_timers(dd);
1274 -   flush_scheduled_work();
1275     for (pidx = 0; pidx < dd->num_pports; ++pidx)
1276     dd->f_quiet_serdes(dd->pport + pidx);
1277     if (qib_mini_init)
···
1320
1321     qib_stop_timers(dd);
1322
1323 -   /* wait until all of our (qsfp) schedule_work() calls complete */
1324 -   flush_scheduled_work();
1325
1326     ret = qibfs_remove(dd);
1327     if (ret)
···
80       module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
81       MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
82
83       struct workqueue_struct *qib_cq_wq;
84
85       static void verify_interrupt(unsigned long);
···
1044     if (ret)
1045     goto bail;
1046
1047     qib_cq_wq = create_singlethread_workqueue("qib_cq");
1048     if (!qib_cq_wq) {
1049     ret = -ENOMEM;
1050 +   goto bail_dev;
1051     }
1052
1053     /*
···
1091     idr_destroy(&qib_unit_table);
1092     bail_cq_wq:
1093     destroy_workqueue(qib_cq_wq);
1094     bail_dev:
1095     qib_dev_cleanup();
1096     bail:
···
1116
1117     pci_unregister_driver(&qib_driver);
1118
1119     destroy_workqueue(qib_cq_wq);
1120
1121     qib_cpulist_count = 0;
···
1289
1290     if (qib_mini_init || initfail || ret) {
1291     qib_stop_timers(dd);
1292 +   flush_workqueue(ib_wq);
1293     for (pidx = 0; pidx < dd->num_pports; ++pidx)
1294     dd->f_quiet_serdes(dd->pport + pidx);
1295     if (qib_mini_init)
···
1338
1339     qib_stop_timers(dd);
1340
1341 +   /* wait until all of our (qsfp) queue_work() calls complete */
1342 +   flush_workqueue(ib_wq);
1343
1344     ret = qibfs_remove(dd);
1345     if (ret)
+4 -5
drivers/infiniband/hw/qib/qib_qsfp.c
···
485     goto bail;
486     /* We see a module, but it may be unwise to look yet. Just schedule */
487     qd->t_insert = get_jiffies_64();
488 -   schedule_work(&qd->work);
489     bail:
490     return;
491     }
···
493     void qib_qsfp_deinit(struct qib_qsfp_data *qd)
494     {
495     /*
496 -   * There is nothing to do here for now. our
497 -   * work is scheduled with schedule_work(), and
498 -   * flush_scheduled_work() from remove_one will
499 -   * block until all work ssetup with schedule_work()
500     * completes.
501     */
502     }
···
485     goto bail;
486     /* We see a module, but it may be unwise to look yet. Just schedule */
487     qd->t_insert = get_jiffies_64();
488 +   queue_work(ib_wq, &qd->work);
489     bail:
490     return;
491     }
···
493     void qib_qsfp_deinit(struct qib_qsfp_data *qd)
494     {
495     /*
496 +   * There is nothing to do here for now. our work is scheduled
497 +   * with queue_work(), and flush_workqueue() from remove_one
498 +   * will block until all work setup with queue_work()
499     * completes.
500     */
501     }
+1 -2
drivers/infiniband/hw/qib/qib_verbs.h
···
805     !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
806     }
807
808 -   extern struct workqueue_struct *qib_wq;
809     extern struct workqueue_struct *qib_cq_wq;
810
811     /*
···
813     static inline void qib_schedule_send(struct qib_qp *qp)
814     {
815     if (qib_send_ok(qp))
816 -   queue_work(qib_wq, &qp->s_work);
817     }
818
819     static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
···
805     !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
806     }
807
808     extern struct workqueue_struct *qib_cq_wq;
809
810     /*
···
814     static inline void qib_schedule_send(struct qib_qp *qp)
815     {
816     if (qib_send_ok(qp))
817 +   queue_work(ib_wq, &qp->s_work);
818     }
819
820     static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
+2 -2
drivers/infiniband/ulp/srp/ib_srp.c
···
638      if (target->state == SRP_TARGET_CONNECTING) {
639      target->state = SRP_TARGET_DEAD;
640      INIT_WORK(&target->work, srp_remove_work);
641  -   schedule_work(&target->work);
642      }
643      spin_unlock_irq(&target->lock);
644
···
2199     * started before we marked our target ports as
2200     * removed, and any target port removal tasks.
2201     */
2202 -   flush_scheduled_work();
2203
2204     list_for_each_entry_safe(target, tmp_target,
2205     &host->target_list, list) {
···
638      if (target->state == SRP_TARGET_CONNECTING) {
639      target->state = SRP_TARGET_DEAD;
640      INIT_WORK(&target->work, srp_remove_work);
641  +   queue_work(ib_wq, &target->work);
642      }
643      spin_unlock_irq(&target->lock);
644
···
2199     * started before we marked our target ports as
2200     * removed, and any target port removal tasks.
2201     */
2202 +   flush_workqueue(ib_wq);
2203
2204     list_for_each_entry_safe(target, tmp_target,
2205     &host->target_list, list) {
+3
include/rdma/ib_verbs.h
···
47      #include <linux/list.h>
48      #include <linux/rwsem.h>
49      #include <linux/scatterlist.h>
50
51      #include <asm/atomic.h>
52      #include <asm/uaccess.h>
53
54      union ib_gid {
55      u8 raw[16];
···
47      #include <linux/list.h>
48      #include <linux/rwsem.h>
49      #include <linux/scatterlist.h>
50  +   #include <linux/workqueue.h>
51
52      #include <asm/atomic.h>
53      #include <asm/uaccess.h>
54  +
55  +   extern struct workqueue_struct *ib_wq;
56
57      union ib_gid {
58      u8 raw[16];
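
With ib_wq declared in <rdma/ib_verbs.h> and exported via EXPORT_SYMBOL_GPL, any GPL module that includes the header can queue and flush work on the shared InfiniBand workqueue. A hypothetical consumer sketch follows; example_work and example_handler are illustrative names only, not part of this patch:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

static void example_handler(struct work_struct *work)
{
        /* deferred InfiniBand-related processing would go here */
}

static DECLARE_WORK(example_work, example_handler);

static int __init example_init(void)
{
        queue_work(ib_wq, &example_work);       /* instead of schedule_work() */
        return 0;
}

static void __exit example_exit(void)
{
        flush_workqueue(ib_wq);         /* instead of flush_scheduled_work() */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");          /* required: ib_wq is EXPORT_SYMBOL_GPL */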