RDMA: Update workqueue usage

* ib_wq is added and used as the common workqueue for InfiniBand
instead of the system workqueue. All users of the system workqueue,
including flush_scheduled_work() callers, are converted to use and
flush ib_wq.

* cancel_delayed_work() + flush_scheduled_work() converted to
cancel_delayed_work_sync().

* qib_wq is removed and ib_wq is used instead.

This is to prepare for deprecation of flush_scheduled_work().
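For illustration, the conversion pattern looks roughly like the sketch
below. This is not code from the patch: the "foo" names are hypothetical,
and only ib_wq and the workqueue calls come from the changes described
above.

/*
 * Illustrative only -- not part of this patch.  "foo" names are
 * hypothetical.  Work that used to ride the system workqueue is
 * queued on ib_wq, and global flush_scheduled_work() calls become
 * flushes of ib_wq (or cancel_delayed_work_sync(), which already
 * waits for a running callback).
 */
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>	/* extern struct workqueue_struct *ib_wq; */

static struct work_struct foo_work;	/* hypothetical work item */
static struct delayed_work foo_dwork;	/* hypothetical delayed work */

static void foo_handler(struct work_struct *work)
{
	/* ... do the deferred work ... */
}

static void foo_init(void)
{
	INIT_WORK(&foo_work, foo_handler);
	INIT_DELAYED_WORK(&foo_dwork, foo_handler);
}

static void foo_kick(void)
{
	/* was: schedule_work(&foo_work); */
	queue_work(ib_wq, &foo_work);

	/* was: schedule_delayed_work(&foo_dwork, msecs_to_jiffies(2)); */
	queue_delayed_work(ib_wq, &foo_dwork, msecs_to_jiffies(2));
}

static void foo_shutdown(void)
{
	/* was: cancel_delayed_work(&foo_dwork); flush_scheduled_work(); */
	cancel_delayed_work_sync(&foo_dwork);

	/* was: flush_scheduled_work(); */
	flush_workqueue(ib_wq);
}

The point of the dedicated queue is that drivers can flush only
InfiniBand work with flush_workqueue(ib_wq) instead of relying on the
global flush_scheduled_work().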

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Tejun Heo and committed by Roland Dreier · f0626710 948579cd

13 files changed: +39 -50
drivers/infiniband/core/cache.c  +2 -2
···
 			INIT_WORK(&work->work, ib_cache_task);
 			work->device = event->device;
 			work->port_num = event->element.port_num;
-			schedule_work(&work->work);
+			queue_work(ib_wq, &work->work);
 		}
 	}
 }
···
 	int p;
 
 	ib_unregister_event_handler(&device->cache.event_handler);
-	flush_scheduled_work();
+	flush_workqueue(ib_wq);
 
 	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
 		kfree(device->cache.pkey_cache[p]);
drivers/infiniband/core/device.c  +9 -2
···
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 
 #include "core_priv.h"
 
···
 	struct ib_client *client;
 	void * data;
 };
+
+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);
 
 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
···
 {
 	int ret;
 
+	ib_wq = alloc_workqueue("infiniband", 0, 0);
+	if (!ib_wq)
+		return -ENOMEM;
+
 	ret = ib_sysfs_setup();
 	if (ret)
 		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
···
 	if (ret) {
 		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
 		ib_sysfs_cleanup();
+		destroy_workqueue(ib_wq);
 	}
 
 	return ret;
···
 	ib_cache_cleanup();
 	ib_sysfs_cleanup();
 	/* Make sure that any pending umem accounting work is done. */
-	flush_scheduled_work();
+	destroy_workqueue(ib_wq);
 }
 
 module_init(ib_core_init);
drivers/infiniband/core/sa_query.c  +1 -1
···
 		port->sm_ah = NULL;
 		spin_unlock_irqrestore(&port->ah_lock, flags);
 
-		schedule_work(&sa_dev->port[event->element.port_num -
+		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
 					    sa_dev->start_port].update_task);
 	}
 }
drivers/infiniband/core/umem.c  +1 -1
···
 		umem->mm = mm;
 		umem->diff = diff;
 
-		schedule_work(&umem->work);
+		queue_work(ib_wq, &umem->work);
 		return;
 	}
 } else
drivers/infiniband/hw/ipath/ipath_driver.c  +1 -1
···
 	 */
 	ipath_shutdown_device(dd);
 
-	flush_scheduled_work();
+	flush_workqueue(ib_wq);
 
 	if (dd->verbs_dev)
 		ipath_unregister_ib_device(dd->verbs_dev);
drivers/infiniband/hw/ipath/ipath_user_pages.c  +1 -1
···
 	work->mm = mm;
 	work->num_pages = num_pages;
 
-	schedule_work(&work->work);
+	queue_work(ib_wq, &work->work);
 	return;
 
 bail_mm:
drivers/infiniband/hw/qib/qib_iba7220.c  +3 -4
···
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 
 	shutdown_7220_relock_poll(ppd->dd);
 	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
···
 
 	toggle_7220_rclkrls(ppd->dd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }
 
 /*
drivers/infiniband/hw/qib/qib_iba7322.c  +7 -7
···
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work(&ppd->cpspec->autoneg_work);
+	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 	if (ppd->dd->cspec->r1)
-		cancel_delayed_work(&ppd->cpspec->ipg_work);
-	flush_scheduled_work();
+		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
 
 	ppd->cpspec->chase_end = 0;
 	if (ppd->cpspec->chase_timer.data) /* if initted */
···
 			if (!(pins & mask)) {
 				++handled;
 				qd->t_insert = get_jiffies_64();
-				schedule_work(&qd->work);
+				queue_work(ib_wq, &qd->work);
 			}
 		}
 	}
···
 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
 	qib_7322_mini_pcs_reset(ppd);
 	/* 2 msec is minimum length of a poll cycle */
-	schedule_delayed_work(&ppd->cpspec->autoneg_work,
-			      msecs_to_jiffies(2));
+	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+			   msecs_to_jiffies(2));
 }
 
 /*
···
 	ib_free_send_mad(send_buf);
 retry:
 	delay = 2 << ppd->cpspec->ipg_tries;
-	schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+			   msecs_to_jiffies(delay));
 }
 
 /*
drivers/infiniband/hw/qib/qib_init.c  +4 -22
···
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
···
 	if (ret)
 		goto bail;
 
-	/*
-	 * We create our own workqueue mainly because we want to be
-	 * able to flush it when devices are being removed. We can't
-	 * use schedule_work()/flush_scheduled_work() because both
-	 * unregister_netdev() and linkwatch_event take the rtnl lock,
-	 * so flush_scheduled_work() can deadlock during device
-	 * removal.
-	 */
-	qib_wq = create_workqueue("qib");
-	if (!qib_wq) {
-		ret = -ENOMEM;
-		goto bail_dev;
-	}
-
 	qib_cq_wq = create_singlethread_workqueue("qib_cq");
 	if (!qib_cq_wq) {
 		ret = -ENOMEM;
-		goto bail_wq;
+		goto bail_dev;
 	}
 
 	/*
···
 	idr_destroy(&qib_unit_table);
 bail_cq_wq:
 	destroy_workqueue(qib_cq_wq);
-bail_wq:
-	destroy_workqueue(qib_wq);
 bail_dev:
 	qib_dev_cleanup();
 bail:
···
 
 	pci_unregister_driver(&qib_driver);
 
-	destroy_workqueue(qib_wq);
 	destroy_workqueue(qib_cq_wq);
 
 	qib_cpulist_count = 0;
···
 
 	if (qib_mini_init || initfail || ret) {
 		qib_stop_timers(dd);
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);
 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
 			dd->f_quiet_serdes(dd->pport + pidx);
 		if (qib_mini_init)
···
 
 	qib_stop_timers(dd);
 
-	/* wait until all of our (qsfp) schedule_work() calls complete */
-	flush_scheduled_work();
+	/* wait until all of our (qsfp) queue_work() calls complete */
+	flush_workqueue(ib_wq);
 
 	ret = qibfs_remove(dd);
 	if (ret)
drivers/infiniband/hw/qib/qib_qsfp.c  +4 -5
···
 		goto bail;
 	/* We see a module, but it may be unwise to look yet. Just schedule */
 	qd->t_insert = get_jiffies_64();
-	schedule_work(&qd->work);
+	queue_work(ib_wq, &qd->work);
 bail:
 	return;
 }
···
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
 	/*
-	 * There is nothing to do here for now. our
-	 * work is scheduled with schedule_work(), and
-	 * flush_scheduled_work() from remove_one will
-	 * block until all work ssetup with schedule_work()
+	 * There is nothing to do here for now. our work is scheduled
+	 * with queue_work(), and flush_workqueue() from remove_one
+	 * will block until all work setup with queue_work()
 	 * completes.
 	 */
 }
drivers/infiniband/hw/qib/qib_verbs.h  +1 -2
···
 		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }
 
-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;
 
 /*
···
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
 	if (qib_send_ok(qp))
-		queue_work(qib_wq, &qp->s_work);
+		queue_work(ib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
drivers/infiniband/ulp/srp/ib_srp.c  +2 -2
···
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
-		schedule_work(&target->work);
+		queue_work(ib_wq, &target->work);
 	}
 	spin_unlock_irq(&target->lock);
 
···
 		 * started before we marked our target ports as
 		 * removed, and any target port removal tasks.
 		 */
-		flush_scheduled_work();
+		flush_workqueue(ib_wq);
 
 		list_for_each_entry_safe(target, tmp_target,
 					 &host->target_list, list) {
include/rdma/ib_verbs.h  +3
···
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
+
+extern struct workqueue_struct *ib_wq;
 
 union ib_gid {
 	u8 raw[16];