Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

afs: add afs_wq and use it instead of the system workqueue

flush_scheduled_work() is going away. afs needs to make sure all the
work items it has queued have finished before the module is unloaded,
and there can be an arbitrary number of pending work items. Add afs_wq
and use it as the flush domain instead of the system workqueue.

Also, convert cancel_delayed_work() + flush_scheduled_work() to
cancel_delayed_work_sync() in afs_mntpt_kill_timer().

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: David Howells <dhowells@redhat.com>
Cc: linux-afs@lists.infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Tejun Heo and committed by Linus Torvalds
0ad53eee e1fcc7e2

+38 -28
+6 -6
fs/afs/cmservice.c
··· 289 289 call->server = server; 290 290 291 291 INIT_WORK(&call->work, SRXAFSCB_CallBack); 292 - schedule_work(&call->work); 292 + queue_work(afs_wq, &call->work); 293 293 return 0; 294 294 } 295 295 ··· 336 336 call->server = server; 337 337 338 338 INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); 339 - schedule_work(&call->work); 339 + queue_work(afs_wq, &call->work); 340 340 return 0; 341 341 } 342 342 ··· 367 367 call->server = server; 368 368 369 369 INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); 370 - schedule_work(&call->work); 370 + queue_work(afs_wq, &call->work); 371 371 return 0; 372 372 } 373 373 ··· 400 400 call->state = AFS_CALL_REPLYING; 401 401 402 402 INIT_WORK(&call->work, SRXAFSCB_Probe); 403 - schedule_work(&call->work); 403 + queue_work(afs_wq, &call->work); 404 404 return 0; 405 405 } 406 406 ··· 496 496 call->state = AFS_CALL_REPLYING; 497 497 498 498 INIT_WORK(&call->work, SRXAFSCB_ProbeUuid); 499 - schedule_work(&call->work); 499 + queue_work(afs_wq, &call->work); 500 500 return 0; 501 501 } 502 502 ··· 580 580 call->state = AFS_CALL_REPLYING; 581 581 582 582 INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself); 583 - schedule_work(&call->work); 583 + queue_work(afs_wq, &call->work); 584 584 return 0; 585 585 }
+1
fs/afs/internal.h
··· 577 577 /* 578 578 * main.c 579 579 */ 580 + extern struct workqueue_struct *afs_wq; 580 581 extern struct afs_uuid afs_uuid; 581 582 582 583 /*
+11 -2
fs/afs/main.c
··· 30 30 MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); 31 31 32 32 struct afs_uuid afs_uuid; 33 + struct workqueue_struct *afs_wq; 33 34 34 35 /* 35 36 * get a client UUID ··· 88 87 if (ret < 0) 89 88 return ret; 90 89 90 + /* create workqueue */ 91 + ret = -ENOMEM; 92 + afs_wq = alloc_workqueue("afs", 0, 0); 93 + if (!afs_wq) 94 + return ret; 95 + 91 96 /* register the /proc stuff */ 92 97 ret = afs_proc_init(); 93 98 if (ret < 0) 94 - return ret; 99 + goto error_proc; 95 100 96 101 #ifdef CONFIG_AFS_FSCACHE 97 102 /* we want to be able to cache */ ··· 147 140 error_cache: 148 141 #endif 149 142 afs_proc_cleanup(); 143 + error_proc: 144 + destroy_workqueue(afs_wq); 150 145 rcu_barrier(); 151 146 printk(KERN_ERR "kAFS: failed to register: %d\n", ret); 152 147 return ret; ··· 172 163 afs_purge_servers(); 173 164 afs_callback_update_kill(); 174 165 afs_vlocation_purge(); 175 - flush_scheduled_work(); 166 + destroy_workqueue(afs_wq); 176 167 afs_cell_purge(); 177 168 #ifdef CONFIG_AFS_FSCACHE 178 169 fscache_unregister_netfs(&afs_cache_netfs);
+5 -6
fs/afs/mntpt.c
··· 268 268 path_put(&nd->path); 269 269 nd->path.mnt = newmnt; 270 270 nd->path.dentry = dget(newmnt->mnt_root); 271 - schedule_delayed_work(&afs_mntpt_expiry_timer, 272 - afs_mntpt_expiry_timeout * HZ); 271 + queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, 272 + afs_mntpt_expiry_timeout * HZ); 273 273 break; 274 274 case -EBUSY: 275 275 /* someone else made a mount here whilst we were busy */ ··· 295 295 296 296 if (!list_empty(&afs_vfsmounts)) { 297 297 mark_mounts_for_expiry(&afs_vfsmounts); 298 - schedule_delayed_work(&afs_mntpt_expiry_timer, 299 - afs_mntpt_expiry_timeout * HZ); 298 + queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, 299 + afs_mntpt_expiry_timeout * HZ); 300 300 } 301 301 302 302 _leave(""); ··· 310 310 _enter(""); 311 311 312 312 ASSERT(list_empty(&afs_vfsmounts)); 313 - cancel_delayed_work(&afs_mntpt_expiry_timer); 314 - flush_scheduled_work(); 313 + cancel_delayed_work_sync(&afs_mntpt_expiry_timer); 315 314 }
+1 -1
fs/afs/rxrpc.c
··· 410 410 if (!call) { 411 411 /* its an incoming call for our callback service */ 412 412 skb_queue_tail(&afs_incoming_calls, skb); 413 - schedule_work(&afs_collect_incoming_call_work); 413 + queue_work(afs_wq, &afs_collect_incoming_call_work); 414 414 } else { 415 415 /* route the messages directly to the appropriate call */ 416 416 skb_queue_tail(&call->rx_queue, skb);
+7 -6
fs/afs/server.c
··· 238 238 if (atomic_read(&server->usage) == 0) { 239 239 list_move_tail(&server->grave, &afs_server_graveyard); 240 240 server->time_of_death = get_seconds(); 241 - schedule_delayed_work(&afs_server_reaper, 242 - afs_server_timeout * HZ); 241 + queue_delayed_work(afs_wq, &afs_server_reaper, 242 + afs_server_timeout * HZ); 243 243 } 244 244 spin_unlock(&afs_server_graveyard_lock); 245 245 _leave(" [dead]"); ··· 285 285 expiry = server->time_of_death + afs_server_timeout; 286 286 if (expiry > now) { 287 287 delay = (expiry - now) * HZ; 288 - if (!schedule_delayed_work(&afs_server_reaper, delay)) { 288 + if (!queue_delayed_work(afs_wq, &afs_server_reaper, 289 + delay)) { 289 290 cancel_delayed_work(&afs_server_reaper); 290 - schedule_delayed_work(&afs_server_reaper, 291 - delay); 291 + queue_delayed_work(afs_wq, &afs_server_reaper, 292 + delay); 292 293 } 293 294 break; 294 295 } ··· 324 323 { 325 324 afs_server_timeout = 0; 326 325 cancel_delayed_work(&afs_server_reaper); 327 - schedule_delayed_work(&afs_server_reaper, 0); 326 + queue_delayed_work(afs_wq, &afs_server_reaper, 0); 328 327 }
+7 -7
fs/afs/vlocation.c
··· 507 507 _debug("buried"); 508 508 list_move_tail(&vl->grave, &afs_vlocation_graveyard); 509 509 vl->time_of_death = get_seconds(); 510 - schedule_delayed_work(&afs_vlocation_reap, 511 - afs_vlocation_timeout * HZ); 510 + queue_delayed_work(afs_wq, &afs_vlocation_reap, 511 + afs_vlocation_timeout * HZ); 512 512 513 513 /* suspend updates on this record */ 514 514 if (!list_empty(&vl->update)) { ··· 561 561 if (expiry > now) { 562 562 delay = (expiry - now) * HZ; 563 563 _debug("delay %lu", delay); 564 - if (!schedule_delayed_work(&afs_vlocation_reap, 565 - delay)) { 564 + if (!queue_delayed_work(afs_wq, &afs_vlocation_reap, 565 + delay)) { 566 566 cancel_delayed_work(&afs_vlocation_reap); 567 - schedule_delayed_work(&afs_vlocation_reap, 568 - delay); 567 + queue_delayed_work(afs_wq, &afs_vlocation_reap, 568 + delay); 569 569 } 570 570 break; 571 571 } ··· 620 620 destroy_workqueue(afs_vlocation_update_worker); 621 621 622 622 cancel_delayed_work(&afs_vlocation_reap); 623 - schedule_delayed_work(&afs_vlocation_reap, 0); 623 + queue_delayed_work(afs_wq, &afs_vlocation_reap, 0); 624 624 } 625 625 626 626 /*