Move workqueue exports to where the functions are defined.

Signed-off-by: Dave Jones <davej@redhat.com>

+10 -11
kernel/workqueue.c
···
114 	put_cpu();
115 	return ret;
116 }
117 
118 static void delayed_work_timer_fn(unsigned long __data)
119 {
···
148 	}
149 	return ret;
150 }
151 
152 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
153 			struct work_struct *work, unsigned long delay)
···
170 	}
171 	return ret;
172 }
173 
174 static void run_workqueue(struct cpu_workqueue_struct *cwq)
175 {
···
305 		unlock_cpu_hotplug();
306 	}
307 }
308 
309 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
310 						int cpu)
···
383 	}
384 	return wq;
385 }
386 
387 static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
388 {
···
421 	free_percpu(wq->cpu_wq);
422 	kfree(wq);
423 }
424 
425 static struct workqueue_struct *keventd_wq;
426 
···
429 {
430 	return queue_work(keventd_wq, work);
431 }
432 
433 int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
434 {
435 	return queue_delayed_work(keventd_wq, work, delay);
436 }
437 
438 int schedule_delayed_work_on(int cpu,
439 			struct work_struct *work, unsigned long delay)
440 {
441 	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
442 }
443 
444 /**
445  * schedule_on_each_cpu - call a function on each online CPU from keventd
···
479 {
480 	flush_workqueue(keventd_wq);
481 }
482 
483 /**
484  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
···
636 	BUG_ON(!keventd_wq);
637 }
638 
639 -EXPORT_SYMBOL_GPL(__create_workqueue);
640 -EXPORT_SYMBOL_GPL(queue_work);
641 -EXPORT_SYMBOL_GPL(queue_delayed_work);
642 -EXPORT_SYMBOL_GPL(queue_delayed_work_on);
643 -EXPORT_SYMBOL_GPL(flush_workqueue);
644 -EXPORT_SYMBOL_GPL(destroy_workqueue);
645 -
646 -EXPORT_SYMBOL(schedule_work);
647 -EXPORT_SYMBOL(schedule_delayed_work);
648 -EXPORT_SYMBOL(schedule_delayed_work_on);
649 -EXPORT_SYMBOL(flush_scheduled_work);
···
114 	put_cpu();
115 	return ret;
116 }
117 +EXPORT_SYMBOL_GPL(queue_work);
118 
119 static void delayed_work_timer_fn(unsigned long __data)
120 {
···
147 	}
148 	return ret;
149 }
150 +EXPORT_SYMBOL_GPL(queue_delayed_work);
151 
152 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
153 			struct work_struct *work, unsigned long delay)
···
168 	}
169 	return ret;
170 }
171 +EXPORT_SYMBOL_GPL(queue_delayed_work_on);
172 
173 static void run_workqueue(struct cpu_workqueue_struct *cwq)
174 {
···
302 		unlock_cpu_hotplug();
303 	}
304 }
305 +EXPORT_SYMBOL_GPL(flush_workqueue);
306 
307 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
308 						int cpu)
···
379 	}
380 	return wq;
381 }
382 +EXPORT_SYMBOL_GPL(__create_workqueue);
383 
384 static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
385 {
···
416 	free_percpu(wq->cpu_wq);
417 	kfree(wq);
418 }
419 +EXPORT_SYMBOL_GPL(destroy_workqueue);
420 
421 static struct workqueue_struct *keventd_wq;
422 
···
423 {
424 	return queue_work(keventd_wq, work);
425 }
426 +EXPORT_SYMBOL(schedule_work);
427 
428 int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
429 {
430 	return queue_delayed_work(keventd_wq, work, delay);
431 }
432 +EXPORT_SYMBOL(schedule_delayed_work);
433 
434 int schedule_delayed_work_on(int cpu,
435 			struct work_struct *work, unsigned long delay)
436 {
437 	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
438 }
439 +EXPORT_SYMBOL(schedule_delayed_work_on);
440 
441 /**
442  * schedule_on_each_cpu - call a function on each online CPU from keventd
···
470 {
471 	flush_workqueue(keventd_wq);
472 }
473 +EXPORT_SYMBOL(flush_scheduled_work);
474 
475 /**
476  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
···
626 	BUG_ON(!keventd_wq);
627 }
628 