Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

WorkStruct: Pass the work_struct pointer instead of context data

Pass the work_struct pointer to the work function rather than context data.
The work function can use container_of() to work out the data.

For the cases where the container of the work_struct may go away the moment the
pending bit is cleared, it is made possible to defer the release of the
structure by deferring the clearing of the pending bit.

To make this work, an extra flag is introduced into the management side of the
work_struct. This governs auto-release of the structure upon execution.

Ordinarily, the work queue executor would release the work_struct for further
scheduling or deallocation by clearing the pending bit prior to jumping to the
work function. This means that, unless the driver makes some guarantee itself
that the work_struct won't go away, the work function may not access anything
else in the work_struct or its container lest they be deallocated. This is a
problem if the auxiliary data is taken away (as done by the last patch).

However, if the pending bit is *not* cleared before jumping to the work
function, then the work function *may* access the work_struct and its container
with no problems. But then the work function must itself release the
work_struct by calling work_release().

In most cases, automatic release is fine, so this is the default. Special
initializers exist for the non-auto-release case (their names end in _NAR).


Signed-off-by: David Howells <dhowells@redhat.com>

+293 -219
+3 -3
arch/x86_64/kernel/mce.c
··· 306 306 */ 307 307 308 308 static int check_interval = 5 * 60; /* 5 minutes */ 309 - static void mcheck_timer(void *data); 310 - static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer, NULL); 309 + static void mcheck_timer(struct work_struct *work); 310 + static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer); 311 311 312 312 static void mcheck_check_cpu(void *info) 313 313 { ··· 315 315 do_machine_check(NULL, 0); 316 316 } 317 317 318 - static void mcheck_timer(void *data) 318 + static void mcheck_timer(struct work_struct *work) 319 319 { 320 320 on_each_cpu(mcheck_check_cpu, NULL, 1, 1); 321 321 schedule_delayed_work(&mcheck_work, check_interval * HZ);
+7 -5
arch/x86_64/kernel/smpboot.c
··· 753 753 } 754 754 755 755 struct create_idle { 756 + struct work_struct work; 756 757 struct task_struct *idle; 757 758 struct completion done; 758 759 int cpu; 759 760 }; 760 761 761 - void do_fork_idle(void *_c_idle) 762 + void do_fork_idle(struct work_struct *work) 762 763 { 763 - struct create_idle *c_idle = _c_idle; 764 + struct create_idle *c_idle = 765 + container_of(work, struct create_idle, work); 764 766 765 767 c_idle->idle = fork_idle(c_idle->cpu); 766 768 complete(&c_idle->done); ··· 777 775 int timeout; 778 776 unsigned long start_rip; 779 777 struct create_idle c_idle = { 778 + .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), 780 779 .cpu = cpu, 781 780 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), 782 781 }; 783 - DECLARE_WORK(work, do_fork_idle, &c_idle); 784 782 785 783 /* allocate memory for gdts of secondary cpus. Hotplug is considered */ 786 784 if (!cpu_gdt_descr[cpu].address && ··· 827 825 * thread. 828 826 */ 829 827 if (!keventd_up() || current_is_keventd()) 830 - work.func(work.data); 828 + c_idle.work.func(&c_idle.work); 831 829 else { 832 - schedule_work(&work); 830 + schedule_work(&c_idle.work); 833 831 wait_for_completion(&c_idle.done); 834 832 } 835 833
+2 -2
arch/x86_64/kernel/time.c
··· 563 563 static unsigned int cpufreq_init = 0; 564 564 static struct work_struct cpufreq_delayed_get_work; 565 565 566 - static void handle_cpufreq_delayed_get(void *v) 566 + static void handle_cpufreq_delayed_get(struct work_struct *v) 567 567 { 568 568 unsigned int cpu; 569 569 for_each_online_cpu(cpu) { ··· 639 639 640 640 static int __init cpufreq_tsc(void) 641 641 { 642 - INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL); 642 + INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get); 643 643 if (!cpufreq_register_notifier(&time_cpufreq_notifier_block, 644 644 CPUFREQ_TRANSITION_NOTIFIER)) 645 645 cpufreq_init = 1;
+4 -3
block/as-iosched.c
··· 1274 1274 * 1275 1275 * FIXME! dispatch queue is not a queue at all! 1276 1276 */ 1277 - static void as_work_handler(void *data) 1277 + static void as_work_handler(struct work_struct *work) 1278 1278 { 1279 - struct request_queue *q = data; 1279 + struct as_data *ad = container_of(work, struct as_data, antic_work); 1280 + struct request_queue *q = ad->q; 1280 1281 unsigned long flags; 1281 1282 1282 1283 spin_lock_irqsave(q->queue_lock, flags); ··· 1333 1332 ad->antic_timer.function = as_antic_timeout; 1334 1333 ad->antic_timer.data = (unsigned long)q; 1335 1334 init_timer(&ad->antic_timer); 1336 - INIT_WORK(&ad->antic_work, as_work_handler, q); 1335 + INIT_WORK(&ad->antic_work, as_work_handler); 1337 1336 1338 1337 INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); 1339 1338 INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
+5 -3
block/cfq-iosched.c
··· 1841 1841 return 1; 1842 1842 } 1843 1843 1844 - static void cfq_kick_queue(void *data) 1844 + static void cfq_kick_queue(struct work_struct *work) 1845 1845 { 1846 - request_queue_t *q = data; 1846 + struct cfq_data *cfqd = 1847 + container_of(work, struct cfq_data, unplug_work); 1848 + request_queue_t *q = cfqd->queue; 1847 1849 unsigned long flags; 1848 1850 1849 1851 spin_lock_irqsave(q->queue_lock, flags); ··· 1989 1987 cfqd->idle_class_timer.function = cfq_idle_class_timer; 1990 1988 cfqd->idle_class_timer.data = (unsigned long) cfqd; 1991 1989 1992 - INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); 1990 + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); 1993 1991 1994 1992 cfqd->cfq_quantum = cfq_quantum; 1995 1993 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
+4 -4
block/ll_rw_blk.c
··· 34 34 */ 35 35 #include <scsi/scsi_cmnd.h> 36 36 37 - static void blk_unplug_work(void *data); 37 + static void blk_unplug_work(struct work_struct *work); 38 38 static void blk_unplug_timeout(unsigned long data); 39 39 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); 40 40 static void init_request_from_bio(struct request *req, struct bio *bio); ··· 227 227 if (q->unplug_delay == 0) 228 228 q->unplug_delay = 1; 229 229 230 - INIT_WORK(&q->unplug_work, blk_unplug_work, q); 230 + INIT_WORK(&q->unplug_work, blk_unplug_work); 231 231 232 232 q->unplug_timer.function = blk_unplug_timeout; 233 233 q->unplug_timer.data = (unsigned long)q; ··· 1631 1631 } 1632 1632 } 1633 1633 1634 - static void blk_unplug_work(void *data) 1634 + static void blk_unplug_work(struct work_struct *work) 1635 1635 { 1636 - request_queue_t *q = data; 1636 + request_queue_t *q = container_of(work, request_queue_t, unplug_work); 1637 1637 1638 1638 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, 1639 1639 q->rq.count[READ] + q->rq.count[WRITE]);
+4 -3
crypto/cryptomgr.c
··· 40 40 char template[CRYPTO_MAX_ALG_NAME]; 41 41 }; 42 42 43 - static void cryptomgr_probe(void *data) 43 + static void cryptomgr_probe(struct work_struct *work) 44 44 { 45 - struct cryptomgr_param *param = data; 45 + struct cryptomgr_param *param = 46 + container_of(work, struct cryptomgr_param, work); 46 47 struct crypto_template *tmpl; 47 48 struct crypto_instance *inst; 48 49 int err; ··· 113 112 param->larval.type = larval->alg.cra_flags; 114 113 param->larval.mask = larval->mask; 115 114 116 - INIT_WORK(&param->work, cryptomgr_probe, param); 115 + INIT_WORK(&param->work, cryptomgr_probe); 117 116 schedule_work(&param->work); 118 117 119 118 return NOTIFY_STOP;
+8 -17
drivers/acpi/osl.c
··· 50 50 struct acpi_os_dpc { 51 51 acpi_osd_exec_callback function; 52 52 void *context; 53 + struct work_struct work; 53 54 }; 54 55 55 56 #ifdef CONFIG_ACPI_CUSTOM_DSDT ··· 565 564 acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number); 566 565 } 567 566 568 - static void acpi_os_execute_deferred(void *context) 567 + static void acpi_os_execute_deferred(struct work_struct *work) 569 568 { 570 - struct acpi_os_dpc *dpc = NULL; 571 - 572 - 573 - dpc = (struct acpi_os_dpc *)context; 569 + struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); 574 570 if (!dpc) { 575 571 printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); 576 572 return; ··· 600 602 { 601 603 acpi_status status = AE_OK; 602 604 struct acpi_os_dpc *dpc; 603 - struct work_struct *task; 604 605 605 606 ACPI_FUNCTION_TRACE("os_queue_for_execution"); 606 607 ··· 612 615 613 616 /* 614 617 * Allocate/initialize DPC structure. Note that this memory will be 615 - * freed by the callee. The kernel handles the tq_struct list in a 618 + * freed by the callee. The kernel handles the work_struct list in a 616 619 * way that allows us to also free its memory inside the callee. 617 620 * Because we may want to schedule several tasks with different 618 621 * parameters we can't use the approach some kernel code uses of 619 - * having a static tq_struct. 620 - * We can save time and code by allocating the DPC and tq_structs 621 - * from the same memory. 622 + * having a static work_struct. 
622 623 */ 623 624 624 - dpc = 625 - kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct), 626 - GFP_ATOMIC); 625 + dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); 627 626 if (!dpc) 628 627 return_ACPI_STATUS(AE_NO_MEMORY); 629 628 630 629 dpc->function = function; 631 630 dpc->context = context; 632 631 633 - task = (void *)(dpc + 1); 634 - INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc); 635 - 636 - if (!queue_work(kacpid_wq, task)) { 632 + INIT_WORK(&dpc->work, acpi_os_execute_deferred); 633 + if (!queue_work(kacpid_wq, &dpc->work)) { 637 634 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 638 635 "Call to queue_work() failed.\n")); 639 636 kfree(dpc);
+11 -9
drivers/ata/libata-core.c
··· 914 914 * ata_port_queue_task - Queue port_task 915 915 * @ap: The ata_port to queue port_task for 916 916 * @fn: workqueue function to be scheduled 917 - * @data: data value to pass to workqueue function 917 + * @data: data for @fn to use 918 918 * @delay: delay time for workqueue function 919 919 * 920 920 * Schedule @fn(@data) for execution after @delay jiffies using ··· 929 929 * LOCKING: 930 930 * Inherited from caller. 931 931 */ 932 - void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, 932 + void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, 933 933 unsigned long delay) 934 934 { 935 935 int rc; ··· 937 937 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) 938 938 return; 939 939 940 - PREPARE_DELAYED_WORK(&ap->port_task, fn, data); 940 + PREPARE_DELAYED_WORK(&ap->port_task, fn); 941 + ap->port_task_data = data; 941 942 942 943 rc = queue_delayed_work(ata_wq, &ap->port_task, delay); 943 944 ··· 4293 4292 return poll_next; 4294 4293 } 4295 4294 4296 - static void ata_pio_task(void *_data) 4295 + static void ata_pio_task(struct work_struct *work) 4297 4296 { 4298 - struct ata_queued_cmd *qc = _data; 4299 - struct ata_port *ap = qc->ap; 4297 + struct ata_port *ap = 4298 + container_of(work, struct ata_port, port_task.work); 4299 + struct ata_queued_cmd *qc = ap->port_task_data; 4300 4300 u8 status; 4301 4301 int poll_next; 4302 4302 ··· 5319 5317 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5320 5318 #endif 5321 5319 5322 - INIT_DELAYED_WORK(&ap->port_task, NULL, NULL); 5323 - INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap); 5324 - INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap); 5320 + INIT_DELAYED_WORK(&ap->port_task, NULL); 5321 + INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5322 + INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5325 5323 INIT_LIST_HEAD(&ap->eh_done_q); 5326 5324 init_waitqueue_head(&ap->eh_wait_q); 5327 5325
+8 -6
drivers/ata/libata-scsi.c
··· 3079 3079 3080 3080 /** 3081 3081 * ata_scsi_hotplug - SCSI part of hotplug 3082 - * @data: Pointer to ATA port to perform SCSI hotplug on 3082 + * @work: Pointer to ATA port to perform SCSI hotplug on 3083 3083 * 3084 3084 * Perform SCSI part of hotplug. It's executed from a separate 3085 3085 * workqueue after EH completes. This is necessary because SCSI ··· 3089 3089 * LOCKING: 3090 3090 * Kernel thread context (may sleep). 3091 3091 */ 3092 - void ata_scsi_hotplug(void *data) 3092 + void ata_scsi_hotplug(struct work_struct *work) 3093 3093 { 3094 - struct ata_port *ap = data; 3094 + struct ata_port *ap = 3095 + container_of(work, struct ata_port, hotplug_task.work); 3095 3096 int i; 3096 3097 3097 3098 if (ap->pflags & ATA_PFLAG_UNLOADING) { ··· 3191 3190 3192 3191 /** 3193 3192 * ata_scsi_dev_rescan - initiate scsi_rescan_device() 3194 - * @data: Pointer to ATA port to perform scsi_rescan_device() 3193 + * @work: Pointer to ATA port to perform scsi_rescan_device() 3195 3194 * 3196 3195 * After ATA pass thru (SAT) commands are executed successfully, 3197 3196 * libata need to propagate the changes to SCSI layer. This ··· 3201 3200 * LOCKING: 3202 3201 * Kernel thread context (may sleep). 3203 3202 */ 3204 - void ata_scsi_dev_rescan(void *data) 3203 + void ata_scsi_dev_rescan(struct work_struct *work) 3205 3204 { 3206 - struct ata_port *ap = data; 3205 + struct ata_port *ap = 3206 + container_of(work, struct ata_port, scsi_rescan_task); 3207 3207 struct ata_device *dev; 3208 3208 unsigned int i; 3209 3209
+2 -2
drivers/ata/libata.h
··· 81 81 82 82 extern void ata_scsi_scan_host(struct ata_port *ap); 83 83 extern int ata_scsi_offline_dev(struct ata_device *dev); 84 - extern void ata_scsi_hotplug(void *data); 84 + extern void ata_scsi_hotplug(struct work_struct *work); 85 85 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 86 86 unsigned int buflen); 87 87 ··· 111 111 unsigned int (*actor) (struct ata_scsi_args *args, 112 112 u8 *rbuf, unsigned int buflen)); 113 113 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); 114 - extern void ata_scsi_dev_rescan(void *data); 114 + extern void ata_scsi_dev_rescan(struct work_struct *work); 115 115 extern int ata_bus_probe(struct ata_port *ap); 116 116 117 117 /* libata-eh.c */
+3 -3
drivers/block/floppy.c
··· 992 992 { 993 993 } 994 994 995 - static DECLARE_WORK(floppy_work, NULL, NULL); 995 + static DECLARE_WORK(floppy_work, NULL); 996 996 997 997 static void schedule_bh(void (*handler) (void)) 998 998 { 999 - PREPARE_WORK(&floppy_work, (work_func_t)handler, NULL); 999 + PREPARE_WORK(&floppy_work, (work_func_t)handler); 1000 1000 schedule_work(&floppy_work); 1001 1001 } 1002 1002 ··· 1008 1008 1009 1009 spin_lock_irqsave(&floppy_lock, flags); 1010 1010 do_floppy = NULL; 1011 - PREPARE_WORK(&floppy_work, (work_func_t)empty, NULL); 1011 + PREPARE_WORK(&floppy_work, (work_func_t)empty); 1012 1012 del_timer(&fd_timer); 1013 1013 spin_unlock_irqrestore(&floppy_lock, flags); 1014 1014 }
+3 -3
drivers/char/random.c
··· 1422 1422 1423 1423 static unsigned int ip_cnt; 1424 1424 1425 - static void rekey_seq_generator(void *private_); 1425 + static void rekey_seq_generator(struct work_struct *work); 1426 1426 1427 - static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator, NULL); 1427 + static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator); 1428 1428 1429 1429 /* 1430 1430 * Lock avoidance: ··· 1438 1438 * happen, and even if that happens only a not perfectly compliant 1439 1439 * ISN is generated, nothing fatal. 1440 1440 */ 1441 - static void rekey_seq_generator(void *private_) 1441 + static void rekey_seq_generator(struct work_struct *work) 1442 1442 { 1443 1443 struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; 1444 1444
+2 -2
drivers/char/sysrq.c
··· 219 219 .enable_mask = SYSRQ_ENABLE_SIGNAL, 220 220 }; 221 221 222 - static void moom_callback(void *ignored) 222 + static void moom_callback(struct work_struct *ignored) 223 223 { 224 224 out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], 225 225 GFP_KERNEL, 0); 226 226 } 227 227 228 - static DECLARE_WORK(moom_work, moom_callback, NULL); 228 + static DECLARE_WORK(moom_work, moom_callback); 229 229 230 230 static void sysrq_handle_moom(int key, struct tty_struct *tty) 231 231 {
+17 -14
drivers/char/tty_io.c
··· 1254 1254 1255 1255 /** 1256 1256 * do_tty_hangup - actual handler for hangup events 1257 - * @data: tty device 1257 + * @work: tty device 1258 1258 * 1259 1259 * This can be called by the "eventd" kernel thread. That is process 1260 1260 * synchronous but doesn't hold any locks, so we need to make sure we ··· 1274 1274 * tasklist_lock to walk task list for hangup event 1275 1275 * 1276 1276 */ 1277 - static void do_tty_hangup(void *data) 1277 + static void do_tty_hangup(struct work_struct *work) 1278 1278 { 1279 - struct tty_struct *tty = (struct tty_struct *) data; 1279 + struct tty_struct *tty = 1280 + container_of(work, struct tty_struct, hangup_work); 1280 1281 struct file * cons_filp = NULL; 1281 1282 struct file *filp, *f = NULL; 1282 1283 struct task_struct *p; ··· 1434 1433 1435 1434 printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf)); 1436 1435 #endif 1437 - do_tty_hangup((void *) tty); 1436 + do_tty_hangup(&tty->hangup_work); 1438 1437 } 1439 1438 EXPORT_SYMBOL(tty_vhangup); 1440 1439 ··· 3305 3304 * Nasty bug: do_SAK is being called in interrupt context. This can 3306 3305 * deadlock. We punt it up to process context. AKPM - 16Mar2001 3307 3306 */ 3308 - static void __do_SAK(void *arg) 3307 + static void __do_SAK(struct work_struct *work) 3309 3308 { 3309 + struct tty_struct *tty = 3310 + container_of(work, struct tty_struct, SAK_work); 3310 3311 #ifdef TTY_SOFT_SAK 3311 3312 tty_hangup(tty); 3312 3313 #else 3313 - struct tty_struct *tty = arg; 3314 3314 struct task_struct *g, *p; 3315 3315 int session; 3316 3316 int i; ··· 3390 3388 { 3391 3389 if (!tty) 3392 3390 return; 3393 - PREPARE_WORK(&tty->SAK_work, __do_SAK, tty); 3391 + PREPARE_WORK(&tty->SAK_work, __do_SAK); 3394 3392 schedule_work(&tty->SAK_work); 3395 3393 } 3396 3394 ··· 3398 3396 3399 3397 /** 3400 3398 * flush_to_ldisc 3401 - * @private_: tty structure passed from work queue. 3399 + * @work: tty structure passed from work queue. 
3402 3400 * 3403 3401 * This routine is called out of the software interrupt to flush data 3404 3402 * from the buffer chain to the line discipline. ··· 3408 3406 * receive_buf method is single threaded for each tty instance. 3409 3407 */ 3410 3408 3411 - static void flush_to_ldisc(void *private_) 3409 + static void flush_to_ldisc(struct work_struct *work) 3412 3410 { 3413 - struct tty_struct *tty = (struct tty_struct *) private_; 3411 + struct tty_struct *tty = 3412 + container_of(work, struct tty_struct, buf.work.work); 3414 3413 unsigned long flags; 3415 3414 struct tty_ldisc *disc; 3416 3415 struct tty_buffer *tbuf, *head; ··· 3556 3553 spin_unlock_irqrestore(&tty->buf.lock, flags); 3557 3554 3558 3555 if (tty->low_latency) 3559 - flush_to_ldisc((void *) tty); 3556 + flush_to_ldisc(&tty->buf.work.work); 3560 3557 else 3561 3558 schedule_delayed_work(&tty->buf.work, 1); 3562 3559 } ··· 3583 3580 tty->overrun_time = jiffies; 3584 3581 tty->buf.head = tty->buf.tail = NULL; 3585 3582 tty_buffer_init(tty); 3586 - INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc, tty); 3583 + INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc); 3587 3584 init_MUTEX(&tty->buf.pty_sem); 3588 3585 mutex_init(&tty->termios_mutex); 3589 3586 init_waitqueue_head(&tty->write_wait); 3590 3587 init_waitqueue_head(&tty->read_wait); 3591 - INIT_WORK(&tty->hangup_work, do_tty_hangup, tty); 3588 + INIT_WORK(&tty->hangup_work, do_tty_hangup); 3592 3589 mutex_init(&tty->atomic_read_lock); 3593 3590 mutex_init(&tty->atomic_write_lock); 3594 3591 spin_lock_init(&tty->read_lock); 3595 3592 INIT_LIST_HEAD(&tty->tty_files); 3596 - INIT_WORK(&tty->SAK_work, NULL, NULL); 3593 + INIT_WORK(&tty->SAK_work, NULL); 3597 3594 } 3598 3595 3599 3596 /*
+3 -3
drivers/char/vt.c
··· 155 155 static void set_vesa_blanking(char __user *p); 156 156 static void set_cursor(struct vc_data *vc); 157 157 static void hide_cursor(struct vc_data *vc); 158 - static void console_callback(void *ignored); 158 + static void console_callback(struct work_struct *ignored); 159 159 static void blank_screen_t(unsigned long dummy); 160 160 static void set_palette(struct vc_data *vc); 161 161 ··· 174 174 static int blankinterval = 10*60*HZ; 175 175 static int vesa_off_interval; 176 176 177 - static DECLARE_WORK(console_work, console_callback, NULL); 177 + static DECLARE_WORK(console_work, console_callback); 178 178 179 179 /* 180 180 * fg_console is the current virtual console, ··· 2154 2154 * with other console code and prevention of re-entrancy is 2155 2155 * ensured with console_sem. 2156 2156 */ 2157 - static void console_callback(void *ignored) 2157 + static void console_callback(struct work_struct *ignored) 2158 2158 { 2159 2159 acquire_console_sem(); 2160 2160
+6 -4
drivers/cpufreq/cpufreq.c
··· 42 42 43 43 /* internal prototypes */ 44 44 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); 45 - static void handle_update(void *data); 45 + static void handle_update(struct work_struct *work); 46 46 47 47 /** 48 48 * Two notifier lists: the "policy" list is involved in the ··· 665 665 mutex_init(&policy->lock); 666 666 mutex_lock(&policy->lock); 667 667 init_completion(&policy->kobj_unregister); 668 - INIT_WORK(&policy->update, handle_update, (void *)(long)cpu); 668 + INIT_WORK(&policy->update, handle_update); 669 669 670 670 /* call driver. From then on the cpufreq must be able 671 671 * to accept all calls to ->verify and ->setpolicy for this CPU ··· 895 895 } 896 896 897 897 898 - static void handle_update(void *data) 898 + static void handle_update(struct work_struct *work) 899 899 { 900 - unsigned int cpu = (unsigned int)(long)data; 900 + struct cpufreq_policy *policy = 901 + container_of(work, struct cpufreq_policy, update); 902 + unsigned int cpu = policy->cpu; 901 903 dprintk("handle_update for cpu %u called\n", cpu); 902 904 cpufreq_update_policy(cpu); 903 905 }
+3 -3
drivers/input/keyboard/atkbd.c
··· 567 567 * interrupt context. 568 568 */ 569 569 570 - static void atkbd_event_work(void *data) 570 + static void atkbd_event_work(struct work_struct *work) 571 571 { 572 - struct atkbd *atkbd = data; 572 + struct atkbd *atkbd = container_of(work, struct atkbd, event_work); 573 573 574 574 mutex_lock(&atkbd->event_mutex); 575 575 ··· 943 943 944 944 atkbd->dev = dev; 945 945 ps2_init(&atkbd->ps2dev, serio); 946 - INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd); 946 + INIT_WORK(&atkbd->event_work, atkbd_event_work); 947 947 mutex_init(&atkbd->event_mutex); 948 948 949 949 switch (serio->id.type) {
+3 -3
drivers/input/serio/libps2.c
··· 251 251 * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.) 252 252 */ 253 253 254 - static void ps2_execute_scheduled_command(void *data) 254 + static void ps2_execute_scheduled_command(struct work_struct *work) 255 255 { 256 - struct ps2work *ps2work = data; 256 + struct ps2work *ps2work = container_of(work, struct ps2work, work); 257 257 258 258 ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command); 259 259 kfree(ps2work); ··· 278 278 ps2work->ps2dev = ps2dev; 279 279 ps2work->command = command; 280 280 memcpy(ps2work->param, param, send); 281 - INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work); 281 + INIT_WORK(&ps2work->work, ps2_execute_scheduled_command); 282 282 283 283 if (!schedule_work(&ps2work->work)) { 284 284 kfree(ps2work);
+5 -5
drivers/net/e1000/e1000_main.c
··· 183 183 static void e1000_enter_82542_rst(struct e1000_adapter *adapter); 184 184 static void e1000_leave_82542_rst(struct e1000_adapter *adapter); 185 185 static void e1000_tx_timeout(struct net_device *dev); 186 - static void e1000_reset_task(struct net_device *dev); 186 + static void e1000_reset_task(struct work_struct *work); 187 187 static void e1000_smartspeed(struct e1000_adapter *adapter); 188 188 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 189 189 struct sk_buff *skb); ··· 908 908 adapter->phy_info_timer.function = &e1000_update_phy_info; 909 909 adapter->phy_info_timer.data = (unsigned long) adapter; 910 910 911 - INIT_WORK(&adapter->reset_task, 912 - (void (*)(void *))e1000_reset_task, netdev); 911 + INIT_WORK(&adapter->reset_task, e1000_reset_task); 913 912 914 913 e1000_check_options(adapter); 915 914 ··· 3153 3154 } 3154 3155 3155 3156 static void 3156 - e1000_reset_task(struct net_device *netdev) 3157 + e1000_reset_task(struct work_struct *work) 3157 3158 { 3158 - struct e1000_adapter *adapter = netdev_priv(netdev); 3159 + struct e1000_adapter *adapter = 3160 + container_of(work, struct e1000_adapter, reset_task); 3159 3161 3160 3162 e1000_reinit_locked(adapter); 3161 3163 }
+1 -1
drivers/pci/pcie/aer/aerdrv.c
··· 160 160 rpc->e_lock = SPIN_LOCK_UNLOCKED; 161 161 162 162 rpc->rpd = dev; 163 - INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev); 163 + INIT_WORK(&rpc->dpc_handler, aer_isr); 164 164 rpc->prod_idx = rpc->cons_idx = 0; 165 165 mutex_init(&rpc->rpc_mutex); 166 166 init_waitqueue_head(&rpc->wait_release);
+1 -1
drivers/pci/pcie/aer/aerdrv.h
··· 118 118 extern void aer_enable_rootport(struct aer_rpc *rpc); 119 119 extern void aer_delete_rootport(struct aer_rpc *rpc); 120 120 extern int aer_init(struct pcie_device *dev); 121 - extern void aer_isr(void *context); 121 + extern void aer_isr(struct work_struct *work); 122 122 extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); 123 123 extern int aer_osc_setup(struct pci_dev *dev); 124 124
+4 -4
drivers/pci/pcie/aer/aerdrv_core.c
··· 690 690 691 691 /** 692 692 * aer_isr - consume errors detected by root port 693 - * @context: pointer to a private data of pcie device 693 + * @work: definition of this work item 694 694 * 695 695 * Invoked, as DPC, when root port records new detected error 696 696 **/ 697 - void aer_isr(void *context) 697 + void aer_isr(struct work_struct *work) 698 698 { 699 - struct pcie_device *p_device = (struct pcie_device *) context; 700 - struct aer_rpc *rpc = get_service_data(p_device); 699 + struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); 700 + struct pcie_device *p_device = rpc->rpd; 701 701 struct aer_err_source *e_src; 702 702 703 703 mutex_lock(&rpc->rpc_mutex);
+4 -3
drivers/scsi/scsi_scan.c
··· 362 362 goto retry; 363 363 } 364 364 365 - static void scsi_target_reap_usercontext(void *data) 365 + static void scsi_target_reap_usercontext(struct work_struct *work) 366 366 { 367 - struct scsi_target *starget = data; 367 + struct scsi_target *starget = 368 + container_of(work, struct scsi_target, ew.work); 368 369 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 369 370 unsigned long flags; 370 371 ··· 401 400 starget->state = STARGET_DEL; 402 401 spin_unlock_irqrestore(shost->host_lock, flags); 403 402 execute_in_process_context(scsi_target_reap_usercontext, 404 - starget, &starget->ew); 403 + &starget->ew); 405 404 return; 406 405 407 406 }
+5 -5
drivers/scsi/scsi_sysfs.c
··· 218 218 put_device(&sdev->sdev_gendev); 219 219 } 220 220 221 - static void scsi_device_dev_release_usercontext(void *data) 221 + static void scsi_device_dev_release_usercontext(struct work_struct *work) 222 222 { 223 - struct device *dev = data; 224 223 struct scsi_device *sdev; 225 224 struct device *parent; 226 225 struct scsi_target *starget; 227 226 unsigned long flags; 228 227 229 - parent = dev->parent; 230 - sdev = to_scsi_device(dev); 228 + sdev = container_of(work, struct scsi_device, ew.work); 229 + 230 + parent = sdev->sdev_gendev.parent; 231 231 starget = to_scsi_target(parent); 232 232 233 233 spin_lock_irqsave(sdev->host->host_lock, flags); ··· 258 258 static void scsi_device_dev_release(struct device *dev) 259 259 { 260 260 struct scsi_device *sdp = to_scsi_device(dev); 261 - execute_in_process_context(scsi_device_dev_release_usercontext, dev, 261 + execute_in_process_context(scsi_device_dev_release_usercontext, 262 262 &sdp->ew); 263 263 } 264 264
+7 -7
fs/aio.c
··· 53 53 static struct workqueue_struct *aio_wq; 54 54 55 55 /* Used for rare fput completion. */ 56 - static void aio_fput_routine(void *); 57 - static DECLARE_WORK(fput_work, aio_fput_routine, NULL); 56 + static void aio_fput_routine(struct work_struct *); 57 + static DECLARE_WORK(fput_work, aio_fput_routine); 58 58 59 59 static DEFINE_SPINLOCK(fput_lock); 60 60 static LIST_HEAD(fput_head); 61 61 62 - static void aio_kick_handler(void *); 62 + static void aio_kick_handler(struct work_struct *); 63 63 static void aio_queue_work(struct kioctx *); 64 64 65 65 /* aio_setup ··· 227 227 228 228 INIT_LIST_HEAD(&ctx->active_reqs); 229 229 INIT_LIST_HEAD(&ctx->run_list); 230 - INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler, ctx); 230 + INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler); 231 231 232 232 if (aio_setup_ring(ctx) < 0) 233 233 goto out_freectx; ··· 470 470 wake_up(&ctx->wait); 471 471 } 472 472 473 - static void aio_fput_routine(void *data) 473 + static void aio_fput_routine(struct work_struct *data) 474 474 { 475 475 spin_lock_irq(&fput_lock); 476 476 while (likely(!list_empty(&fput_head))) { ··· 859 859 * space. 860 860 * Run on aiod's context. 861 861 */ 862 - static void aio_kick_handler(void *data) 862 + static void aio_kick_handler(struct work_struct *work) 863 863 { 864 - struct kioctx *ctx = data; 864 + struct kioctx *ctx = container_of(work, struct kioctx, wq.work); 865 865 mm_segment_t oldfs = get_fs(); 866 866 int requeue; 867 867
+3 -3
fs/bio.c
··· 955 955 * run one bio_put() against the BIO. 956 956 */ 957 957 958 - static void bio_dirty_fn(void *data); 958 + static void bio_dirty_fn(struct work_struct *work); 959 959 960 - static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL); 960 + static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); 961 961 static DEFINE_SPINLOCK(bio_dirty_lock); 962 962 static struct bio *bio_dirty_list; 963 963 964 964 /* 965 965 * This runs in process context 966 966 */ 967 - static void bio_dirty_fn(void *data) 967 + static void bio_dirty_fn(struct work_struct *work) 968 968 { 969 969 unsigned long flags; 970 970 struct bio *bio;
+4 -2
fs/file.c
··· 91 91 spin_unlock(&fddef->lock); 92 92 } 93 93 94 - static void free_fdtable_work(struct fdtable_defer *f) 94 + static void free_fdtable_work(struct work_struct *work) 95 95 { 96 + struct fdtable_defer *f = 97 + container_of(work, struct fdtable_defer, wq); 96 98 struct fdtable *fdt; 97 99 98 100 spin_lock_bh(&f->lock); ··· 353 351 { 354 352 struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); 355 353 spin_lock_init(&fddef->lock); 356 - INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef); 354 + INIT_WORK(&fddef->wq, free_fdtable_work); 357 355 init_timer(&fddef->timer); 358 356 fddef->timer.data = (unsigned long)fddef; 359 357 fddef->timer.function = fdtable_timer;
+1 -1
fs/nfs/client.c
··· 143 143 INIT_LIST_HEAD(&clp->cl_state_owners); 144 144 INIT_LIST_HEAD(&clp->cl_unused); 145 145 spin_lock_init(&clp->cl_lock); 146 - INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state, clp); 146 + INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); 147 147 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); 148 148 clp->cl_boot_time = CURRENT_TIME; 149 149 clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
+4 -5
fs/nfs/namespace.c
··· 18 18 19 19 #define NFSDBG_FACILITY NFSDBG_VFS 20 20 21 - static void nfs_expire_automounts(void *list); 21 + static void nfs_expire_automounts(struct work_struct *work); 22 22 23 23 LIST_HEAD(nfs_automount_list); 24 - static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts, 25 - &nfs_automount_list); 24 + static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts); 26 25 int nfs_mountpoint_expiry_timeout = 500 * HZ; 27 26 28 27 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, ··· 164 165 .follow_link = nfs_follow_mountpoint, 165 166 }; 166 167 167 - static void nfs_expire_automounts(void *data) 168 + static void nfs_expire_automounts(struct work_struct *work) 168 169 { 169 - struct list_head *list = (struct list_head *)data; 170 + struct list_head *list = &nfs_automount_list; 170 171 171 172 mark_mounts_for_expiry(list); 172 173 if (!list_empty(list))
+1 -1
fs/nfs/nfs4_fs.h
··· 185 185 extern void nfs4_schedule_state_renewal(struct nfs_client *); 186 186 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); 187 187 extern void nfs4_kill_renewd(struct nfs_client *); 188 - extern void nfs4_renew_state(void *); 188 + extern void nfs4_renew_state(struct work_struct *); 189 189 190 190 /* nfs4state.c */ 191 191 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
+3 -2
fs/nfs/nfs4renewd.c
··· 59 59 #define NFSDBG_FACILITY NFSDBG_PROC 60 60 61 61 void 62 - nfs4_renew_state(void *data) 62 + nfs4_renew_state(struct work_struct *work) 63 63 { 64 - struct nfs_client *clp = (struct nfs_client *)data; 64 + struct nfs_client *clp = 65 + container_of(work, struct nfs_client, cl_renewd.work); 65 66 struct rpc_cred *cred; 66 67 long lease, timeout; 67 68 unsigned long last, now;
+2 -1
include/linux/libata.h
··· 568 568 struct ata_host *host; 569 569 struct device *dev; 570 570 571 + void *port_task_data; 571 572 struct delayed_work port_task; 572 573 struct delayed_work hotplug_task; 573 574 struct work_struct scsi_rescan_task; ··· 748 747 extern unsigned int ata_busy_sleep(struct ata_port *ap, 749 748 unsigned long timeout_pat, 750 749 unsigned long timeout); 751 - extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), 750 + extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn, 752 751 void *data, unsigned long delay); 753 752 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 754 753 unsigned long interval_msec,
+75 -24
include/linux/workqueue.h
··· 11 11 12 12 struct workqueue_struct; 13 13 14 - typedef void (*work_func_t)(void *data); 14 + struct work_struct; 15 + typedef void (*work_func_t)(struct work_struct *work); 15 16 16 17 struct work_struct { 17 - /* the first word is the work queue pointer and the pending flag 18 - * rolled into one */ 18 + /* the first word is the work queue pointer and the flags rolled into 19 + * one */ 19 20 unsigned long management; 20 21 #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ 22 + #define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */ 21 23 #define WORK_STRUCT_FLAG_MASK (3UL) 22 24 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) 23 25 struct list_head entry; 24 26 work_func_t func; 25 - void *data; 26 27 }; 27 28 28 29 struct delayed_work { ··· 35 34 struct work_struct work; 36 35 }; 37 36 38 - #define __WORK_INITIALIZER(n, f, d) { \ 37 + #define __WORK_INITIALIZER(n, f) { \ 38 + .management = 0, \ 39 39 .entry = { &(n).entry, &(n).entry }, \ 40 40 .func = (f), \ 41 - .data = (d), \ 42 41 } 43 42 44 - #define __DELAYED_WORK_INITIALIZER(n, f, d) { \ 45 - .work = __WORK_INITIALIZER((n).work, (f), (d)), \ 43 + #define __WORK_INITIALIZER_NAR(n, f) { \ 44 + .management = (1 << WORK_STRUCT_NOAUTOREL), \ 45 + .entry = { &(n).entry, &(n).entry }, \ 46 + .func = (f), \ 47 + } 48 + 49 + #define __DELAYED_WORK_INITIALIZER(n, f) { \ 50 + .work = __WORK_INITIALIZER((n).work, (f)), \ 46 51 .timer = TIMER_INITIALIZER(NULL, 0, 0), \ 47 52 } 48 53 49 - #define DECLARE_WORK(n, f, d) \ 50 - struct work_struct n = __WORK_INITIALIZER(n, f, d) 54 + #define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \ 55 + .work = __WORK_INITIALIZER_NAR((n).work, (f)), \ 56 + .timer = TIMER_INITIALIZER(NULL, 0, 0), \ 57 + } 51 58 52 - #define DECLARE_DELAYED_WORK(n, f, d) \ 53 - struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, d) 59 + #define DECLARE_WORK(n, f) \ 60 + struct work_struct n = __WORK_INITIALIZER(n, f) 61 + 62 + #define 
DECLARE_WORK_NAR(n, f) \ 63 + struct work_struct n = __WORK_INITIALIZER_NAR(n, f) 64 + 65 + #define DECLARE_DELAYED_WORK(n, f) \ 66 + struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) 67 + 68 + #define DECLARE_DELAYED_WORK_NAR(n, f) \ 69 + struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f) 54 70 55 71 /* 56 - * initialize a work item's function and data pointers 72 + * initialize a work item's function pointer 57 73 */ 58 - #define PREPARE_WORK(_work, _func, _data) \ 74 + #define PREPARE_WORK(_work, _func) \ 59 75 do { \ 60 76 (_work)->func = (_func); \ 61 - (_work)->data = (_data); \ 62 77 } while (0) 63 78 64 - #define PREPARE_DELAYED_WORK(_work, _func, _data) \ 65 - PREPARE_WORK(&(_work)->work, (_func), (_data)) 79 + #define PREPARE_DELAYED_WORK(_work, _func) \ 80 + PREPARE_WORK(&(_work)->work, (_func)) 66 81 67 82 /* 68 83 * initialize all of a work item in one go 69 84 */ 70 - #define INIT_WORK(_work, _func, _data) \ 85 + #define INIT_WORK(_work, _func) \ 71 86 do { \ 72 - INIT_LIST_HEAD(&(_work)->entry); \ 73 87 (_work)->management = 0; \ 74 - PREPARE_WORK((_work), (_func), (_data)); \ 88 + INIT_LIST_HEAD(&(_work)->entry); \ 89 + PREPARE_WORK((_work), (_func)); \ 75 90 } while (0) 76 91 77 - #define INIT_DELAYED_WORK(_work, _func, _data) \ 92 + #define INIT_WORK_NAR(_work, _func) \ 93 + do { \ 94 + (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \ 95 + INIT_LIST_HEAD(&(_work)->entry); \ 96 + PREPARE_WORK((_work), (_func)); \ 97 + } while (0) 98 + 99 + #define INIT_DELAYED_WORK(_work, _func) \ 78 100 do { \ 79 - INIT_WORK(&(_work)->work, (_func), (_data)); \ 101 + INIT_WORK(&(_work)->work, (_func)); \ 102 + init_timer(&(_work)->timer); \ 103 + } while (0) 104 + 105 + #define INIT_DELAYED_WORK_NAR(_work, _func) \ 106 + do { \ 107 + INIT_WORK_NAR(&(_work)->work, (_func)); \ 80 108 init_timer(&(_work)->timer); \ 81 109 } while (0) 82 110 ··· 123 93 */ 124 94 #define delayed_work_pending(work) \ 125 95 test_bit(WORK_STRUCT_PENDING, 
&(work)->work.management) 96 + 97 + /** 98 + * work_release - Release a work item under execution 99 + * @work: The work item to release 100 + * 101 + * This is used to release a work item that has been initialised with automatic 102 + * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work 103 + * function the opportunity to grab auxiliary data from the container of the 104 + * work_struct before clearing the pending bit as the work_struct may be 105 + * subject to deallocation the moment the pending bit is cleared. 106 + * 107 + * In such a case, this should be called in the work function after it has 108 + * fetched any data it may require from the containter of the work_struct. 109 + * After this function has been called, the work_struct may be scheduled for 110 + * further execution or it may be deallocated unless other precautions are 111 + * taken. 112 + * 113 + * This should also be used to release a delayed work item. 114 + */ 115 + #define work_release(work) \ 116 + clear_bit(WORK_STRUCT_PENDING, &(work)->management) 126 117 127 118 128 119 extern struct workqueue_struct *__create_workqueue(const char *name, ··· 163 112 extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); 164 113 165 114 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); 166 - extern int schedule_on_each_cpu(work_func_t func, void *info); 115 + extern int schedule_on_each_cpu(work_func_t func); 167 116 extern void flush_scheduled_work(void); 168 117 extern int current_is_keventd(void); 169 118 extern int keventd_up(void); ··· 172 121 void cancel_rearming_delayed_work(struct delayed_work *work); 173 122 void cancel_rearming_delayed_workqueue(struct workqueue_struct *, 174 123 struct delayed_work *); 175 - int execute_in_process_context(work_func_t fn, void *, struct execute_work *); 124 + int execute_in_process_context(work_func_t fn, struct execute_work *); 176 125 177 126 /* 178 127 * 
Kill off a pending schedule_delayed_work(). Note that the work callback
+1 -1
include/net/inet_timewait_sock.h
··· 84 84 }; 85 85 86 86 extern void inet_twdr_hangman(unsigned long data); 87 - extern void inet_twdr_twkill_work(void *data); 87 + extern void inet_twdr_twkill_work(struct work_struct *work); 88 88 extern void inet_twdr_twcal_tick(unsigned long data); 89 89 90 90 #if (BITS_PER_LONG == 64)
+6 -1
ipc/util.c
··· 514 514 container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; 515 515 } 516 516 517 + static void ipc_do_vfree(struct work_struct *work) 518 + { 519 + vfree(container_of(work, struct ipc_rcu_sched, work)); 520 + } 521 + 517 522 /** 518 523 * ipc_schedule_free - free ipc + rcu space 519 524 * @head: RCU callback structure for queued work ··· 533 528 struct ipc_rcu_sched *sched = 534 529 container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); 535 530 536 - INIT_WORK(&sched->work, vfree, sched); 531 + INIT_WORK(&sched->work, ipc_do_vfree); 537 532 schedule_work(&sched->work); 538 533 } 539 534
+10 -6
kernel/kmod.c
··· 114 114 #endif /* CONFIG_KMOD */ 115 115 116 116 struct subprocess_info { 117 + struct work_struct work; 117 118 struct completion *complete; 118 119 char *path; 119 120 char **argv; ··· 222 221 } 223 222 224 223 /* This is run by khelper thread */ 225 - static void __call_usermodehelper(void *data) 224 + static void __call_usermodehelper(struct work_struct *work) 226 225 { 227 - struct subprocess_info *sub_info = data; 226 + struct subprocess_info *sub_info = 227 + container_of(work, struct subprocess_info, work); 228 228 pid_t pid; 229 229 int wait = sub_info->wait; 230 230 ··· 266 264 { 267 265 DECLARE_COMPLETION_ONSTACK(done); 268 266 struct subprocess_info sub_info = { 267 + .work = __WORK_INITIALIZER(sub_info.work, 268 + __call_usermodehelper), 269 269 .complete = &done, 270 270 .path = path, 271 271 .argv = argv, ··· 276 272 .wait = wait, 277 273 .retval = 0, 278 274 }; 279 - DECLARE_WORK(work, __call_usermodehelper, &sub_info); 280 275 281 276 if (!khelper_wq) 282 277 return -EBUSY; ··· 283 280 if (path[0] == '\0') 284 281 return 0; 285 282 286 - queue_work(khelper_wq, &work); 283 + queue_work(khelper_wq, &sub_info.work); 287 284 wait_for_completion(&done); 288 285 return sub_info.retval; 289 286 } ··· 294 291 { 295 292 DECLARE_COMPLETION(done); 296 293 struct subprocess_info sub_info = { 294 + .work = __WORK_INITIALIZER(sub_info.work, 295 + __call_usermodehelper), 297 296 .complete = &done, 298 297 .path = path, 299 298 .argv = argv, ··· 303 298 .retval = 0, 304 299 }; 305 300 struct file *f; 306 - DECLARE_WORK(work, __call_usermodehelper, &sub_info); 307 301 308 302 if (!khelper_wq) 309 303 return -EBUSY; ··· 322 318 } 323 319 sub_info.stdin = f; 324 320 325 - queue_work(khelper_wq, &work); 321 + queue_work(khelper_wq, &sub_info.work); 326 322 wait_for_completion(&done); 327 323 return sub_info.retval; 328 324 }
+8 -5
kernel/kthread.c
··· 31 31 /* Result passed back to kthread_create() from keventd. */ 32 32 struct task_struct *result; 33 33 struct completion done; 34 + 35 + struct work_struct work; 34 36 }; 35 37 36 38 struct kthread_stop_info ··· 113 111 } 114 112 115 113 /* We are keventd: create a thread. */ 116 - static void keventd_create_kthread(void *_create) 114 + static void keventd_create_kthread(struct work_struct *work) 117 115 { 118 - struct kthread_create_info *create = _create; 116 + struct kthread_create_info *create = 117 + container_of(work, struct kthread_create_info, work); 119 118 int pid; 120 119 121 120 /* We want our own signal handler (we take no signals by default). */ ··· 157 154 ...) 158 155 { 159 156 struct kthread_create_info create; 160 - DECLARE_WORK(work, keventd_create_kthread, &create); 161 157 162 158 create.threadfn = threadfn; 163 159 create.data = data; 164 160 init_completion(&create.started); 165 161 init_completion(&create.done); 162 + INIT_WORK(&create.work, keventd_create_kthread); 166 163 167 164 /* 168 165 * The workqueue needs to start up first: 169 166 */ 170 167 if (!helper_wq) 171 - work.func(work.data); 168 + create.work.func(&create.work); 172 169 else { 173 - queue_work(helper_wq, &work); 170 + queue_work(helper_wq, &create.work); 174 171 wait_for_completion(&create.done); 175 172 } 176 173 if (!IS_ERR(create.result)) {
+2 -2
kernel/power/poweroff.c
··· 16 16 * callback we use. 17 17 */ 18 18 19 - static void do_poweroff(void *dummy) 19 + static void do_poweroff(struct work_struct *dummy) 20 20 { 21 21 kernel_power_off(); 22 22 } 23 23 24 - static DECLARE_WORK(poweroff_work, do_poweroff, NULL); 24 + static DECLARE_WORK(poweroff_work, do_poweroff); 25 25 26 26 static void handle_poweroff(int key, struct tty_struct *tty) 27 27 {
+2 -2
kernel/sys.c
··· 880 880 return 0; 881 881 } 882 882 883 - static void deferred_cad(void *dummy) 883 + static void deferred_cad(struct work_struct *dummy) 884 884 { 885 885 kernel_restart(NULL); 886 886 } ··· 892 892 */ 893 893 void ctrl_alt_del(void) 894 894 { 895 - static DECLARE_WORK(cad_work, deferred_cad, NULL); 895 + static DECLARE_WORK(cad_work, deferred_cad); 896 896 897 897 if (C_A_D) 898 898 schedule_work(&cad_work);
+8 -11
kernel/workqueue.c
··· 241 241 struct work_struct *work = list_entry(cwq->worklist.next, 242 242 struct work_struct, entry); 243 243 work_func_t f = work->func; 244 - void *data = work->data; 245 244 246 245 list_del_init(cwq->worklist.next); 247 246 spin_unlock_irqrestore(&cwq->lock, flags); 248 247 249 248 BUG_ON(get_wq_data(work) != cwq); 250 - clear_bit(WORK_STRUCT_PENDING, &work->management); 251 - f(data); 249 + if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management)) 250 + work_release(work); 251 + f(work); 252 252 253 253 spin_lock_irqsave(&cwq->lock, flags); 254 254 cwq->remove_sequence++; ··· 527 527 /** 528 528 * schedule_on_each_cpu - call a function on each online CPU from keventd 529 529 * @func: the function to call 530 - * @info: a pointer to pass to func() 531 530 * 532 531 * Returns zero on success. 533 532 * Returns -ve errno on failure. ··· 535 536 * 536 537 * schedule_on_each_cpu() is very slow. 537 538 */ 538 - int schedule_on_each_cpu(work_func_t func, void *info) 539 + int schedule_on_each_cpu(work_func_t func) 539 540 { 540 541 int cpu; 541 542 struct work_struct *works; ··· 546 547 547 548 mutex_lock(&workqueue_mutex); 548 549 for_each_online_cpu(cpu) { 549 - INIT_WORK(per_cpu_ptr(works, cpu), func, info); 550 + INIT_WORK(per_cpu_ptr(works, cpu), func); 550 551 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), 551 552 per_cpu_ptr(works, cpu)); 552 553 } ··· 590 591 /** 591 592 * execute_in_process_context - reliably execute the routine with user context 592 593 * @fn: the function to execute 593 - * @data: data to pass to the function 594 594 * @ew: guaranteed storage for the execute work structure (must 595 595 * be available when the work executes) 596 596 * ··· 599 601 * Returns: 0 - function was executed 600 602 * 1 - function was scheduled for execution 601 603 */ 602 - int execute_in_process_context(work_func_t fn, void *data, 603 - struct execute_work *ew) 604 + int execute_in_process_context(work_func_t fn, struct execute_work *ew) 604 605 { 605 606 
if (!in_interrupt()) { 606 - fn(data); 607 + fn(&ew->work); 607 608 return 0; 608 609 } 609 610 610 - INIT_WORK(&ew->work, fn, data); 611 + INIT_WORK(&ew->work, fn); 611 612 schedule_work(&ew->work); 612 613 613 614 return 1;
+3 -3
mm/slab.c
··· 313 313 static void free_block(struct kmem_cache *cachep, void **objpp, int len, 314 314 int node); 315 315 static int enable_cpucache(struct kmem_cache *cachep); 316 - static void cache_reap(void *unused); 316 + static void cache_reap(struct work_struct *unused); 317 317 318 318 /* 319 319 * This function must be completely optimized away if a constant is passed to ··· 925 925 */ 926 926 if (keventd_up() && reap_work->work.func == NULL) { 927 927 init_reap_node(cpu); 928 - INIT_DELAYED_WORK(reap_work, cache_reap, NULL); 928 + INIT_DELAYED_WORK(reap_work, cache_reap); 929 929 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); 930 930 } 931 931 } ··· 3815 3815 * If we cannot acquire the cache chain mutex then just give up - we'll try 3816 3816 * again on the next iteration. 3817 3817 */ 3818 - static void cache_reap(void *unused) 3818 + static void cache_reap(struct work_struct *unused) 3819 3819 { 3820 3820 struct kmem_cache *searchp; 3821 3821 struct kmem_list3 *l3;
+3 -2
net/ipv4/inet_timewait_sock.c
··· 197 197 198 198 extern void twkill_slots_invalid(void); 199 199 200 - void inet_twdr_twkill_work(void *data) 200 + void inet_twdr_twkill_work(struct work_struct *work) 201 201 { 202 - struct inet_timewait_death_row *twdr = data; 202 + struct inet_timewait_death_row *twdr = 203 + container_of(work, struct inet_timewait_death_row, twkill_work); 203 204 int i; 204 205 205 206 if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
+1 -2
net/ipv4/tcp_minisocks.c
··· 45 45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, 46 46 (unsigned long)&tcp_death_row), 47 47 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, 48 - inet_twdr_twkill_work, 49 - &tcp_death_row), 48 + inet_twdr_twkill_work), 50 49 /* Short-time timewait calendar */ 51 50 52 51 .twcal_hand = -1,
+3 -3
net/sunrpc/cache.c
··· 284 284 static struct file_operations content_file_operations; 285 285 static struct file_operations cache_flush_operations; 286 286 287 - static void do_cache_clean(void *data); 288 - static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL); 287 + static void do_cache_clean(struct work_struct *work); 288 + static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean); 289 289 290 290 void cache_register(struct cache_detail *cd) 291 291 { ··· 461 461 /* 462 462 * We want to regularly clean the cache, so we need to schedule some work ... 463 463 */ 464 - static void do_cache_clean(void *data) 464 + static void do_cache_clean(struct work_struct *work) 465 465 { 466 466 int delay = 5; 467 467 if (cache_clean() == -1)
+4 -3
net/sunrpc/rpc_pipe.c
··· 54 54 } 55 55 56 56 static void 57 - rpc_timeout_upcall_queue(void *data) 57 + rpc_timeout_upcall_queue(struct work_struct *work) 58 58 { 59 59 LIST_HEAD(free_list); 60 - struct rpc_inode *rpci = (struct rpc_inode *)data; 60 + struct rpc_inode *rpci = 61 + container_of(work, struct rpc_inode, queue_timeout.work); 61 62 struct inode *inode = &rpci->vfs_inode; 62 63 void (*destroy_msg)(struct rpc_pipe_msg *); 63 64 ··· 839 838 rpci->pipelen = 0; 840 839 init_waitqueue_head(&rpci->waitq); 841 840 INIT_DELAYED_WORK(&rpci->queue_timeout, 842 - rpc_timeout_upcall_queue, rpci); 841 + rpc_timeout_upcall_queue); 843 842 rpci->ops = NULL; 844 843 } 845 844 }
+4 -4
net/sunrpc/sched.c
··· 41 41 42 42 static void __rpc_default_timer(struct rpc_task *task); 43 43 static void rpciod_killall(void); 44 - static void rpc_async_schedule(void *); 44 + static void rpc_async_schedule(struct work_struct *); 45 45 46 46 /* 47 47 * RPC tasks sit here while waiting for conditions to improve. ··· 305 305 if (RPC_IS_ASYNC(task)) { 306 306 int status; 307 307 308 - INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task); 308 + INIT_WORK(&task->u.tk_work, rpc_async_schedule); 309 309 status = queue_work(task->tk_workqueue, &task->u.tk_work); 310 310 if (status < 0) { 311 311 printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); ··· 695 695 return __rpc_execute(task); 696 696 } 697 697 698 - static void rpc_async_schedule(void *arg) 698 + static void rpc_async_schedule(struct work_struct *work) 699 699 { 700 - __rpc_execute((struct rpc_task *)arg); 700 + __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); 701 701 } 702 702 703 703 /**
+4 -3
net/sunrpc/xprt.c
··· 479 479 return status; 480 480 } 481 481 482 - static void xprt_autoclose(void *args) 482 + static void xprt_autoclose(struct work_struct *work) 483 483 { 484 - struct rpc_xprt *xprt = (struct rpc_xprt *)args; 484 + struct rpc_xprt *xprt = 485 + container_of(work, struct rpc_xprt, task_cleanup); 485 486 486 487 xprt_disconnect(xprt); 487 488 xprt->ops->close(xprt); ··· 933 932 934 933 INIT_LIST_HEAD(&xprt->free); 935 934 INIT_LIST_HEAD(&xprt->recv); 936 - INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); 935 + INIT_WORK(&xprt->task_cleanup, xprt_autoclose); 937 936 init_timer(&xprt->timer); 938 937 xprt->timer.function = xprt_init_autodisconnect; 939 938 xprt->timer.data = (unsigned long) xprt;
+10 -8
net/sunrpc/xprtsock.c
··· 1060 1060 1061 1061 /** 1062 1062 * xs_udp_connect_worker - set up a UDP socket 1063 - * @args: RPC transport to connect 1063 + * @work: RPC transport to connect 1064 1064 * 1065 1065 * Invoked by a work queue tasklet. 1066 1066 */ 1067 - static void xs_udp_connect_worker(void *args) 1067 + static void xs_udp_connect_worker(struct work_struct *work) 1068 1068 { 1069 - struct rpc_xprt *xprt = (struct rpc_xprt *) args; 1069 + struct rpc_xprt *xprt = 1070 + container_of(work, struct rpc_xprt, connect_worker.work); 1070 1071 struct socket *sock = xprt->sock; 1071 1072 int err, status = -EIO; 1072 1073 ··· 1145 1144 1146 1145 /** 1147 1146 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint 1148 - * @args: RPC transport to connect 1147 + * @work: RPC transport to connect 1149 1148 * 1150 1149 * Invoked by a work queue tasklet. 1151 1150 */ 1152 - static void xs_tcp_connect_worker(void *args) 1151 + static void xs_tcp_connect_worker(struct work_struct *work) 1153 1152 { 1154 - struct rpc_xprt *xprt = (struct rpc_xprt *)args; 1153 + struct rpc_xprt *xprt = 1154 + container_of(work, struct rpc_xprt, connect_worker.work); 1155 1155 struct socket *sock = xprt->sock; 1156 1156 int err, status = -EIO; 1157 1157 ··· 1377 1375 /* XXX: header size can vary due to auth type, IPv6, etc. 
*/ 1378 1376 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 1379 1377 1380 - INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); 1378 + INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker); 1381 1379 xprt->bind_timeout = XS_BIND_TO; 1382 1380 xprt->connect_timeout = XS_UDP_CONN_TO; 1383 1381 xprt->reestablish_timeout = XS_UDP_REEST_TO; ··· 1422 1420 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 1423 1421 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 1424 1422 1425 - INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); 1423 + INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker); 1426 1424 xprt->bind_timeout = XS_BIND_TO; 1427 1425 xprt->connect_timeout = XS_TCP_CONN_TO; 1428 1426 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+3 -3
security/keys/key.c
··· 30 30 static LIST_HEAD(key_types_list); 31 31 static DECLARE_RWSEM(key_types_sem); 32 32 33 - static void key_cleanup(void *data); 34 - static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL); 33 + static void key_cleanup(struct work_struct *work); 34 + static DECLARE_WORK(key_cleanup_task, key_cleanup); 35 35 36 36 /* we serialise key instantiation and link */ 37 37 DECLARE_RWSEM(key_construction_sem); ··· 552 552 * do cleaning up in process context so that we don't have to disable 553 553 * interrupts all over the place 554 554 */ 555 - static void key_cleanup(void *data) 555 + static void key_cleanup(struct work_struct *work) 556 556 { 557 557 struct rb_node *_n; 558 558 struct key *key;