Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

WorkStruct: make allyesconfig

Fix up for make allyesconfig.

Signed-Off-By: David Howells <dhowells@redhat.com>

+1775 -1454
+3 -3
arch/i386/kernel/cpu/mcheck/non-fatal.c
··· 51 51 } 52 52 } 53 53 54 - static void mce_work_fn(void *data); 55 - static DECLARE_WORK(mce_work, mce_work_fn, NULL); 54 + static void mce_work_fn(struct work_struct *work); 55 + static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); 56 56 57 - static void mce_work_fn(void *data) 57 + static void mce_work_fn(struct work_struct *work) 58 58 { 59 59 on_each_cpu(mce_checkregs, NULL, 1, 1); 60 60 schedule_delayed_work(&mce_work, MCE_RATE);
+6 -5
arch/i386/kernel/smpboot.c
··· 1049 1049 1050 1050 struct warm_boot_cpu_info { 1051 1051 struct completion *complete; 1052 + struct work_struct task; 1052 1053 int apicid; 1053 1054 int cpu; 1054 1055 }; 1055 1056 1056 - static void __cpuinit do_warm_boot_cpu(void *p) 1057 + static void __cpuinit do_warm_boot_cpu(struct work_struct *work) 1057 1058 { 1058 - struct warm_boot_cpu_info *info = p; 1059 + struct warm_boot_cpu_info *info = 1060 + container_of(work, struct warm_boot_cpu_info, task); 1059 1061 do_boot_cpu(info->apicid, info->cpu); 1060 1062 complete(info->complete); 1061 1063 } ··· 1066 1064 { 1067 1065 DECLARE_COMPLETION_ONSTACK(done); 1068 1066 struct warm_boot_cpu_info info; 1069 - struct work_struct task; 1070 1067 int apicid, ret; 1071 1068 struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); 1072 1069 ··· 1090 1089 info.complete = &done; 1091 1090 info.apicid = apicid; 1092 1091 info.cpu = cpu; 1093 - INIT_WORK(&task, do_warm_boot_cpu, &info); 1092 + INIT_WORK(&info.task, do_warm_boot_cpu); 1094 1093 1095 1094 tsc_sync_disabled = 1; 1096 1095 ··· 1098 1097 clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, 1099 1098 KERNEL_PGD_PTRS); 1100 1099 flush_tlb_all(); 1101 - schedule_work(&task); 1100 + schedule_work(&info.task); 1102 1101 wait_for_completion(&done); 1103 1102 1104 1103 tsc_sync_disabled = 0;
+2 -2
arch/i386/kernel/tsc.c
··· 217 217 static unsigned int cpufreq_init = 0; 218 218 static struct work_struct cpufreq_delayed_get_work; 219 219 220 - static void handle_cpufreq_delayed_get(void *v) 220 + static void handle_cpufreq_delayed_get(struct work_struct *work) 221 221 { 222 222 unsigned int cpu; 223 223 ··· 306 306 { 307 307 int ret; 308 308 309 - INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL); 309 + INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get); 310 310 ret = cpufreq_register_notifier(&time_cpufreq_notifier_block, 311 311 CPUFREQ_TRANSITION_NOTIFIER); 312 312 if (!ret)
+3 -3
arch/powerpc/platforms/pseries/eeh_event.c
··· 37 37 /* EEH event workqueue setup. */ 38 38 static DEFINE_SPINLOCK(eeh_eventlist_lock); 39 39 LIST_HEAD(eeh_eventlist); 40 - static void eeh_thread_launcher(void *); 41 - DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); 40 + static void eeh_thread_launcher(struct work_struct *); 41 + DECLARE_WORK(eeh_event_wq, eeh_thread_launcher); 42 42 43 43 /* Serialize reset sequences for a given pci device */ 44 44 DEFINE_MUTEX(eeh_event_mutex); ··· 103 103 * eeh_thread_launcher 104 104 * @dummy - unused 105 105 */ 106 - static void eeh_thread_launcher(void *dummy) 106 + static void eeh_thread_launcher(struct work_struct *dummy) 107 107 { 108 108 if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0) 109 109 printk(KERN_ERR "Failed to start EEH daemon\n");
+5 -4
drivers/atm/idt77252.c
··· 135 135 int flags); 136 136 static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos, 137 137 char *page); 138 - static void idt77252_softint(void *dev_id); 138 + static void idt77252_softint(struct work_struct *work); 139 139 140 140 141 141 static struct atmdev_ops idt77252_ops = ··· 2866 2866 } 2867 2867 2868 2868 static void 2869 - idt77252_softint(void *dev_id) 2869 + idt77252_softint(struct work_struct *work) 2870 2870 { 2871 - struct idt77252_dev *card = dev_id; 2871 + struct idt77252_dev *card = 2872 + container_of(work, struct idt77252_dev, tqueue); 2872 2873 u32 stat; 2873 2874 int done; 2874 2875 ··· 3698 3697 card->pcidev = pcidev; 3699 3698 sprintf(card->name, "idt77252-%d", card->index); 3700 3699 3701 - INIT_WORK(&card->tqueue, idt77252_softint, (void *)card); 3700 + INIT_WORK(&card->tqueue, idt77252_softint); 3702 3701 3703 3702 membase = pci_resource_start(pcidev, 1); 3704 3703 srambase = pci_resource_start(pcidev, 2);
+1 -1
drivers/block/aoe/aoe.h
··· 159 159 void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor); 160 160 void aoecmd_ata_rsp(struct sk_buff *); 161 161 void aoecmd_cfg_rsp(struct sk_buff *); 162 - void aoecmd_sleepwork(void *vp); 162 + void aoecmd_sleepwork(struct work_struct *); 163 163 struct sk_buff *new_skb(ulong); 164 164 165 165 int aoedev_init(void);
+2 -2
drivers/block/aoe/aoecmd.c
··· 408 408 /* this function performs work that has been deferred until sleeping is OK 409 409 */ 410 410 void 411 - aoecmd_sleepwork(void *vp) 411 + aoecmd_sleepwork(struct work_struct *work) 412 412 { 413 - struct aoedev *d = (struct aoedev *) vp; 413 + struct aoedev *d = container_of(work, struct aoedev, work); 414 414 415 415 if (d->flags & DEVFL_GDALLOC) 416 416 aoeblk_gdalloc(d);
+1 -1
drivers/block/aoe/aoedev.c
··· 88 88 kfree(d); 89 89 return NULL; 90 90 } 91 - INIT_WORK(&d->work, aoecmd_sleepwork, d); 91 + INIT_WORK(&d->work, aoecmd_sleepwork); 92 92 spin_lock_init(&d->lock); 93 93 init_timer(&d->timer); 94 94 d->timer.data = (ulong) d;
+4 -4
drivers/block/paride/pd.c
··· 352 352 353 353 static void run_fsm(void); 354 354 355 - static void ps_tq_int( void *data); 355 + static void ps_tq_int(struct work_struct *work); 356 356 357 - static DECLARE_WORK(fsm_tq, ps_tq_int, NULL); 357 + static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int); 358 358 359 359 static void schedule_fsm(void) 360 360 { 361 361 if (!nice) 362 - schedule_work(&fsm_tq); 362 + schedule_delayed_work(&fsm_tq, 0); 363 363 else 364 364 schedule_delayed_work(&fsm_tq, nice-1); 365 365 } 366 366 367 - static void ps_tq_int(void *data) 367 + static void ps_tq_int(struct work_struct *work) 368 368 { 369 369 run_fsm(); 370 370 }
+5 -5
drivers/block/paride/pseudo.h
··· 35 35 #include <linux/sched.h> 36 36 #include <linux/workqueue.h> 37 37 38 - static void ps_tq_int( void *data); 38 + static void ps_tq_int(struct work_struct *work); 39 39 40 40 static void (* ps_continuation)(void); 41 41 static int (* ps_ready)(void); ··· 45 45 46 46 static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); 47 47 48 - static DECLARE_WORK(ps_tq, ps_tq_int, NULL); 48 + static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int); 49 49 50 50 static void ps_set_intr(void (*continuation)(void), 51 51 int (*ready)(void), ··· 63 63 if (!ps_tq_active) { 64 64 ps_tq_active = 1; 65 65 if (!ps_nice) 66 - schedule_work(&ps_tq); 66 + schedule_delayed_work(&ps_tq, 0); 67 67 else 68 68 schedule_delayed_work(&ps_tq, ps_nice-1); 69 69 } 70 70 spin_unlock_irqrestore(&ps_spinlock,flags); 71 71 } 72 72 73 - static void ps_tq_int(void *data) 73 + static void ps_tq_int(struct work_struct *work) 74 74 { 75 75 void (*con)(void); 76 76 unsigned long flags; ··· 92 92 } 93 93 ps_tq_active = 1; 94 94 if (!ps_nice) 95 - schedule_work(&ps_tq); 95 + schedule_delayed_work(&ps_tq, 0); 96 96 else 97 97 schedule_delayed_work(&ps_tq, ps_nice-1); 98 98 spin_unlock_irqrestore(&ps_spinlock,flags);
+4 -3
drivers/block/sx8.c
··· 1244 1244 return IRQ_RETVAL(handled); 1245 1245 } 1246 1246 1247 - static void carm_fsm_task (void *_data) 1247 + static void carm_fsm_task (struct work_struct *work) 1248 1248 { 1249 - struct carm_host *host = _data; 1249 + struct carm_host *host = 1250 + container_of(work, struct carm_host, fsm_task); 1250 1251 unsigned long flags; 1251 1252 unsigned int state; 1252 1253 int rc, i, next_dev; ··· 1620 1619 host->pdev = pdev; 1621 1620 host->flags = pci_dac ? FL_DAC : 0; 1622 1621 spin_lock_init(&host->lock); 1623 - INIT_WORK(&host->fsm_task, carm_fsm_task, host); 1622 + INIT_WORK(&host->fsm_task, carm_fsm_task); 1624 1623 init_completion(&host->probe_comp); 1625 1624 1626 1625 for (i = 0; i < ARRAY_SIZE(host->req); i++)
+4 -4
drivers/block/ub.c
··· 376 376 int stalled_pipe); 377 377 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); 378 378 static void ub_reset_enter(struct ub_dev *sc, int try); 379 - static void ub_reset_task(void *arg); 379 + static void ub_reset_task(struct work_struct *work); 380 380 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); 381 381 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, 382 382 struct ub_capacity *ret); ··· 1558 1558 schedule_work(&sc->reset_work); 1559 1559 } 1560 1560 1561 - static void ub_reset_task(void *arg) 1561 + static void ub_reset_task(struct work_struct *work) 1562 1562 { 1563 - struct ub_dev *sc = arg; 1563 + struct ub_dev *sc = container_of(work, struct ub_dev, reset_work); 1564 1564 unsigned long flags; 1565 1565 struct list_head *p; 1566 1566 struct ub_lun *lun; ··· 2179 2179 usb_init_urb(&sc->work_urb); 2180 2180 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); 2181 2181 atomic_set(&sc->poison, 0); 2182 - INIT_WORK(&sc->reset_work, ub_reset_task, sc); 2182 + INIT_WORK(&sc->reset_work, ub_reset_task); 2183 2183 init_waitqueue_head(&sc->reset_wait); 2184 2184 2185 2185 init_timer(&sc->work_timer);
+4 -3
drivers/bluetooth/bcm203x.c
··· 157 157 } 158 158 } 159 159 160 - static void bcm203x_work(void *user_data) 160 + static void bcm203x_work(struct work_struct *work) 161 161 { 162 - struct bcm203x_data *data = user_data; 162 + struct bcm203x_data *data = 163 + container_of(work, struct bcm203x_data, work); 163 164 164 165 if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) 165 166 BT_ERR("Can't submit URB"); ··· 247 246 248 247 release_firmware(firmware); 249 248 250 - INIT_WORK(&data->work, bcm203x_work, (void *) data); 249 + INIT_WORK(&data->work, bcm203x_work); 251 250 252 251 usb_set_intfdata(intf, data); 253 252
+5 -4
drivers/char/cyclades.c
··· 926 926 * had to poll every port to see if that port needed servicing. 927 927 */ 928 928 static void 929 - do_softint(void *private_) 929 + do_softint(struct work_struct *work) 930 930 { 931 - struct cyclades_port *info = (struct cyclades_port *) private_; 931 + struct cyclades_port *info = 932 + container_of(work, struct cyclades_port, tqueue); 932 933 struct tty_struct *tty; 933 934 934 935 tty = info->tty; ··· 5329 5328 info->blocked_open = 0; 5330 5329 info->default_threshold = 0; 5331 5330 info->default_timeout = 0; 5332 - INIT_WORK(&info->tqueue, do_softint, info); 5331 + INIT_WORK(&info->tqueue, do_softint); 5333 5332 init_waitqueue_head(&info->open_wait); 5334 5333 init_waitqueue_head(&info->close_wait); 5335 5334 init_waitqueue_head(&info->shutdown_wait); ··· 5404 5403 info->blocked_open = 0; 5405 5404 info->default_threshold = 0; 5406 5405 info->default_timeout = 0; 5407 - INIT_WORK(&info->tqueue, do_softint, info); 5406 + INIT_WORK(&info->tqueue, do_softint); 5408 5407 init_waitqueue_head(&info->open_wait); 5409 5408 init_waitqueue_head(&info->close_wait); 5410 5409 init_waitqueue_head(&info->shutdown_wait);
+3 -3
drivers/char/drm/via_dmablit.c
··· 500 500 501 501 502 502 static void 503 - via_dmablit_workqueue(void *data) 503 + via_dmablit_workqueue(struct work_struct *work) 504 504 { 505 - drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; 505 + drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); 506 506 drm_device_t *dev = blitq->dev; 507 507 unsigned long irqsave; 508 508 drm_via_sg_info_t *cur_sg; ··· 571 571 DRM_INIT_WAITQUEUE(blitq->blit_queue + j); 572 572 } 573 573 DRM_INIT_WAITQUEUE(&blitq->busy_queue); 574 - INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq); 574 + INIT_WORK(&blitq->wq, via_dmablit_workqueue); 575 575 init_timer(&blitq->poll_timer); 576 576 blitq->poll_timer.function = &via_dmablit_timer; 577 577 blitq->poll_timer.data = (unsigned long) blitq;
+4 -4
drivers/char/epca.c
··· 200 200 static int info_ioctl(struct tty_struct *, struct file *, 201 201 unsigned int, unsigned long); 202 202 static void pc_set_termios(struct tty_struct *, struct termios *); 203 - static void do_softint(void *); 203 + static void do_softint(struct work_struct *work); 204 204 static void pc_stop(struct tty_struct *); 205 205 static void pc_start(struct tty_struct *); 206 206 static void pc_throttle(struct tty_struct * tty); ··· 1505 1505 1506 1506 ch->brdchan = bc; 1507 1507 ch->mailbox = gd; 1508 - INIT_WORK(&ch->tqueue, do_softint, ch); 1508 + INIT_WORK(&ch->tqueue, do_softint); 1509 1509 ch->board = &boards[crd]; 1510 1510 1511 1511 spin_lock_irqsave(&epca_lock, flags); ··· 2566 2566 2567 2567 /* --------------------- Begin do_softint ----------------------- */ 2568 2568 2569 - static void do_softint(void *private_) 2569 + static void do_softint(struct work_struct *work) 2570 2570 { /* Begin do_softint */ 2571 - struct channel *ch = (struct channel *) private_; 2571 + struct channel *ch = container_of(work, struct channel, tqueue); 2572 2572 /* Called in response to a modem change event */ 2573 2573 if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */ 2574 2574 struct tty_struct *tty = ch->tty;
+8 -6
drivers/char/esp.c
··· 723 723 * ------------------------------------------------------------------- 724 724 */ 725 725 726 - static void do_softint(void *private_) 726 + static void do_softint(struct work_struct *work) 727 727 { 728 - struct esp_struct *info = (struct esp_struct *) private_; 728 + struct esp_struct *info = 729 + container_of(work, struct esp_struct, tqueue); 729 730 struct tty_struct *tty; 730 731 731 732 tty = info->tty; ··· 747 746 * do_serial_hangup() -> tty->hangup() -> esp_hangup() 748 747 * 749 748 */ 750 - static void do_serial_hangup(void *private_) 749 + static void do_serial_hangup(struct work_struct *work) 751 750 { 752 - struct esp_struct *info = (struct esp_struct *) private_; 751 + struct esp_struct *info = 752 + container_of(work, struct esp_struct, tqueue_hangup); 753 753 struct tty_struct *tty; 754 754 755 755 tty = info->tty; ··· 2503 2501 info->magic = ESP_MAGIC; 2504 2502 info->close_delay = 5*HZ/10; 2505 2503 info->closing_wait = 30*HZ; 2506 - INIT_WORK(&info->tqueue, do_softint, info); 2507 - INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info); 2504 + INIT_WORK(&info->tqueue, do_softint); 2505 + INIT_WORK(&info->tqueue_hangup, do_serial_hangup); 2508 2506 info->config.rx_timeout = rx_timeout; 2509 2507 info->config.flow_on = flow_on; 2510 2508 info->config.flow_off = flow_off;
+2 -2
drivers/char/genrtc.c
··· 102 102 * Routine to poll RTC seconds field for change as often as possible, 103 103 * after first RTC_UIE use timer to reduce polling 104 104 */ 105 - static void genrtc_troutine(void *data) 105 + static void genrtc_troutine(struct work_struct *work) 106 106 { 107 107 unsigned int tmp = get_rtc_ss(); 108 108 ··· 255 255 irq_active = 1; 256 256 stop_rtc_timers = 0; 257 257 lostint = 0; 258 - INIT_WORK(&genrtc_task, genrtc_troutine, NULL); 258 + INIT_WORK(&genrtc_task, genrtc_troutine); 259 259 oldsecs = get_rtc_ss(); 260 260 init_timer(&timer_task); 261 261
+9 -7
drivers/char/hvsi.c
··· 69 69 #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) 70 70 71 71 struct hvsi_struct { 72 - struct work_struct writer; 72 + struct delayed_work writer; 73 73 struct work_struct handshaker; 74 74 wait_queue_head_t emptyq; /* woken when outbuf is emptied */ 75 75 wait_queue_head_t stateq; /* woken when HVSI state changes */ ··· 744 744 return 0; 745 745 } 746 746 747 - static void hvsi_handshaker(void *arg) 747 + static void hvsi_handshaker(struct work_struct *work) 748 748 { 749 - struct hvsi_struct *hp = (struct hvsi_struct *)arg; 749 + struct hvsi_struct *hp = 750 + container_of(work, struct hvsi_struct, handshaker); 750 751 751 752 if (hvsi_handshake(hp) >= 0) 752 753 return; ··· 952 951 } 953 952 954 953 /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */ 955 - static void hvsi_write_worker(void *arg) 954 + static void hvsi_write_worker(struct work_struct *work) 956 955 { 957 - struct hvsi_struct *hp = (struct hvsi_struct *)arg; 956 + struct hvsi_struct *hp = 957 + container_of(work, struct hvsi_struct, writer.work); 958 958 unsigned long flags; 959 959 #ifdef DEBUG 960 960 static long start_j = 0; ··· 1289 1287 } 1290 1288 1291 1289 hp = &hvsi_ports[hvsi_count]; 1292 - INIT_WORK(&hp->writer, hvsi_write_worker, hp); 1293 - INIT_WORK(&hp->handshaker, hvsi_handshaker, hp); 1290 + INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker); 1291 + INIT_WORK(&hp->handshaker, hvsi_handshaker); 1294 1292 init_waitqueue_head(&hp->emptyq); 1295 1293 init_waitqueue_head(&hp->stateq); 1296 1294 spin_lock_init(&hp->lock);
+6 -6
drivers/char/ip2/i2lib.c
··· 84 84 static void serviceOutgoingFifo(i2eBordStrPtr); 85 85 86 86 // Functions defined in ip2.c as part of interrupt handling 87 - static void do_input(void *); 88 - static void do_status(void *); 87 + static void do_input(struct work_struct *); 88 + static void do_status(struct work_struct *); 89 89 90 90 //*************** 91 91 //* Debug Data * ··· 331 331 pCh->ClosingWaitTime = 30*HZ; 332 332 333 333 // Initialize task queue objects 334 - INIT_WORK(&pCh->tqueue_input, do_input, pCh); 335 - INIT_WORK(&pCh->tqueue_status, do_status, pCh); 334 + INIT_WORK(&pCh->tqueue_input, do_input); 335 + INIT_WORK(&pCh->tqueue_status, do_status); 336 336 337 337 #ifdef IP2DEBUG_TRACE 338 338 pCh->trace = ip2trace; ··· 1573 1573 #ifdef USE_IQ 1574 1574 schedule_work(&pCh->tqueue_input); 1575 1575 #else 1576 - do_input(pCh); 1576 + do_input(&pCh->tqueue_input); 1577 1577 #endif 1578 1578 1579 1579 // Note we do not need to maintain any flow-control credits at this ··· 1810 1810 #ifdef USE_IQ 1811 1811 schedule_work(&pCh->tqueue_status); 1812 1812 #else 1813 - do_status(pCh); 1813 + do_status(&pCh->tqueue_status); 1814 1814 #endif 1815 1815 } 1816 1816 }
+12 -11
drivers/char/ip2/ip2main.c
··· 189 189 unsigned int set, unsigned int clear); 190 190 191 191 static void set_irq(int, int); 192 - static void ip2_interrupt_bh(i2eBordStrPtr pB); 192 + static void ip2_interrupt_bh(struct work_struct *work); 193 193 static irqreturn_t ip2_interrupt(int irq, void *dev_id); 194 194 static void ip2_poll(unsigned long arg); 195 195 static inline void service_all_boards(void); 196 - static void do_input(void *p); 197 - static void do_status(void *p); 196 + static void do_input(struct work_struct *); 197 + static void do_status(struct work_struct *); 198 198 199 199 static void ip2_wait_until_sent(PTTY,int); 200 200 ··· 918 918 pCh++; 919 919 } 920 920 ex_exit: 921 - INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB); 921 + INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh); 922 922 return; 923 923 924 924 err_release_region: ··· 1125 1125 1126 1126 1127 1127 /******************************************************************************/ 1128 - /* Function: ip2_interrupt_bh(pB) */ 1129 - /* Parameters: pB - pointer to the board structure */ 1128 + /* Function: ip2_interrupt_bh(work) */ 1129 + /* Parameters: work - pointer to the board structure */ 1130 1130 /* Returns: Nothing */ 1131 1131 /* */ 1132 1132 /* Description: */ ··· 1135 1135 /* */ 1136 1136 /******************************************************************************/ 1137 1137 static void 1138 - ip2_interrupt_bh(i2eBordStrPtr pB) 1138 + ip2_interrupt_bh(struct work_struct *work) 1139 1139 { 1140 + i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt); 1140 1141 // pB better well be set or we have a problem! We can only get 1141 1142 // here from the IMMEDIATE queue. Here, we process the boards. 1142 1143 // Checking pB doesn't cost much and it saves us from the sanity checkers. 
··· 1246 1245 ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 ); 1247 1246 } 1248 1247 1249 - static void do_input(void *p) 1248 + static void do_input(struct work_struct *work) 1250 1249 { 1251 - i2ChanStrPtr pCh = p; 1250 + i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input); 1252 1251 unsigned long flags; 1253 1252 1254 1253 ip2trace(CHANN, ITRC_INPUT, 21, 0 ); ··· 1280 1279 } 1281 1280 } 1282 1281 1283 - static void do_status(void *p) 1282 + static void do_status(struct work_struct *work) 1284 1283 { 1285 - i2ChanStrPtr pCh = p; 1284 + i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status); 1286 1285 int status; 1287 1286 1288 1287 status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
+6 -6
drivers/char/isicom.c
··· 530 530 /* Interrupt handlers */ 531 531 532 532 533 - static void isicom_bottomhalf(void *data) 533 + static void isicom_bottomhalf(struct work_struct *work) 534 534 { 535 - struct isi_port *port = (struct isi_port *) data; 535 + struct isi_port *port = container_of(work, struct isi_port, bh_tqueue); 536 536 struct tty_struct *tty = port->tty; 537 537 538 538 if (!tty) ··· 1474 1474 } 1475 1475 1476 1476 /* hangup et all */ 1477 - static void do_isicom_hangup(void *data) 1477 + static void do_isicom_hangup(struct work_struct *work) 1478 1478 { 1479 - struct isi_port *port = data; 1479 + struct isi_port *port = container_of(work, struct isi_port, hangup_tq); 1480 1480 struct tty_struct *tty; 1481 1481 1482 1482 tty = port->tty; ··· 1966 1966 port->channel = channel; 1967 1967 port->close_delay = 50 * HZ/100; 1968 1968 port->closing_wait = 3000 * HZ/100; 1969 - INIT_WORK(&port->hangup_tq, do_isicom_hangup, port); 1970 - INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port); 1969 + INIT_WORK(&port->hangup_tq, do_isicom_hangup); 1970 + INIT_WORK(&port->bh_tqueue, isicom_bottomhalf); 1971 1971 port->status = 0; 1972 1972 init_waitqueue_head(&port->open_wait); 1973 1973 init_waitqueue_head(&port->close_wait);
+4 -4
drivers/char/moxa.c
··· 222 222 /* 223 223 * static functions: 224 224 */ 225 - static void do_moxa_softint(void *); 225 + static void do_moxa_softint(struct work_struct *); 226 226 static int moxa_open(struct tty_struct *, struct file *); 227 227 static void moxa_close(struct tty_struct *, struct file *); 228 228 static int moxa_write(struct tty_struct *, const unsigned char *, int); ··· 363 363 for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) { 364 364 ch->type = PORT_16550A; 365 365 ch->port = i; 366 - INIT_WORK(&ch->tqueue, do_moxa_softint, ch); 366 + INIT_WORK(&ch->tqueue, do_moxa_softint); 367 367 ch->tty = NULL; 368 368 ch->close_delay = 5 * HZ / 10; 369 369 ch->closing_wait = 30 * HZ; ··· 509 509 module_init(moxa_init); 510 510 module_exit(moxa_exit); 511 511 512 - static void do_moxa_softint(void *private_) 512 + static void do_moxa_softint(struct work_struct *work) 513 513 { 514 - struct moxa_str *ch = (struct moxa_str *) private_; 514 + struct moxa_str *ch = container_of(work, struct moxa_str, tqueue); 515 515 struct tty_struct *tty; 516 516 517 517 if (ch && (tty = ch->tty)) {
+5 -4
drivers/char/mxser.c
··· 389 389 /* static void mxser_poll(unsigned long); */ 390 390 static int mxser_get_ISA_conf(int, struct mxser_hwconf *); 391 391 static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *); 392 - static void mxser_do_softint(void *); 392 + static void mxser_do_softint(struct work_struct *); 393 393 static int mxser_open(struct tty_struct *, struct file *); 394 394 static void mxser_close(struct tty_struct *, struct file *); 395 395 static int mxser_write(struct tty_struct *, const unsigned char *, int); ··· 590 590 info->custom_divisor = hwconf->baud_base[i] * 16; 591 591 info->close_delay = 5 * HZ / 10; 592 592 info->closing_wait = 30 * HZ; 593 - INIT_WORK(&info->tqueue, mxser_do_softint, info); 593 + INIT_WORK(&info->tqueue, mxser_do_softint); 594 594 info->normal_termios = mxvar_sdriver->init_termios; 595 595 init_waitqueue_head(&info->open_wait); 596 596 init_waitqueue_head(&info->close_wait); ··· 917 917 return 0; 918 918 } 919 919 920 - static void mxser_do_softint(void *private_) 920 + static void mxser_do_softint(struct work_struct *work) 921 921 { 922 - struct mxser_struct *info = private_; 922 + struct mxser_struct *info = 923 + container_of(work, struct mxser_struct, tqueue); 923 924 struct tty_struct *tty; 924 925 925 926 tty = info->tty;
+2 -2
drivers/char/sonypi.c
··· 765 765 sonypi_device.bluetooth_power = state; 766 766 } 767 767 768 - static void input_keyrelease(void *data) 768 + static void input_keyrelease(struct work_struct *work) 769 769 { 770 770 struct sonypi_keypress kp; 771 771 ··· 1412 1412 goto err_inpdev_unregister; 1413 1413 } 1414 1414 1415 - INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL); 1415 + INIT_WORK(&sonypi_device.input_work, input_keyrelease); 1416 1416 } 1417 1417 1418 1418 sonypi_enable(0);
+8 -6
drivers/char/specialix.c
··· 2261 2261 * do_sx_hangup() -> tty->hangup() -> sx_hangup() 2262 2262 * 2263 2263 */ 2264 - static void do_sx_hangup(void *private_) 2264 + static void do_sx_hangup(struct work_struct *work) 2265 2265 { 2266 - struct specialix_port *port = (struct specialix_port *) private_; 2266 + struct specialix_port *port = 2267 + container_of(work, struct specialix_port, tqueue_hangup); 2267 2268 struct tty_struct *tty; 2268 2269 2269 2270 func_enter(); ··· 2337 2336 } 2338 2337 2339 2338 2340 - static void do_softint(void *private_) 2339 + static void do_softint(struct work_struct *work) 2341 2340 { 2342 - struct specialix_port *port = (struct specialix_port *) private_; 2341 + struct specialix_port *port = 2342 + container_of(work, struct specialix_port, tqueue); 2343 2343 struct tty_struct *tty; 2344 2344 2345 2345 func_enter(); ··· 2413 2411 memset(sx_port, 0, sizeof(sx_port)); 2414 2412 for (i = 0; i < SX_NPORT * SX_NBOARD; i++) { 2415 2413 sx_port[i].magic = SPECIALIX_MAGIC; 2416 - INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]); 2417 - INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]); 2414 + INIT_WORK(&sx_port[i].tqueue, do_softint); 2415 + INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup); 2418 2416 sx_port[i].close_delay = 50 * HZ/100; 2419 2417 sx_port[i].closing_wait = 3000 * HZ/100; 2420 2418 init_waitqueue_head(&sx_port[i].open_wait);
+5 -4
drivers/char/synclink.c
··· 802 802 /* 803 803 * Bottom half interrupt handlers 804 804 */ 805 - static void mgsl_bh_handler(void* Context); 805 + static void mgsl_bh_handler(struct work_struct *work); 806 806 static void mgsl_bh_receive(struct mgsl_struct *info); 807 807 static void mgsl_bh_transmit(struct mgsl_struct *info); 808 808 static void mgsl_bh_status(struct mgsl_struct *info); ··· 1071 1071 /* 1072 1072 * Perform bottom half processing of work items queued by ISR. 1073 1073 */ 1074 - static void mgsl_bh_handler(void* Context) 1074 + static void mgsl_bh_handler(struct work_struct *work) 1075 1075 { 1076 - struct mgsl_struct *info = (struct mgsl_struct*)Context; 1076 + struct mgsl_struct *info = 1077 + container_of(work, struct mgsl_struct, task); 1077 1078 int action; 1078 1079 1079 1080 if (!info) ··· 4338 4337 } else { 4339 4338 memset(info, 0, sizeof(struct mgsl_struct)); 4340 4339 info->magic = MGSL_MAGIC; 4341 - INIT_WORK(&info->task, mgsl_bh_handler, info); 4340 + INIT_WORK(&info->task, mgsl_bh_handler); 4342 4341 info->max_frame_size = 4096; 4343 4342 info->close_delay = 5*HZ/10; 4344 4343 info->closing_wait = 30*HZ;
+4 -4
drivers/char/synclinkmp.c
··· 602 602 static void set_rate(SLMP_INFO *info, u32 data_rate); 603 603 604 604 static int bh_action(SLMP_INFO *info); 605 - static void bh_handler(void* Context); 605 + static void bh_handler(struct work_struct *work); 606 606 static void bh_receive(SLMP_INFO *info); 607 607 static void bh_transmit(SLMP_INFO *info); 608 608 static void bh_status(SLMP_INFO *info); ··· 2063 2063 2064 2064 /* Perform bottom half processing of work items queued by ISR. 2065 2065 */ 2066 - void bh_handler(void* Context) 2066 + void bh_handler(struct work_struct *work) 2067 2067 { 2068 - SLMP_INFO *info = (SLMP_INFO*)Context; 2068 + SLMP_INFO *info = container_of(work, SLMP_INFO, task); 2069 2069 int action; 2070 2070 2071 2071 if (!info) ··· 3805 3805 } else { 3806 3806 memset(info, 0, sizeof(SLMP_INFO)); 3807 3807 info->magic = MGSL_MAGIC; 3808 - INIT_WORK(&info->task, bh_handler, info); 3808 + INIT_WORK(&info->task, bh_handler); 3809 3809 info->max_frame_size = 4096; 3810 3810 info->close_delay = 5*HZ/10; 3811 3811 info->closing_wait = 30*HZ;
+3 -3
drivers/char/tpm/tpm.c
··· 325 325 schedule_work(&chip->work); 326 326 } 327 327 328 - static void timeout_work(void *ptr) 328 + static void timeout_work(struct work_struct *work) 329 329 { 330 - struct tpm_chip *chip = ptr; 330 + struct tpm_chip *chip = container_of(work, struct tpm_chip, work); 331 331 332 332 down(&chip->buffer_mutex); 333 333 atomic_set(&chip->data_pending, 0); ··· 1105 1105 init_MUTEX(&chip->tpm_mutex); 1106 1106 INIT_LIST_HEAD(&chip->list); 1107 1107 1108 - INIT_WORK(&chip->work, timeout_work, chip); 1108 + INIT_WORK(&chip->work, timeout_work); 1109 1109 1110 1110 init_timer(&chip->user_read_timer); 1111 1111 chip->user_read_timer.function = user_reader_timeout;
+5 -3
drivers/connector/cn_queue.c
··· 31 31 #include <linux/connector.h> 32 32 #include <linux/delay.h> 33 33 34 - void cn_queue_wrapper(void *data) 34 + void cn_queue_wrapper(struct work_struct *work) 35 35 { 36 - struct cn_callback_data *d = data; 36 + struct cn_callback_entry *cbq = 37 + container_of(work, struct cn_callback_entry, work.work); 38 + struct cn_callback_data *d = &cbq->data; 37 39 38 40 d->callback(d->callback_priv); 39 41 ··· 59 57 memcpy(&cbq->id.id, id, sizeof(struct cb_id)); 60 58 cbq->data.callback = callback; 61 59 62 - INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data); 60 + INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper); 63 61 return cbq; 64 62 } 65 63
+15 -16
drivers/connector/connector.c
··· 135 135 spin_lock_bh(&dev->cbdev->queue_lock); 136 136 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { 137 137 if (cn_cb_equal(&__cbq->id.id, &msg->id)) { 138 - if (likely(!test_bit(0, &__cbq->work.pending) && 138 + if (likely(!test_bit(WORK_STRUCT_PENDING, 139 + &__cbq->work.work.management) && 139 140 __cbq->data.ddata == NULL)) { 140 141 __cbq->data.callback_priv = msg; 141 142 142 143 __cbq->data.ddata = data; 143 144 __cbq->data.destruct_data = destruct_data; 144 145 145 - if (queue_work(dev->cbdev->cn_queue, 146 - &__cbq->work)) 146 + if (queue_delayed_work( 147 + dev->cbdev->cn_queue, 148 + &__cbq->work, 0)) 147 149 err = 0; 148 150 } else { 149 - struct work_struct *w; 150 151 struct cn_callback_data *d; 151 152 152 - w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC); 153 - if (w) { 154 - d = (struct cn_callback_data *)(w+1); 155 - 153 + __cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC); 154 + if (__cbq) { 155 + d = &__cbq->data; 156 156 d->callback_priv = msg; 157 157 d->callback = __cbq->data.callback; 158 158 d->ddata = data; 159 159 d->destruct_data = destruct_data; 160 - d->free = w; 160 + d->free = __cbq; 161 161 162 - INIT_LIST_HEAD(&w->entry); 163 - w->pending = 0; 164 - w->func = &cn_queue_wrapper; 165 - w->data = d; 166 - init_timer(&w->timer); 162 + INIT_DELAYED_WORK(&__cbq->work, 163 + &cn_queue_wrapper); 167 164 168 - if (queue_work(dev->cbdev->cn_queue, w)) 165 + if (queue_delayed_work( 166 + dev->cbdev->cn_queue, 167 + &__cbq->work, 0)) 169 168 err = 0; 170 169 else { 171 - kfree(w); 170 + kfree(__cbq); 172 171 err = -EINVAL; 173 172 } 174 173 } else
+3 -4
drivers/cpufreq/cpufreq_conservative.c
··· 59 59 #define MAX_SAMPLING_DOWN_FACTOR (10) 60 60 #define TRANSITION_LATENCY_LIMIT (10 * 1000) 61 61 62 - static void do_dbs_timer(void *data); 62 + static void do_dbs_timer(struct work_struct *work); 63 63 64 64 struct cpu_dbs_info_s { 65 65 struct cpufreq_policy *cur_policy; ··· 82 82 * is recursive for the same process. -Venki 83 83 */ 84 84 static DEFINE_MUTEX (dbs_mutex); 85 - static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); 85 + static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); 86 86 87 87 struct dbs_tuners { 88 88 unsigned int sampling_rate; ··· 420 420 } 421 421 } 422 422 423 - static void do_dbs_timer(void *data) 423 + static void do_dbs_timer(struct work_struct *work) 424 424 { 425 425 int i; 426 426 lock_cpu_hotplug(); ··· 435 435 436 436 static inline void dbs_timer_init(void) 437 437 { 438 - INIT_WORK(&dbs_work, do_dbs_timer, NULL); 439 438 schedule_delayed_work(&dbs_work, 440 439 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 441 440 return;
+17 -11
drivers/cpufreq/cpufreq_ondemand.c
··· 47 47 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) 48 48 #define TRANSITION_LATENCY_LIMIT (10 * 1000) 49 49 50 - static void do_dbs_timer(void *data); 50 + static void do_dbs_timer(struct work_struct *work); 51 + 52 + /* Sampling types */ 53 + enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; 51 54 52 55 struct cpu_dbs_info_s { 53 56 cputime64_t prev_cpu_idle; 54 57 cputime64_t prev_cpu_wall; 55 58 struct cpufreq_policy *cur_policy; 56 - struct work_struct work; 59 + struct delayed_work work; 60 + enum dbs_sample sample_type; 57 61 unsigned int enable; 58 62 struct cpufreq_frequency_table *freq_table; 59 63 unsigned int freq_lo; ··· 411 407 } 412 408 } 413 409 414 - /* Sampling types */ 415 - enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; 416 - 417 - static void do_dbs_timer(void *data) 410 + static void do_dbs_timer(struct work_struct *work) 418 411 { 419 412 unsigned int cpu = smp_processor_id(); 420 413 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 414 + enum dbs_sample sample_type = dbs_info->sample_type; 421 415 /* We want all CPUs to do sampling nearly on same jiffy */ 422 416 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 417 + 418 + /* Permit rescheduling of this work item */ 419 + work_release(work); 420 + 423 421 delay -= jiffies % delay; 424 422 425 423 if (!dbs_info->enable) 426 424 return; 427 425 /* Common NORMAL_SAMPLE setup */ 428 - INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE); 426 + dbs_info->sample_type = DBS_NORMAL_SAMPLE; 429 427 if (!dbs_tuners_ins.powersave_bias || 430 - (unsigned long) data == DBS_NORMAL_SAMPLE) { 428 + sample_type == DBS_NORMAL_SAMPLE) { 431 429 lock_cpu_hotplug(); 432 430 dbs_check_cpu(dbs_info); 433 431 unlock_cpu_hotplug(); 434 432 if (dbs_info->freq_lo) { 435 433 /* Setup timer for SUB_SAMPLE */ 436 - INIT_WORK(&dbs_info->work, do_dbs_timer, 437 - (void *)DBS_SUB_SAMPLE); 434 + dbs_info->sample_type = DBS_SUB_SAMPLE; 438 435 delay = dbs_info->freq_hi_jiffies; 
439 436 } 440 437 } else { ··· 454 449 delay -= jiffies % delay; 455 450 456 451 ondemand_powersave_bias_init(); 457 - INIT_WORK(&dbs_info->work, do_dbs_timer, NULL); 452 + INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer); 453 + dbs_info->sample_type = DBS_NORMAL_SAMPLE; 458 454 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); 459 455 } 460 456
+6 -6
drivers/i2c/chips/ds1374.c
··· 140 140 return t1; 141 141 } 142 142 143 - static void ds1374_set_work(void *arg) 143 + static ulong new_time; 144 + 145 + static void ds1374_set_work(struct work_struct *work) 144 146 { 145 147 ulong t1, t2; 146 148 int limit = 10; /* arbitrary retry limit */ 147 149 148 - t1 = *(ulong *) arg; 150 + t1 = new_time; 149 151 150 152 mutex_lock(&ds1374_mutex); 151 153 ··· 169 167 "can't confirm time set from rtc chip\n"); 170 168 } 171 169 172 - static ulong new_time; 173 - 174 170 static struct workqueue_struct *ds1374_workqueue; 175 171 176 - static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time); 172 + static DECLARE_WORK(ds1374_work, ds1374_set_work); 177 173 178 174 int ds1374_set_rtc_time(ulong nowtime) 179 175 { ··· 180 180 if (in_interrupt()) 181 181 queue_work(ds1374_workqueue, &ds1374_work); 182 182 else 183 - ds1374_set_work(&new_time); 183 + ds1374_set_work(NULL); 184 184 185 185 return 0; 186 186 }
+5 -4
drivers/ieee1394/hosts.c
··· 31 31 #include "config_roms.h" 32 32 33 33 34 - static void delayed_reset_bus(void * __reset_info) 34 + static void delayed_reset_bus(struct work_struct *work) 35 35 { 36 - struct hpsb_host *host = (struct hpsb_host*)__reset_info; 36 + struct hpsb_host *host = 37 + container_of(work, struct hpsb_host, delayed_reset.work); 37 38 int generation = host->csr.generation + 1; 38 39 39 40 /* The generation field rolls over to 2 rather than 0 per IEEE ··· 146 145 147 146 atomic_set(&h->generation, 0); 148 147 149 - INIT_WORK(&h->delayed_reset, delayed_reset_bus, h); 148 + INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus); 150 149 151 150 init_timer(&h->timeout); 152 151 h->timeout.data = (unsigned long) h; ··· 235 234 * Config ROM in the near future. */ 236 235 reset_delay = HZ; 237 236 238 - PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host); 237 + PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus); 239 238 schedule_delayed_work(&host->delayed_reset, reset_delay); 240 239 241 240 return 0;
+1 -1
drivers/ieee1394/hosts.h
··· 62 62 struct class_device class_dev; 63 63 64 64 int update_config_rom; 65 - struct work_struct delayed_reset; 65 + struct delayed_work delayed_reset; 66 66 unsigned int config_roms; 67 67 68 68 struct list_head addr_space;
+16 -12
drivers/ieee1394/sbp2.c
··· 493 493 scsi_unblock_requests(scsi_id->scsi_host); 494 494 } 495 495 496 - static void sbp2util_write_orb_pointer(void *p) 496 + static void sbp2util_write_orb_pointer(struct work_struct *work) 497 497 { 498 + struct scsi_id_instance_data *scsi_id = 499 + container_of(work, struct scsi_id_instance_data, 500 + protocol_work.work); 498 501 quadlet_t data[2]; 499 502 500 - data[0] = ORB_SET_NODE_ID( 501 - ((struct scsi_id_instance_data *)p)->hi->host->node_id); 502 - data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma; 503 + data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id); 504 + data[1] = scsi_id->last_orb_dma; 503 505 sbp2util_cpu_to_be32_buffer(data, 8); 504 - sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8); 506 + sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8); 505 507 } 506 508 507 - static void sbp2util_write_doorbell(void *p) 509 + static void sbp2util_write_doorbell(struct work_struct *work) 508 510 { 509 - sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4); 511 + struct scsi_id_instance_data *scsi_id = 512 + container_of(work, struct scsi_id_instance_data, 513 + protocol_work.work); 514 + sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4); 510 515 } 511 516 512 517 /* ··· 848 843 INIT_LIST_HEAD(&scsi_id->scsi_list); 849 844 spin_lock_init(&scsi_id->sbp2_command_orb_lock); 850 845 atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); 851 - INIT_WORK(&scsi_id->protocol_work, NULL, NULL); 846 + INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL); 852 847 853 848 ud->device.driver_data = scsi_id; 854 849 ··· 2052 2047 * We do not accept new commands until the job is over. 2053 2048 */ 2054 2049 scsi_block_requests(scsi_id->scsi_host); 2055 - PREPARE_WORK(&scsi_id->protocol_work, 2050 + PREPARE_DELAYED_WORK(&scsi_id->protocol_work, 2056 2051 last_orb ? 
sbp2util_write_doorbell: 2057 - sbp2util_write_orb_pointer, 2058 - scsi_id); 2059 - schedule_work(&scsi_id->protocol_work); 2052 + sbp2util_write_orb_pointer); 2053 + schedule_delayed_work(&scsi_id->protocol_work, 0); 2060 2054 } 2061 2055 } 2062 2056
+1 -1
drivers/ieee1394/sbp2.h
··· 348 348 unsigned workarounds; 349 349 350 350 atomic_t state; 351 - struct work_struct protocol_work; 351 + struct delayed_work protocol_work; 352 352 }; 353 353 354 354 /* For use in scsi_id_instance_data.state */
+3 -3
drivers/infiniband/core/addr.c
··· 55 55 int status; 56 56 }; 57 57 58 - static void process_req(void *data); 58 + static void process_req(struct work_struct *work); 59 59 60 60 static DEFINE_MUTEX(lock); 61 61 static LIST_HEAD(req_list); 62 - static DECLARE_WORK(work, process_req, NULL); 62 + static DECLARE_DELAYED_WORK(work, process_req); 63 63 static struct workqueue_struct *addr_wq; 64 64 65 65 void rdma_addr_register_client(struct rdma_addr_client *client) ··· 215 215 return ret; 216 216 } 217 217 218 - static void process_req(void *data) 218 + static void process_req(struct work_struct *work) 219 219 { 220 220 struct addr_req *req, *temp_req; 221 221 struct sockaddr_in *src_in, *dst_in;
+4 -3
drivers/infiniband/core/cache.c
··· 285 285 kfree(tprops); 286 286 } 287 287 288 - static void ib_cache_task(void *work_ptr) 288 + static void ib_cache_task(struct work_struct *_work) 289 289 { 290 - struct ib_update_work *work = work_ptr; 290 + struct ib_update_work *work = 291 + container_of(_work, struct ib_update_work, work); 291 292 292 293 ib_cache_update(work->device, work->port_num); 293 294 kfree(work); ··· 307 306 event->event == IB_EVENT_CLIENT_REREGISTER) { 308 307 work = kmalloc(sizeof *work, GFP_ATOMIC); 309 308 if (work) { 310 - INIT_WORK(&work->work, ib_cache_task, work); 309 + INIT_WORK(&work->work, ib_cache_task); 311 310 work->device = event->device; 312 311 work->port_num = event->element.port_num; 313 312 schedule_work(&work->work);
+9 -10
drivers/infiniband/core/cm.c
··· 101 101 }; 102 102 103 103 struct cm_work { 104 - struct work_struct work; 104 + struct delayed_work work; 105 105 struct list_head list; 106 106 struct cm_port *port; 107 107 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ ··· 161 161 atomic_t work_count; 162 162 }; 163 163 164 - static void cm_work_handler(void *data); 164 + static void cm_work_handler(struct work_struct *work); 165 165 166 166 static inline void cm_deref_id(struct cm_id_private *cm_id_priv) 167 167 { ··· 669 669 return ERR_PTR(-ENOMEM); 670 670 671 671 timewait_info->work.local_id = local_id; 672 - INIT_WORK(&timewait_info->work.work, cm_work_handler, 673 - &timewait_info->work); 672 + INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); 674 673 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; 675 674 return timewait_info; 676 675 } ··· 2986 2987 } 2987 2988 } 2988 2989 2989 - static void cm_work_handler(void *data) 2990 + static void cm_work_handler(struct work_struct *_work) 2990 2991 { 2991 - struct cm_work *work = data; 2992 + struct cm_work *work = container_of(_work, struct cm_work, work.work); 2992 2993 int ret; 2993 2994 2994 2995 switch (work->cm_event.event) { ··· 3078 3079 * we need to find the cm_id once we're in the context of the 3079 3080 * worker thread, rather than holding a reference on it. 
3080 3081 */ 3081 - INIT_WORK(&work->work, cm_work_handler, work); 3082 + INIT_DELAYED_WORK(&work->work, cm_work_handler); 3082 3083 work->local_id = cm_id->local_id; 3083 3084 work->remote_id = cm_id->remote_id; 3084 3085 work->mad_recv_wc = NULL; 3085 3086 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3086 - queue_work(cm.wq, &work->work); 3087 + queue_delayed_work(cm.wq, &work->work, 0); 3087 3088 out: 3088 3089 return ret; 3089 3090 } ··· 3145 3146 return; 3146 3147 } 3147 3148 3148 - INIT_WORK(&work->work, cm_work_handler, work); 3149 + INIT_DELAYED_WORK(&work->work, cm_work_handler); 3149 3150 work->cm_event.event = event; 3150 3151 work->mad_recv_wc = mad_recv_wc; 3151 3152 work->port = (struct cm_port *)mad_agent->context; 3152 - queue_work(cm.wq, &work->work); 3153 + queue_delayed_work(cm.wq, &work->work, 0); 3153 3154 } 3154 3155 3155 3156 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
+5 -5
drivers/infiniband/core/cma.c
··· 1341 1341 return (id_priv->query_id < 0) ? id_priv->query_id : 0; 1342 1342 } 1343 1343 1344 - static void cma_work_handler(void *data) 1344 + static void cma_work_handler(struct work_struct *_work) 1345 1345 { 1346 - struct cma_work *work = data; 1346 + struct cma_work *work = container_of(_work, struct cma_work, work); 1347 1347 struct rdma_id_private *id_priv = work->id; 1348 1348 int destroy = 0; 1349 1349 ··· 1374 1374 return -ENOMEM; 1375 1375 1376 1376 work->id = id_priv; 1377 - INIT_WORK(&work->work, cma_work_handler, work); 1377 + INIT_WORK(&work->work, cma_work_handler); 1378 1378 work->old_state = CMA_ROUTE_QUERY; 1379 1379 work->new_state = CMA_ROUTE_RESOLVED; 1380 1380 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; ··· 1431 1431 return -ENOMEM; 1432 1432 1433 1433 work->id = id_priv; 1434 - INIT_WORK(&work->work, cma_work_handler, work); 1434 + INIT_WORK(&work->work, cma_work_handler); 1435 1435 work->old_state = CMA_ROUTE_QUERY; 1436 1436 work->new_state = CMA_ROUTE_RESOLVED; 1437 1437 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; ··· 1585 1585 } 1586 1586 1587 1587 work->id = id_priv; 1588 - INIT_WORK(&work->work, cma_work_handler, work); 1588 + INIT_WORK(&work->work, cma_work_handler); 1589 1589 work->old_state = CMA_ADDR_QUERY; 1590 1590 work->new_state = CMA_ADDR_RESOLVED; 1591 1591 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+4 -3
drivers/infiniband/core/iwcm.c
··· 828 828 * thread asleep on the destroy_comp list vs. an object destroyed 829 829 * here synchronously when the last reference is removed. 830 830 */ 831 - static void cm_work_handler(void *arg) 831 + static void cm_work_handler(struct work_struct *_work) 832 832 { 833 - struct iwcm_work *work = arg, lwork; 833 + struct iwcm_work lwork, *work = 834 + container_of(_work, struct iwcm_work, work); 834 835 struct iwcm_id_private *cm_id_priv = work->cm_id; 835 836 unsigned long flags; 836 837 int empty; ··· 900 899 goto out; 901 900 } 902 901 903 - INIT_WORK(&work->work, cm_work_handler, work); 902 + INIT_WORK(&work->work, cm_work_handler); 904 903 work->cm_id = cm_id_priv; 905 904 work->event = *iw_event; 906 905
+13 -12
drivers/infiniband/core/mad.c
··· 65 65 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 66 66 struct ib_mad_private *mad); 67 67 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); 68 - static void timeout_sends(void *data); 69 - static void local_completions(void *data); 68 + static void timeout_sends(struct work_struct *work); 69 + static void local_completions(struct work_struct *work); 70 70 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 71 71 struct ib_mad_agent_private *agent_priv, 72 72 u8 mgmt_class); ··· 356 356 INIT_LIST_HEAD(&mad_agent_priv->wait_list); 357 357 INIT_LIST_HEAD(&mad_agent_priv->done_list); 358 358 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); 359 - INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); 359 + INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); 360 360 INIT_LIST_HEAD(&mad_agent_priv->local_list); 361 - INIT_WORK(&mad_agent_priv->local_work, local_completions, 362 - mad_agent_priv); 361 + INIT_WORK(&mad_agent_priv->local_work, local_completions); 363 362 atomic_set(&mad_agent_priv->refcount, 1); 364 363 init_completion(&mad_agent_priv->comp); 365 364 ··· 2197 2198 /* 2198 2199 * IB MAD completion callback 2199 2200 */ 2200 - static void ib_mad_completion_handler(void *data) 2201 + static void ib_mad_completion_handler(struct work_struct *work) 2201 2202 { 2202 2203 struct ib_mad_port_private *port_priv; 2203 2204 struct ib_wc wc; 2204 2205 2205 - port_priv = (struct ib_mad_port_private *)data; 2206 + port_priv = container_of(work, struct ib_mad_port_private, work); 2206 2207 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 2207 2208 2208 2209 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { ··· 2323 2324 } 2324 2325 EXPORT_SYMBOL(ib_cancel_mad); 2325 2326 2326 - static void local_completions(void *data) 2327 + static void local_completions(struct work_struct *work) 2327 2328 { 2328 2329 struct ib_mad_agent_private *mad_agent_priv; 2329 2330 struct ib_mad_local_private 
*local; ··· 2333 2334 struct ib_wc wc; 2334 2335 struct ib_mad_send_wc mad_send_wc; 2335 2336 2336 - mad_agent_priv = (struct ib_mad_agent_private *)data; 2337 + mad_agent_priv = 2338 + container_of(work, struct ib_mad_agent_private, local_work); 2337 2339 2338 2340 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2339 2341 while (!list_empty(&mad_agent_priv->local_list)) { ··· 2434 2434 return ret; 2435 2435 } 2436 2436 2437 - static void timeout_sends(void *data) 2437 + static void timeout_sends(struct work_struct *work) 2438 2438 { 2439 2439 struct ib_mad_agent_private *mad_agent_priv; 2440 2440 struct ib_mad_send_wr_private *mad_send_wr; 2441 2441 struct ib_mad_send_wc mad_send_wc; 2442 2442 unsigned long flags, delay; 2443 2443 2444 - mad_agent_priv = (struct ib_mad_agent_private *)data; 2444 + mad_agent_priv = container_of(work, struct ib_mad_agent_private, 2445 + timed_work.work); 2445 2446 mad_send_wc.vendor_err = 0; 2446 2447 2447 2448 spin_lock_irqsave(&mad_agent_priv->lock, flags); ··· 2800 2799 ret = -ENOMEM; 2801 2800 goto error8; 2802 2801 } 2803 - INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv); 2802 + INIT_WORK(&port_priv->work, ib_mad_completion_handler); 2804 2803 2805 2804 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 2806 2805 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
+1 -1
drivers/infiniband/core/mad_priv.h
··· 102 102 struct list_head send_list; 103 103 struct list_head wait_list; 104 104 struct list_head done_list; 105 - struct work_struct timed_work; 105 + struct delayed_work timed_work; 106 106 unsigned long timeout; 107 107 struct list_head local_list; 108 108 struct work_struct local_work;
+10 -8
drivers/infiniband/core/mad_rmpp.c
··· 45 45 struct mad_rmpp_recv { 46 46 struct ib_mad_agent_private *agent; 47 47 struct list_head list; 48 - struct work_struct timeout_work; 49 - struct work_struct cleanup_work; 48 + struct delayed_work timeout_work; 49 + struct delayed_work cleanup_work; 50 50 struct completion comp; 51 51 enum rmpp_state state; 52 52 spinlock_t lock; ··· 233 233 } 234 234 } 235 235 236 - static void recv_timeout_handler(void *data) 236 + static void recv_timeout_handler(struct work_struct *work) 237 237 { 238 - struct mad_rmpp_recv *rmpp_recv = data; 238 + struct mad_rmpp_recv *rmpp_recv = 239 + container_of(work, struct mad_rmpp_recv, timeout_work.work); 239 240 struct ib_mad_recv_wc *rmpp_wc; 240 241 unsigned long flags; 241 242 ··· 255 254 ib_free_recv_mad(rmpp_wc); 256 255 } 257 256 258 - static void recv_cleanup_handler(void *data) 257 + static void recv_cleanup_handler(struct work_struct *work) 259 258 { 260 - struct mad_rmpp_recv *rmpp_recv = data; 259 + struct mad_rmpp_recv *rmpp_recv = 260 + container_of(work, struct mad_rmpp_recv, cleanup_work.work); 261 261 unsigned long flags; 262 262 263 263 spin_lock_irqsave(&rmpp_recv->agent->lock, flags); ··· 287 285 288 286 rmpp_recv->agent = agent; 289 287 init_completion(&rmpp_recv->comp); 290 - INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); 291 - INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); 288 + INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler); 289 + INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler); 292 290 spin_lock_init(&rmpp_recv->lock); 293 291 rmpp_recv->state = RMPP_STATE_ACTIVE; 294 292 atomic_set(&rmpp_recv->refcount, 1);
+5 -5
drivers/infiniband/core/sa_query.c
··· 360 360 kfree(sm_ah); 361 361 } 362 362 363 - static void update_sm_ah(void *port_ptr) 363 + static void update_sm_ah(struct work_struct *work) 364 364 { 365 - struct ib_sa_port *port = port_ptr; 365 + struct ib_sa_port *port = 366 + container_of(work, struct ib_sa_port, update_task); 366 367 struct ib_sa_sm_ah *new_ah, *old_ah; 367 368 struct ib_port_attr port_attr; 368 369 struct ib_ah_attr ah_attr; ··· 993 992 if (IS_ERR(sa_dev->port[i].agent)) 994 993 goto err; 995 994 996 - INIT_WORK(&sa_dev->port[i].update_task, 997 - update_sm_ah, &sa_dev->port[i]); 995 + INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); 998 996 } 999 997 1000 998 ib_set_client_data(device, &sa_client, sa_dev); ··· 1010 1010 goto err; 1011 1011 1012 1012 for (i = 0; i <= e - s; ++i) 1013 - update_sm_ah(&sa_dev->port[i]); 1013 + update_sm_ah(&sa_dev->port[i].update_task); 1014 1014 1015 1015 return; 1016 1016
+4 -3
drivers/infiniband/core/uverbs_mem.c
··· 179 179 up_write(&current->mm->mmap_sem); 180 180 } 181 181 182 - static void ib_umem_account(void *work_ptr) 182 + static void ib_umem_account(struct work_struct *_work) 183 183 { 184 - struct ib_umem_account_work *work = work_ptr; 184 + struct ib_umem_account_work *work = 185 + container_of(_work, struct ib_umem_account_work, work); 185 186 186 187 down_write(&work->mm->mmap_sem); 187 188 work->mm->locked_vm -= work->diff; ··· 217 216 return; 218 217 } 219 218 220 - INIT_WORK(&work->work, ib_umem_account, work); 219 + INIT_WORK(&work->work, ib_umem_account); 221 220 work->mm = mm; 222 221 work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; 223 222
+4 -3
drivers/infiniband/hw/ipath/ipath_user_pages.c
··· 214 214 unsigned long num_pages; 215 215 }; 216 216 217 - static void user_pages_account(void *ptr) 217 + static void user_pages_account(struct work_struct *_work) 218 218 { 219 - struct ipath_user_pages_work *work = ptr; 219 + struct ipath_user_pages_work *work = 220 + container_of(_work, struct ipath_user_pages_work, work); 220 221 221 222 down_write(&work->mm->mmap_sem); 222 223 work->mm->locked_vm -= work->num_pages; ··· 243 242 244 243 goto bail; 245 244 246 - INIT_WORK(&work->work, user_pages_account, work); 245 + INIT_WORK(&work->work, user_pages_account); 247 246 work->mm = mm; 248 247 work->num_pages = num_pages; 249 248
+2 -2
drivers/infiniband/hw/mthca/mthca_catas.c
··· 57 57 module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); 58 58 MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); 59 59 60 - static void catas_reset(void *work_ptr) 60 + static void catas_reset(struct work_struct *work) 61 61 { 62 62 struct mthca_dev *dev, *tmpdev; 63 63 LIST_HEAD(tlist); ··· 203 203 204 204 int __init mthca_catas_init(void) 205 205 { 206 - INIT_WORK(&catas_work, catas_reset, NULL); 206 + INIT_WORK(&catas_work, catas_reset); 207 207 208 208 catas_wq = create_singlethread_workqueue("mthca_catas"); 209 209 if (!catas_wq)
+8 -8
drivers/infiniband/ulp/ipoib/ipoib.h
··· 136 136 struct list_head multicast_list; 137 137 struct rb_root multicast_tree; 138 138 139 - struct work_struct pkey_task; 140 - struct work_struct mcast_task; 139 + struct delayed_work pkey_task; 140 + struct delayed_work mcast_task; 141 141 struct work_struct flush_task; 142 142 struct work_struct restart_task; 143 - struct work_struct ah_reap_task; 143 + struct delayed_work ah_reap_task; 144 144 145 145 struct ib_device *ca; 146 146 u8 port; ··· 254 254 255 255 void ipoib_send(struct net_device *dev, struct sk_buff *skb, 256 256 struct ipoib_ah *address, u32 qpn); 257 - void ipoib_reap_ah(void *dev_ptr); 257 + void ipoib_reap_ah(struct work_struct *work); 258 258 259 259 void ipoib_flush_paths(struct net_device *dev); 260 260 struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); 261 261 262 262 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); 263 - void ipoib_ib_dev_flush(void *dev); 263 + void ipoib_ib_dev_flush(struct work_struct *work); 264 264 void ipoib_ib_dev_cleanup(struct net_device *dev); 265 265 266 266 int ipoib_ib_dev_open(struct net_device *dev); ··· 271 271 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); 272 272 void ipoib_dev_cleanup(struct net_device *dev); 273 273 274 - void ipoib_mcast_join_task(void *dev_ptr); 274 + void ipoib_mcast_join_task(struct work_struct *work); 275 275 void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); 276 276 277 - void ipoib_mcast_restart_task(void *dev_ptr); 277 + void ipoib_mcast_restart_task(struct work_struct *work); 278 278 int ipoib_mcast_start_thread(struct net_device *dev); 279 279 int ipoib_mcast_stop_thread(struct net_device *dev, int flush); 280 280 ··· 312 312 int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); 313 313 int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); 314 314 315 - void ipoib_pkey_poll(void *dev); 315 + void ipoib_pkey_poll(struct work_struct *work); 316 316 
int ipoib_pkey_dev_delay_open(struct net_device *dev); 317 317 318 318 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+14 -11
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 400 400 spin_unlock_irq(&priv->tx_lock); 401 401 } 402 402 403 - void ipoib_reap_ah(void *dev_ptr) 403 + void ipoib_reap_ah(struct work_struct *work) 404 404 { 405 - struct net_device *dev = dev_ptr; 406 - struct ipoib_dev_priv *priv = netdev_priv(dev); 405 + struct ipoib_dev_priv *priv = 406 + container_of(work, struct ipoib_dev_priv, ah_reap_task.work); 407 + struct net_device *dev = priv->dev; 407 408 408 409 __ipoib_reap_ah(dev); 409 410 ··· 614 613 return 0; 615 614 } 616 615 617 - void ipoib_ib_dev_flush(void *_dev) 616 + void ipoib_ib_dev_flush(struct work_struct *work) 618 617 { 619 - struct net_device *dev = (struct net_device *)_dev; 620 - struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv; 618 + struct ipoib_dev_priv *cpriv, *priv = 619 + container_of(work, struct ipoib_dev_priv, flush_task); 620 + struct net_device *dev = priv->dev; 621 621 622 622 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { 623 623 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); ··· 640 638 */ 641 639 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { 642 640 ipoib_ib_dev_up(dev); 643 - ipoib_mcast_restart_task(dev); 641 + ipoib_mcast_restart_task(&priv->restart_task); 644 642 } 645 643 646 644 mutex_lock(&priv->vlan_mutex); 647 645 648 646 /* Flush any child interfaces too */ 649 647 list_for_each_entry(cpriv, &priv->child_intfs, list) 650 - ipoib_ib_dev_flush(cpriv->dev); 648 + ipoib_ib_dev_flush(&cpriv->flush_task); 651 649 652 650 mutex_unlock(&priv->vlan_mutex); 653 651 } ··· 674 672 * change async notification is available. 675 673 */ 676 674 677 - void ipoib_pkey_poll(void *dev_ptr) 675 + void ipoib_pkey_poll(struct work_struct *work) 678 676 { 679 - struct net_device *dev = dev_ptr; 680 - struct ipoib_dev_priv *priv = netdev_priv(dev); 677 + struct ipoib_dev_priv *priv = 678 + container_of(work, struct ipoib_dev_priv, pkey_task.work); 679 + struct net_device *dev = priv->dev; 681 680 682 681 ipoib_pkey_dev_check_presence(dev); 683 682
+5 -5
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 933 933 INIT_LIST_HEAD(&priv->dead_ahs); 934 934 INIT_LIST_HEAD(&priv->multicast_list); 935 935 936 - INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev); 937 - INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev); 938 - INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev); 939 - INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev); 940 - INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev); 936 + INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll); 937 + INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); 938 + INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush); 939 + INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); 940 + INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); 941 941 } 942 942 943 943 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
+13 -9
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 399 399 mcast->backoff = 1; 400 400 mutex_lock(&mcast_mutex); 401 401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 402 - queue_work(ipoib_workqueue, &priv->mcast_task); 402 + queue_delayed_work(ipoib_workqueue, 403 + &priv->mcast_task, 0); 403 404 mutex_unlock(&mcast_mutex); 404 405 complete(&mcast->done); 405 406 return; ··· 436 435 437 436 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { 438 437 if (status == -ETIMEDOUT) 439 - queue_work(ipoib_workqueue, &priv->mcast_task); 438 + queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 439 + 0); 440 440 else 441 441 queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 442 442 mcast->backoff * HZ); ··· 519 517 mcast->query_id = ret; 520 518 } 521 519 522 - void ipoib_mcast_join_task(void *dev_ptr) 520 + void ipoib_mcast_join_task(struct work_struct *work) 523 521 { 524 - struct net_device *dev = dev_ptr; 525 - struct ipoib_dev_priv *priv = netdev_priv(dev); 522 + struct ipoib_dev_priv *priv = 523 + container_of(work, struct ipoib_dev_priv, mcast_task.work); 524 + struct net_device *dev = priv->dev; 526 525 527 526 if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) 528 527 return; ··· 613 610 614 611 mutex_lock(&mcast_mutex); 615 612 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) 616 - queue_work(ipoib_workqueue, &priv->mcast_task); 613 + queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); 617 614 mutex_unlock(&mcast_mutex); 618 615 619 616 spin_lock_irq(&priv->lock); ··· 821 818 } 822 819 } 823 820 824 - void ipoib_mcast_restart_task(void *dev_ptr) 821 + void ipoib_mcast_restart_task(struct work_struct *work) 825 822 { 826 - struct net_device *dev = dev_ptr; 827 - struct ipoib_dev_priv *priv = netdev_priv(dev); 823 + struct ipoib_dev_priv *priv = 824 + container_of(work, struct ipoib_dev_priv, restart_task); 825 + struct net_device *dev = priv->dev; 828 826 struct dev_mc_list *mclist; 829 827 struct ipoib_mcast *mcast, *tmcast; 830 828 LIST_HEAD(remove_list);
+5 -5
drivers/infiniband/ulp/iser/iser_verbs.c
··· 48 48 49 49 static void iser_cq_tasklet_fn(unsigned long data); 50 50 static void iser_cq_callback(struct ib_cq *cq, void *cq_context); 51 - static void iser_comp_error_worker(void *data); 51 + static void iser_comp_error_worker(struct work_struct *work); 52 52 53 53 static void iser_cq_event_callback(struct ib_event *cause, void *context) 54 54 { ··· 480 480 init_waitqueue_head(&ib_conn->wait); 481 481 atomic_set(&ib_conn->post_recv_buf_count, 0); 482 482 atomic_set(&ib_conn->post_send_buf_count, 0); 483 - INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker, 484 - ib_conn); 483 + INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker); 485 484 INIT_LIST_HEAD(&ib_conn->conn_list); 486 485 spin_lock_init(&ib_conn->lock); 487 486 ··· 753 754 return ret_val; 754 755 } 755 756 756 - static void iser_comp_error_worker(void *data) 757 + static void iser_comp_error_worker(struct work_struct *work) 757 758 { 758 - struct iser_conn *ib_conn = data; 759 + struct iser_conn *ib_conn = 760 + container_of(work, struct iser_conn, comperror_work); 759 761 760 762 /* getting here when the state is UP means that the conn is being * 761 763 * terminated asynchronously from the iSCSI layer's perspective. */
+4 -3
drivers/infiniband/ulp/srp/ib_srp.c
··· 390 390 wait_for_completion(&target->done); 391 391 } 392 392 393 - static void srp_remove_work(void *target_ptr) 393 + static void srp_remove_work(struct work_struct *work) 394 394 { 395 - struct srp_target_port *target = target_ptr; 395 + struct srp_target_port *target = 396 + container_of(work, struct srp_target_port, work); 396 397 397 398 spin_lock_irq(target->scsi_host->host_lock); 398 399 if (target->state != SRP_TARGET_DEAD) { ··· 576 575 spin_lock_irq(target->scsi_host->host_lock); 577 576 if (target->state == SRP_TARGET_CONNECTING) { 578 577 target->state = SRP_TARGET_DEAD; 579 - INIT_WORK(&target->work, srp_remove_work, target); 578 + INIT_WORK(&target->work, srp_remove_work); 580 579 schedule_work(&target->work); 581 580 } 582 581 spin_unlock_irq(target->scsi_host->host_lock);
+3 -3
drivers/input/keyboard/lkkbd.c
··· 572 572 * were in. 573 573 */ 574 574 static void 575 - lkkbd_reinit (void *data) 575 + lkkbd_reinit (struct work_struct *work) 576 576 { 577 - struct lkkbd *lk = data; 577 + struct lkkbd *lk = container_of(work, struct lkkbd, tq); 578 578 int division; 579 579 unsigned char leds_on = 0; 580 580 unsigned char leds_off = 0; ··· 651 651 652 652 lk->serio = serio; 653 653 lk->dev = input_dev; 654 - INIT_WORK (&lk->tq, lkkbd_reinit, lk); 654 + INIT_WORK (&lk->tq, lkkbd_reinit); 655 655 lk->bell_volume = bell_volume; 656 656 lk->keyclick_volume = keyclick_volume; 657 657 lk->ctrlclick_volume = ctrlclick_volume;
+3 -3
drivers/input/keyboard/sunkbd.c
··· 208 208 * were in. 209 209 */ 210 210 211 - static void sunkbd_reinit(void *data) 211 + static void sunkbd_reinit(struct work_struct *work) 212 212 { 213 - struct sunkbd *sunkbd = data; 213 + struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); 214 214 215 215 wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); 216 216 ··· 248 248 sunkbd->serio = serio; 249 249 sunkbd->dev = input_dev; 250 250 init_waitqueue_head(&sunkbd->wait); 251 - INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd); 251 + INIT_WORK(&sunkbd->tq, sunkbd_reinit); 252 252 snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys); 253 253 254 254 serio_set_drvdata(serio, sunkbd);
+4 -3
drivers/input/mouse/psmouse-base.c
··· 888 888 * psmouse_resync() attempts to re-validate current protocol. 889 889 */ 890 890 891 - static void psmouse_resync(void *p) 891 + static void psmouse_resync(struct work_struct *work) 892 892 { 893 - struct psmouse *psmouse = p, *parent = NULL; 893 + struct psmouse *parent = NULL, *psmouse = 894 + container_of(work, struct psmouse, resync_work); 894 895 struct serio *serio = psmouse->ps2dev.serio; 895 896 psmouse_ret_t rc = PSMOUSE_GOOD_DATA; 896 897 int failed = 0, enabled = 0; ··· 1122 1121 goto out; 1123 1122 1124 1123 ps2_init(&psmouse->ps2dev, serio); 1125 - INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse); 1124 + INIT_WORK(&psmouse->resync_work, psmouse_resync); 1126 1125 psmouse->dev = input_dev; 1127 1126 snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys); 1128 1127
+3 -1
drivers/isdn/act2000/capi.c
··· 627 627 } 628 628 629 629 void 630 - actcapi_dispatch(act2000_card *card) 630 + actcapi_dispatch(struct work_struct *work) 631 631 { 632 + struct act2000_card *card = 633 + container_of(work, struct act2000_card, rcv_tq); 632 634 struct sk_buff *skb; 633 635 actcapi_msg *msg; 634 636 __u16 ccmd;
+1 -1
drivers/isdn/act2000/capi.h
··· 356 356 extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *); 357 357 extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *); 358 358 extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8); 359 - extern void actcapi_dispatch(act2000_card *); 359 + extern void actcapi_dispatch(struct work_struct *); 360 360 #ifdef DEBUG_MSG 361 361 extern void actcapi_debug_msg(struct sk_buff *skb, int); 362 362 #else
+12 -6
drivers/isdn/act2000/module.c
··· 192 192 } 193 193 194 194 static void 195 - act2000_transmit(struct act2000_card *card) 195 + act2000_transmit(struct work_struct *work) 196 196 { 197 + struct act2000_card *card = 198 + container_of(work, struct act2000_card, snd_tq); 199 + 197 200 switch (card->bus) { 198 201 case ACT2000_BUS_ISA: 199 202 act2000_isa_send(card); ··· 210 207 } 211 208 212 209 static void 213 - act2000_receive(struct act2000_card *card) 210 + act2000_receive(struct work_struct *work) 214 211 { 212 + struct act2000_card *card = 213 + container_of(work, struct act2000_card, poll_tq); 214 + 215 215 switch (card->bus) { 216 216 case ACT2000_BUS_ISA: 217 217 act2000_isa_receive(card); ··· 233 227 act2000_card * card = (act2000_card *)data; 234 228 unsigned long flags; 235 229 236 - act2000_receive(card); 230 + act2000_receive(&card->poll_tq); 237 231 spin_lock_irqsave(&card->lock, flags); 238 232 mod_timer(&card->ptimer, jiffies+3); 239 233 spin_unlock_irqrestore(&card->lock, flags); ··· 584 578 skb_queue_head_init(&card->sndq); 585 579 skb_queue_head_init(&card->rcvq); 586 580 skb_queue_head_init(&card->ackq); 587 - INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card); 588 - INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card); 589 - INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card); 581 + INIT_WORK(&card->snd_tq, act2000_transmit); 582 + INIT_WORK(&card->rcv_tq, actcapi_dispatch); 583 + INIT_WORK(&card->poll_tq, act2000_receive); 590 584 init_timer(&card->ptimer); 591 585 card->interface.owner = THIS_MODULE; 592 586 card->interface.channels = ACT2000_BCH;
+8 -6
drivers/isdn/capi/kcapi.c
··· 208 208 } 209 209 } 210 210 211 - static void notify_handler(void *data) 211 + static void notify_handler(struct work_struct *work) 212 212 { 213 - struct capi_notifier *np = data; 213 + struct capi_notifier *np = 214 + container_of(work, struct capi_notifier, work); 214 215 215 216 switch (np->cmd) { 216 217 case KCI_CONTRUP: ··· 236 235 if (!np) 237 236 return -ENOMEM; 238 237 239 - INIT_WORK(&np->work, notify_handler, np); 238 + INIT_WORK(&np->work, notify_handler); 240 239 np->cmd = cmd; 241 240 np->controller = controller; 242 241 np->applid = applid; ··· 249 248 250 249 /* -------- Receiver ------------------------------------------ */ 251 250 252 - static void recv_handler(void *_ap) 251 + static void recv_handler(struct work_struct *work) 253 252 { 254 253 struct sk_buff *skb; 255 - struct capi20_appl *ap = (struct capi20_appl *) _ap; 254 + struct capi20_appl *ap = 255 + container_of(work, struct capi20_appl, recv_work); 256 256 257 257 if ((!ap) || (ap->release_in_progress)) 258 258 return; ··· 529 527 ap->callback = NULL; 530 528 init_MUTEX(&ap->recv_sem); 531 529 skb_queue_head_init(&ap->recv_queue); 532 - INIT_WORK(&ap->recv_work, recv_handler, (void *)ap); 530 + INIT_WORK(&ap->recv_work, recv_handler); 533 531 ap->release_in_progress = 0; 534 532 535 533 write_unlock_irqrestore(&application_lock, flags);
+4 -3
drivers/isdn/hisax/amd7930_fn.c
··· 232 232 233 233 234 234 static void 235 - Amd7930_bh(struct IsdnCardState *cs) 235 + Amd7930_bh(struct work_struct *work) 236 236 { 237 - 237 + struct IsdnCardState *cs = 238 + container_of(work, struct IsdnCardState, tqueue); 238 239 struct PStack *stptr; 239 240 240 241 if (!cs) ··· 790 789 void __devinit 791 790 setup_Amd7930(struct IsdnCardState *cs) 792 791 { 793 - INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs); 792 + INIT_WORK(&cs->tqueue, Amd7930_bh); 794 793 cs->dbusytimer.function = (void *) dbusy_timer_handler; 795 794 cs->dbusytimer.data = (long) cs; 796 795 init_timer(&cs->dbusytimer);
+5 -4
drivers/isdn/hisax/config.c
··· 1137 1137 cs->tx_skb = NULL; 1138 1138 cs->tx_cnt = 0; 1139 1139 cs->event = 0; 1140 - cs->tqueue.data = cs; 1141 1140 1142 1141 skb_queue_head_init(&cs->rq); 1143 1142 skb_queue_head_init(&cs->sq); ··· 1553 1554 static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg); 1554 1555 static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs); 1555 1556 static void hisax_bc_close(struct BCState *bcs); 1556 - static void hisax_bh(struct IsdnCardState *cs); 1557 + static void hisax_bh(struct work_struct *work); 1557 1558 static void EChannel_proc_rcv(struct hisax_d_if *d_if); 1558 1559 1559 1560 int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], ··· 1585 1586 hisax_d_if->cs = cs; 1586 1587 cs->hw.hisax_d_if = hisax_d_if; 1587 1588 cs->cardmsg = hisax_cardmsg; 1588 - INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs); 1589 + INIT_WORK(&cs->tqueue, hisax_bh); 1589 1590 cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1; 1590 1591 for (i = 0; i < 2; i++) { 1591 1592 cs->bcs[i].BC_SetStack = hisax_bc_setstack; ··· 1617 1618 schedule_work(&cs->tqueue); 1618 1619 } 1619 1620 1620 - static void hisax_bh(struct IsdnCardState *cs) 1621 + static void hisax_bh(struct work_struct *work) 1621 1622 { 1623 + struct IsdnCardState *cs = 1624 + container_of(work, struct IsdnCardState, tqueue); 1622 1625 struct PStack *st; 1623 1626 int pr; 1624 1627
+3 -2
drivers/isdn/hisax/hfc4s8s_l1.c
··· 1083 1083 /* bottom half handler for interrupt */ 1084 1084 /*************************************/ 1085 1085 static void 1086 - hfc4s8s_bh(hfc4s8s_hw * hw) 1086 + hfc4s8s_bh(struct work_struct *work) 1087 1087 { 1088 + hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue); 1088 1089 u_char b; 1089 1090 struct hfc4s8s_l1 *l1p; 1090 1091 volatile u_char *fifo_stat; ··· 1551 1550 goto out; 1552 1551 } 1553 1552 1554 - INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw); 1553 + INIT_WORK(&hw->tqueue, hfc4s8s_bh); 1555 1554 1556 1555 if (request_irq 1557 1556 (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
+5 -4
drivers/isdn/hisax/hfc_2bds0.c
··· 549 549 } 550 550 551 551 static void 552 - hfcd_bh(struct IsdnCardState *cs) 552 + hfcd_bh(struct work_struct *work) 553 553 { 554 - if (!cs) 555 - return; 554 + struct IsdnCardState *cs = 555 + container_of(work, struct IsdnCardState, tqueue); 556 + 556 557 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { 557 558 switch (cs->dc.hfcd.ph_state) { 558 559 case (0): ··· 1073 1072 cs->dbusytimer.function = (void *) hfc_dbusy_timer; 1074 1073 cs->dbusytimer.data = (long) cs; 1075 1074 init_timer(&cs->dbusytimer); 1076 - INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs); 1075 + INIT_WORK(&cs->tqueue, hfcd_bh); 1077 1076 }
+4 -2
drivers/isdn/hisax/hfc_pci.c
··· 1506 1506 /* handle L1 state changes */ 1507 1507 /***************************/ 1508 1508 static void 1509 - hfcpci_bh(struct IsdnCardState *cs) 1509 + hfcpci_bh(struct work_struct *work) 1510 1510 { 1511 + struct IsdnCardState *cs = 1512 + container_of(work, struct IsdnCardState, tqueue); 1511 1513 u_long flags; 1512 1514 // struct PStack *stptr; 1513 1515 ··· 1724 1722 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); 1725 1723 /* At this point the needed PCI config is done */ 1726 1724 /* fifos are still not enabled */ 1727 - INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs); 1725 + INIT_WORK(&cs->tqueue, hfcpci_bh); 1728 1726 cs->setstack_d = setstack_hfcpci; 1729 1727 cs->BC_Send_Data = &hfcpci_send_data; 1730 1728 cs->readisac = NULL;
+4 -2
drivers/isdn/hisax/hfc_sx.c
··· 1251 1251 /* handle L1 state changes */ 1252 1252 /***************************/ 1253 1253 static void 1254 - hfcsx_bh(struct IsdnCardState *cs) 1254 + hfcsx_bh(struct work_struct *work) 1255 1255 { 1256 + struct IsdnCardState *cs = 1257 + container_of(work, struct IsdnCardState, tqueue); 1256 1258 u_long flags; 1257 1259 1258 1260 if (!cs) ··· 1501 1499 cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; 1502 1500 cs->dbusytimer.data = (long) cs; 1503 1501 init_timer(&cs->dbusytimer); 1504 - INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs); 1502 + INIT_WORK(&cs->tqueue, hfcsx_bh); 1505 1503 cs->readisac = NULL; 1506 1504 cs->writeisac = NULL; 1507 1505 cs->readisacfifo = NULL;
+4 -2
drivers/isdn/hisax/icc.c
··· 77 77 } 78 78 79 79 static void 80 - icc_bh(struct IsdnCardState *cs) 80 + icc_bh(struct work_struct *work) 81 81 { 82 + struct IsdnCardState *cs = 83 + container_of(work, struct IsdnCardState, tqueue); 82 84 struct PStack *stptr; 83 85 84 86 if (!cs) ··· 676 674 void __devinit 677 675 setup_icc(struct IsdnCardState *cs) 678 676 { 679 - INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs); 677 + INIT_WORK(&cs->tqueue, icc_bh); 680 678 cs->dbusytimer.function = (void *) dbusy_timer_handler; 681 679 cs->dbusytimer.data = (long) cs; 682 680 init_timer(&cs->dbusytimer);
+4 -2
drivers/isdn/hisax/isac.c
··· 81 81 } 82 82 83 83 static void 84 - isac_bh(struct IsdnCardState *cs) 84 + isac_bh(struct work_struct *work) 85 85 { 86 + struct IsdnCardState *cs = 87 + container_of(work, struct IsdnCardState, tqueue); 86 88 struct PStack *stptr; 87 89 88 90 if (!cs) ··· 676 674 void __devinit 677 675 setup_isac(struct IsdnCardState *cs) 678 676 { 679 - INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs); 677 + INIT_WORK(&cs->tqueue, isac_bh); 680 678 cs->dbusytimer.function = (void *) dbusy_timer_handler; 681 679 cs->dbusytimer.data = (long) cs; 682 680 init_timer(&cs->dbusytimer);
+4 -2
drivers/isdn/hisax/isar.c
··· 437 437 #define B_LL_OK 10 438 438 439 439 static void 440 - isar_bh(struct BCState *bcs) 440 + isar_bh(struct work_struct *work) 441 441 { 442 + struct BCState *bcs = container_of(work, struct BCState, tqueue); 443 + 442 444 BChannel_bh(bcs); 443 445 if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event)) 444 446 ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR); ··· 1582 1580 cs->bcs[i].mode = 0; 1583 1581 cs->bcs[i].hw.isar.dpath = i + 1; 1584 1582 modeisar(&cs->bcs[i], 0, 0); 1585 - INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]); 1583 + INIT_WORK(&cs->bcs[i].tqueue, isar_bh); 1586 1584 } 1587 1585 } 1588 1586
+4 -2
drivers/isdn/hisax/isdnl1.c
··· 315 315 } 316 316 317 317 void 318 - BChannel_bh(struct BCState *bcs) 318 + BChannel_bh(struct work_struct *work) 319 319 { 320 + struct BCState *bcs = container_of(work, struct BCState, tqueue); 321 + 320 322 if (!bcs) 321 323 return; 322 324 if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event)) ··· 364 362 365 363 bcs->cs = cs; 366 364 bcs->channel = bc; 367 - INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs); 365 + INIT_WORK(&bcs->tqueue, BChannel_bh); 368 366 spin_lock_init(&bcs->aclock); 369 367 bcs->BC_SetStack = NULL; 370 368 bcs->BC_Close = NULL;
+4 -2
drivers/isdn/hisax/w6692.c
··· 101 101 } 102 102 103 103 static void 104 - W6692_bh(struct IsdnCardState *cs) 104 + W6692_bh(struct work_struct *work) 105 105 { 106 + struct IsdnCardState *cs = 107 + container_of(work, struct IsdnCardState, tqueue); 106 108 struct PStack *stptr; 107 109 108 110 if (!cs) ··· 1072 1070 id_list[cs->subtyp].card_name, cs->irq, 1073 1071 cs->hw.w6692.iobase); 1074 1072 1075 - INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs); 1073 + INIT_WORK(&cs->tqueue, W6692_bh); 1076 1074 cs->readW6692 = &ReadW6692; 1077 1075 cs->writeW6692 = &WriteW6692; 1078 1076 cs->readisacfifo = &ReadISACfifo;
+3 -3
drivers/isdn/i4l/isdn_net.c
··· 984 984 /* 985 985 * called from tq_immediate 986 986 */ 987 - static void isdn_net_softint(void *private) 987 + static void isdn_net_softint(struct work_struct *work) 988 988 { 989 - isdn_net_local *lp = private; 989 + isdn_net_local *lp = container_of(work, isdn_net_local, tqueue); 990 990 struct sk_buff *skb; 991 991 992 992 spin_lock_bh(&lp->xmit_lock); ··· 2596 2596 netdev->local->netdev = netdev; 2597 2597 netdev->local->next = netdev->local; 2598 2598 2599 - INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local); 2599 + INIT_WORK(&netdev->local->tqueue, isdn_net_softint); 2600 2600 spin_lock_init(&netdev->local->xmit_lock); 2601 2601 2602 2602 netdev->local->isdn_device = -1;
+1 -3
drivers/isdn/pcbit/drv.c
··· 68 68 static int pcbit_check_msn(struct pcbit_dev *dev, char *msn); 69 69 70 70 71 - extern void pcbit_deliver(void * data); 72 - 73 71 int pcbit_init_dev(int board, int mem_base, int irq) 74 72 { 75 73 struct pcbit_dev *dev; ··· 127 129 memset(dev->b2, 0, sizeof(struct pcbit_chan)); 128 130 dev->b2->id = 1; 129 131 130 - INIT_WORK(&dev->qdelivery, pcbit_deliver, dev); 132 + INIT_WORK(&dev->qdelivery, pcbit_deliver); 131 133 132 134 /* 133 135 * interrupts
+3 -3
drivers/isdn/pcbit/layer2.c
··· 67 67 * Prototypes 68 68 */ 69 69 70 - void pcbit_deliver(void *data); 71 70 static void pcbit_transmit(struct pcbit_dev *dev); 72 71 73 72 static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack); ··· 298 299 */ 299 300 300 301 void 301 - pcbit_deliver(void *data) 302 + pcbit_deliver(struct work_struct *work) 302 303 { 303 304 struct frame_buf *frame; 304 305 unsigned long flags, msg; 305 - struct pcbit_dev *dev = (struct pcbit_dev *) data; 306 + struct pcbit_dev *dev = 307 + container_of(work, struct pcbit_dev, qdelivery); 306 308 307 309 spin_lock_irqsave(&dev->lock, flags); 308 310
+2
drivers/isdn/pcbit/pcbit.h
··· 166 166 #define L2_RUNNING 5 167 167 #define L2_ERROR 6 168 168 169 + extern void pcbit_deliver(struct work_struct *work); 170 + 169 171 #endif
+2 -2
drivers/macintosh/smu.c
··· 600 600 * sysfs visibility 601 601 */ 602 602 603 - static void smu_expose_childs(void *unused) 603 + static void smu_expose_childs(struct work_struct *unused) 604 604 { 605 605 struct device_node *np; 606 606 ··· 610 610 &smu->of_dev->dev); 611 611 } 612 612 613 - static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL); 613 + static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs); 614 614 615 615 static int smu_platform_probe(struct of_device* dev, 616 616 const struct of_device_id *match)
+4 -4
drivers/md/dm-crypt.c
··· 458 458 * interrupt context. 459 459 */ 460 460 static struct workqueue_struct *_kcryptd_workqueue; 461 - static void kcryptd_do_work(void *data); 461 + static void kcryptd_do_work(struct work_struct *work); 462 462 463 463 static void kcryptd_queue_io(struct crypt_io *io) 464 464 { 465 - INIT_WORK(&io->work, kcryptd_do_work, io); 465 + INIT_WORK(&io->work, kcryptd_do_work); 466 466 queue_work(_kcryptd_workqueue, &io->work); 467 467 } 468 468 ··· 618 618 dec_pending(io, crypt_convert(cc, &ctx)); 619 619 } 620 620 621 - static void kcryptd_do_work(void *data) 621 + static void kcryptd_do_work(struct work_struct *work) 622 622 { 623 - struct crypt_io *io = data; 623 + struct crypt_io *io = container_of(work, struct crypt_io, work); 624 624 625 625 if (io->post_process) 626 626 process_read_endio(io);
+10 -8
drivers/md/dm-mpath.c
··· 104 104 static kmem_cache_t *_mpio_cache; 105 105 106 106 struct workqueue_struct *kmultipathd; 107 - static void process_queued_ios(void *data); 108 - static void trigger_event(void *data); 107 + static void process_queued_ios(struct work_struct *work); 108 + static void trigger_event(struct work_struct *work); 109 109 110 110 111 111 /*----------------------------------------------- ··· 173 173 INIT_LIST_HEAD(&m->priority_groups); 174 174 spin_lock_init(&m->lock); 175 175 m->queue_io = 1; 176 - INIT_WORK(&m->process_queued_ios, process_queued_ios, m); 177 - INIT_WORK(&m->trigger_event, trigger_event, m); 176 + INIT_WORK(&m->process_queued_ios, process_queued_ios); 177 + INIT_WORK(&m->trigger_event, trigger_event); 178 178 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); 179 179 if (!m->mpio_pool) { 180 180 kfree(m); ··· 379 379 } 380 380 } 381 381 382 - static void process_queued_ios(void *data) 382 + static void process_queued_ios(struct work_struct *work) 383 383 { 384 - struct multipath *m = (struct multipath *) data; 384 + struct multipath *m = 385 + container_of(work, struct multipath, process_queued_ios); 385 386 struct hw_handler *hwh = &m->hw_handler; 386 387 struct pgpath *pgpath = NULL; 387 388 unsigned init_required = 0, must_queue = 1; ··· 422 421 * An event is triggered whenever a path is taken out of use. 423 422 * Includes path failure and PG bypass. 424 423 */ 425 - static void trigger_event(void *data) 424 + static void trigger_event(struct work_struct *work) 426 425 { 427 - struct multipath *m = (struct multipath *) data; 426 + struct multipath *m = 427 + container_of(work, struct multipath, trigger_event); 428 428 429 429 dm_table_event(m->ti->table); 430 430 }
+2 -2
drivers/md/dm-raid1.c
··· 883 883 do_writes(ms, &writes); 884 884 } 885 885 886 - static void do_work(void *ignored) 886 + static void do_work(struct work_struct *ignored) 887 887 { 888 888 struct mirror_set *ms; 889 889 ··· 1269 1269 dm_dirty_log_exit(); 1270 1270 return r; 1271 1271 } 1272 - INIT_WORK(&_kmirrord_work, do_work, NULL); 1272 + INIT_WORK(&_kmirrord_work, do_work); 1273 1273 1274 1274 r = dm_register_target(&mirror_target); 1275 1275 if (r < 0) {
+5 -4
drivers/md/dm-snap.c
··· 40 40 #define SNAPSHOT_PAGES 256 41 41 42 42 struct workqueue_struct *ksnapd; 43 - static void flush_queued_bios(void *data); 43 + static void flush_queued_bios(struct work_struct *work); 44 44 45 45 struct pending_exception { 46 46 struct exception e; ··· 528 528 } 529 529 530 530 bio_list_init(&s->queued_bios); 531 - INIT_WORK(&s->queued_bios_work, flush_queued_bios, s); 531 + INIT_WORK(&s->queued_bios_work, flush_queued_bios); 532 532 533 533 /* Add snapshot to the list of snapshots for this origin */ 534 534 /* Exceptions aren't triggered till snapshot_resume() is called */ ··· 603 603 } 604 604 } 605 605 606 - static void flush_queued_bios(void *data) 606 + static void flush_queued_bios(struct work_struct *work) 607 607 { 608 - struct dm_snapshot *s = (struct dm_snapshot *) data; 608 + struct dm_snapshot *s = 609 + container_of(work, struct dm_snapshot, queued_bios_work); 609 610 struct bio *queued_bios; 610 611 unsigned long flags; 611 612
+2 -2
drivers/md/kcopyd.c
··· 417 417 /* 418 418 * kcopyd does this every time it's woken up. 419 419 */ 420 - static void do_work(void *ignored) 420 + static void do_work(struct work_struct *ignored) 421 421 { 422 422 /* 423 423 * The order that these are called is *very* important. ··· 628 628 } 629 629 630 630 kcopyd_clients++; 631 - INIT_WORK(&_kcopyd_work, do_work, NULL); 631 + INIT_WORK(&_kcopyd_work, do_work); 632 632 mutex_unlock(&kcopyd_init_lock); 633 633 return 0; 634 634 }
+5 -4
drivers/media/dvb/b2c2/flexcop-pci.c
··· 63 63 64 64 unsigned long last_irq; 65 65 66 - struct work_struct irq_check_work; 66 + struct delayed_work irq_check_work; 67 67 68 68 struct flexcop_device *fc_dev; 69 69 }; ··· 97 97 return 0; 98 98 } 99 99 100 - static void flexcop_pci_irq_check_work(void *data) 100 + static void flexcop_pci_irq_check_work(struct work_struct *work) 101 101 { 102 - struct flexcop_pci *fc_pci = data; 102 + struct flexcop_pci *fc_pci = 103 + container_of(work, struct flexcop_pci, irq_check_work.work); 103 104 struct flexcop_device *fc = fc_pci->fc_dev; 104 105 105 106 flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714); ··· 372 371 if ((ret = flexcop_pci_dma_init(fc_pci)) != 0) 373 372 goto err_fc_exit; 374 373 375 - INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci); 374 + INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work); 376 375 377 376 return ret; 378 377
+10 -8
drivers/media/dvb/cinergyT2/cinergyT2.c
··· 127 127 128 128 struct dvbt_set_parameters_msg param; 129 129 struct dvbt_get_status_msg status; 130 - struct work_struct query_work; 130 + struct delayed_work query_work; 131 131 132 132 wait_queue_head_t poll_wq; 133 133 int pending_fe_events; ··· 141 141 #ifdef ENABLE_RC 142 142 struct input_dev *rc_input_dev; 143 143 char phys[64]; 144 - struct work_struct rc_query_work; 144 + struct delayed_work rc_query_work; 145 145 int rc_input_event; 146 146 u32 rc_last_code; 147 147 unsigned long last_event_jiffies; ··· 724 724 725 725 #ifdef ENABLE_RC 726 726 727 - static void cinergyt2_query_rc (void *data) 727 + static void cinergyt2_query_rc (struct work_struct *work) 728 728 { 729 - struct cinergyt2 *cinergyt2 = data; 729 + struct cinergyt2 *cinergyt2 = 730 + container_of(work, struct cinergyt2, rc_query_work.work); 730 731 char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS }; 731 732 struct cinergyt2_rc_event rc_events[12]; 732 733 int n, len, i; ··· 808 807 strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys)); 809 808 cinergyt2->rc_input_event = KEY_MAX; 810 809 cinergyt2->rc_last_code = ~0; 811 - INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2); 810 + INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc); 812 811 813 812 input_dev->name = DRIVER_NAME " remote control"; 814 813 input_dev->phys = cinergyt2->phys; ··· 849 848 850 849 #endif /* ENABLE_RC */ 851 850 852 - static void cinergyt2_query (void *data) 851 + static void cinergyt2_query (struct work_struct *work) 853 852 { 854 - struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data; 853 + struct cinergyt2 *cinergyt2 = 854 + container_of(work, struct cinergyt2, query_work.work); 855 855 char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS }; 856 856 struct dvbt_get_status_msg *s = &cinergyt2->status; 857 857 uint8_t lock_bits; ··· 896 894 897 895 mutex_init(&cinergyt2->sem); 898 896 init_waitqueue_head (&cinergyt2->poll_wq); 899 - INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2); 897 + INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query); 900 898 901 899 cinergyt2->udev = interface_to_usbdev(intf); 902 900 cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
+12 -7
drivers/media/dvb/dvb-core/dvb_net.c
··· 127 127 int in_use; 128 128 struct net_device_stats stats; 129 129 u16 pid; 130 + struct net_device *net; 130 131 struct dvb_net *host; 131 132 struct dmx_demux *demux; 132 133 struct dmx_section_feed *secfeed; ··· 1124 1123 } 1125 1124 1126 1125 1127 - static void wq_set_multicast_list (void *data) 1126 + static void wq_set_multicast_list (struct work_struct *work) 1128 1127 { 1129 - struct net_device *dev = data; 1130 - struct dvb_net_priv *priv = dev->priv; 1128 + struct dvb_net_priv *priv = 1129 + container_of(work, struct dvb_net_priv, set_multicast_list_wq); 1130 + struct net_device *dev = priv->net; 1131 1131 1132 1132 dvb_net_feed_stop(dev); 1133 1133 priv->rx_mode = RX_MODE_UNI; ··· 1169 1167 } 1170 1168 1171 1169 1172 - static void wq_restart_net_feed (void *data) 1170 + static void wq_restart_net_feed (struct work_struct *work) 1173 1171 { 1174 - struct net_device *dev = data; 1172 + struct dvb_net_priv *priv = 1173 + container_of(work, struct dvb_net_priv, restart_net_feed_wq); 1174 + struct net_device *dev = priv->net; 1175 1175 1176 1176 if (netif_running(dev)) { 1177 1177 dvb_net_feed_stop(dev); ··· 1280 1276 dvbnet->device[if_num] = net; 1281 1277 1282 1278 priv = net->priv; 1279 + priv->net = net; 1283 1280 priv->demux = dvbnet->demux; 1284 1281 priv->pid = pid; 1285 1282 priv->rx_mode = RX_MODE_UNI; ··· 1289 1284 priv->feedtype = feedtype; 1290 1285 reset_ule(priv); 1291 1286 1292 - INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net); 1293 - INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net); 1287 + INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list); 1288 + INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed); 1294 1289 mutex_init(&priv->mutex); 1295 1290 1296 1291 net->base_addr = pid;
+4 -3
drivers/media/dvb/dvb-usb/dvb-usb-remote.c
··· 13 13 * 14 14 * TODO: Fix the repeat rate of the input device. 15 15 */ 16 - static void dvb_usb_read_remote_control(void *data) 16 + static void dvb_usb_read_remote_control(struct work_struct *work) 17 17 { 18 - struct dvb_usb_device *d = data; 18 + struct dvb_usb_device *d = 19 + container_of(work, struct dvb_usb_device, rc_query_work.work); 19 20 u32 event; 20 21 int state; 21 22 ··· 129 128 130 129 input_register_device(d->rc_input_dev); 131 130 132 - INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d); 131 + INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control); 133 132 134 133 info("schedule remote query interval to %d msecs.", d->props.rc_interval); 135 134 schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval));
+1 -1
drivers/media/dvb/dvb-usb/dvb-usb.h
··· 369 369 /* remote control */ 370 370 struct input_dev *rc_input_dev; 371 371 char rc_phys[64]; 372 - struct work_struct rc_query_work; 372 + struct delayed_work rc_query_work; 373 373 u32 last_event; 374 374 int last_state; 375 375
+19 -1
drivers/media/video/cpia_pp.c
··· 82 82 struct pardevice *pdev; 83 83 struct parport *port; 84 84 struct work_struct cb_task; 85 + void (*cb_func)(void *cbdata); 86 + void *cb_data; 85 87 int open_count; 86 88 wait_queue_head_t wq_stream; 87 89 /* image state flags */ ··· 131 129 132 130 #define PARPORT_CHUNK_SIZE PAGE_SIZE 133 131 132 + 133 + static void cpia_pp_run_callback(struct work_struct *work) 134 + { 135 + void (*cb_func)(void *cbdata); 136 + void *cb_data; 137 + struct pp_cam_entry *cam; 138 + 139 + cam = container_of(work, struct pp_cam_entry, cb_task); 140 + cb_func = cam->cb_func; 141 + cb_data = cam->cb_data; 142 + work_release(work); 143 + 144 + cb_func(cb_data); 145 + } 134 146 135 147 /**************************************************************************** 136 148 * ··· 680 664 int retval = 0; 681 665 682 666 if(cam->port->irq != PARPORT_IRQ_NONE) { 683 - INIT_WORK(&cam->cb_task, cb, cbdata); 667 + cam->cb_func = cb; 668 + cam->cb_data = cbdata; 669 + INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback); 684 670 } else { 685 671 retval = -1; 686 672 }
+3 -3
drivers/media/video/cx88/cx88-input.c
··· 145 145 schedule_work(&ir->work); 146 146 } 147 147 148 - static void cx88_ir_work(void *data) 148 + static void cx88_ir_work(struct work_struct *work) 149 149 { 150 - struct cx88_IR *ir = data; 150 + struct cx88_IR *ir = container_of(work, struct cx88_IR, work); 151 151 unsigned long timeout; 152 152 153 153 cx88_ir_handle_key(ir); ··· 308 308 core->ir = ir; 309 309 310 310 if (ir->polling) { 311 - INIT_WORK(&ir->work, cx88_ir_work, ir); 311 + INIT_WORK(&ir->work, cx88_ir_work); 312 312 init_timer(&ir->timer); 313 313 ir->timer.function = ir_timer; 314 314 ir->timer.data = (unsigned long)ir;
+3 -3
drivers/media/video/ir-kbd-i2c.c
··· 268 268 schedule_work(&ir->work); 269 269 } 270 270 271 - static void ir_work(void *data) 271 + static void ir_work(struct work_struct *work) 272 272 { 273 - struct IR_i2c *ir = data; 273 + struct IR_i2c *ir = container_of(work, struct IR_i2c, work); 274 274 ir_key_poll(ir); 275 275 mod_timer(&ir->timer, jiffies+HZ/10); 276 276 } ··· 400 400 ir->input->name,ir->input->phys,adap->name); 401 401 402 402 /* start polling via eventd */ 403 - INIT_WORK(&ir->work, ir_work, ir); 403 + INIT_WORK(&ir->work, ir_work); 404 404 init_timer(&ir->timer); 405 405 ir->timer.function = ir_timer; 406 406 ir->timer.data = (unsigned long)ir;
+9 -4
drivers/media/video/pvrusb2/pvrusb2-context.c
··· 45 45 } 46 46 47 47 48 - static void pvr2_context_poll(struct pvr2_context *mp) 48 + static void pvr2_context_poll(struct work_struct *work) 49 49 { 50 + struct pvr2_context *mp = 51 + container_of(work, struct pvr2_context, workpoll); 50 52 pvr2_context_enter(mp); do { 51 53 pvr2_hdw_poll(mp->hdw); 52 54 } while (0); pvr2_context_exit(mp); 53 55 } 54 56 55 57 56 - static void pvr2_context_setup(struct pvr2_context *mp) 58 + static void pvr2_context_setup(struct work_struct *work) 57 59 { 60 + struct pvr2_context *mp = 61 + container_of(work, struct pvr2_context, workinit); 62 + 58 63 pvr2_context_enter(mp); do { 59 64 if (!pvr2_hdw_dev_ok(mp->hdw)) break; 60 65 pvr2_hdw_setup(mp->hdw); ··· 97 92 } 98 93 99 94 mp->workqueue = create_singlethread_workqueue("pvrusb2"); 100 - INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp); 101 - INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp); 95 + INIT_WORK(&mp->workinit, pvr2_context_setup); 96 + INIT_WORK(&mp->workpoll, pvr2_context_poll); 102 97 queue_work(mp->workqueue,&mp->workinit); 103 98 done: 104 99 return mp;
+3 -3
drivers/media/video/saa6588.c
··· 322 322 schedule_work(&s->work); 323 323 } 324 324 325 - static void saa6588_work(void *data) 325 + static void saa6588_work(struct work_struct *work) 326 326 { 327 - struct saa6588 *s = (struct saa6588 *)data; 327 + struct saa6588 *s = container_of(work, struct saa6588, work); 328 328 329 329 saa6588_i2c_poll(s); 330 330 mod_timer(&s->timer, jiffies + msecs_to_jiffies(20)); ··· 417 417 saa6588_configure(s); 418 418 419 419 /* start polling via eventd */ 420 - INIT_WORK(&s->work, saa6588_work, s); 420 + INIT_WORK(&s->work, saa6588_work); 421 421 init_timer(&s->timer); 422 422 s->timer.function = saa6588_timer; 423 423 s->timer.data = (unsigned long)s;
+5 -4
drivers/media/video/saa7134/saa7134-empress.c
··· 343 343 .minor = -1, 344 344 }; 345 345 346 - static void empress_signal_update(void* data) 346 + static void empress_signal_update(struct work_struct *work) 347 347 { 348 - struct saa7134_dev* dev = (struct saa7134_dev*) data; 348 + struct saa7134_dev* dev = 349 + container_of(work, struct saa7134_dev, empress_workqueue); 349 350 350 351 if (dev->nosignal) { 351 352 dprintk("no video signal\n"); ··· 379 378 "%s empress (%s)", dev->name, 380 379 saa7134_boards[dev->board].name); 381 380 382 - INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev); 381 + INIT_WORK(&dev->empress_workqueue, empress_signal_update); 383 382 384 383 err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER, 385 384 empress_nr[dev->nr]); ··· 400 399 sizeof(struct saa7134_buf), 401 400 dev); 402 401 403 - empress_signal_update(dev); 402 + empress_signal_update(&dev->empress_workqueue); 404 403 return 0; 405 404 } 406 405
+8 -6
drivers/message/fusion/mptfc.c
··· 1018 1018 } 1019 1019 1020 1020 static void 1021 - mptfc_setup_reset(void *arg) 1021 + mptfc_setup_reset(struct work_struct *work) 1022 1022 { 1023 - MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 1023 + MPT_ADAPTER *ioc = 1024 + container_of(work, MPT_ADAPTER, fc_setup_reset_work); 1024 1025 u64 pn; 1025 1026 struct mptfc_rport_info *ri; 1026 1027 ··· 1044 1043 } 1045 1044 1046 1045 static void 1047 - mptfc_rescan_devices(void *arg) 1046 + mptfc_rescan_devices(struct work_struct *work) 1048 1047 { 1049 - MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 1048 + MPT_ADAPTER *ioc = 1049 + container_of(work, MPT_ADAPTER, fc_rescan_work); 1050 1050 int ii; 1051 1051 u64 pn; 1052 1052 struct mptfc_rport_info *ri; ··· 1156 1154 } 1157 1155 1158 1156 spin_lock_init(&ioc->fc_rescan_work_lock); 1159 - INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc); 1160 - INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc); 1157 + INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices); 1158 + INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset); 1161 1159 1162 1160 spin_lock_irqsave(&ioc->FreeQlock, flags); 1163 1161
+19 -10
drivers/message/fusion/mptlan.c
··· 111 111 u32 total_received; 112 112 struct net_device_stats stats; /* Per device statistics */ 113 113 114 - struct work_struct post_buckets_task; 114 + struct delayed_work post_buckets_task; 115 + struct net_device *dev; 115 116 unsigned long post_buckets_active; 116 117 }; 117 118 ··· 133 132 static int mpt_lan_open(struct net_device *dev); 134 133 static int mpt_lan_reset(struct net_device *dev); 135 134 static int mpt_lan_close(struct net_device *dev); 136 - static void mpt_lan_post_receive_buckets(void *dev_id); 135 + static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv); 137 136 static void mpt_lan_wake_post_buckets_task(struct net_device *dev, 138 137 int priority); 139 138 static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); ··· 346 345 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; 347 346 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); 348 347 } else { 349 - mpt_lan_post_receive_buckets(dev); 348 + mpt_lan_post_receive_buckets(priv); 350 349 netif_wake_queue(dev); 351 350 } 352 351 ··· 442 441 443 442 dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); 444 443 445 - mpt_lan_post_receive_buckets(dev); 444 + mpt_lan_post_receive_buckets(priv); 446 445 printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", 447 446 IOC_AND_NETDEV_NAMES_s_s(dev)); 448 447 ··· 855 854 856 855 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { 857 856 if (priority) { 858 - schedule_work(&priv->post_buckets_task); 857 + schedule_delayed_work(&priv->post_buckets_task, 0); 859 858 } else { 860 859 schedule_delayed_work(&priv->post_buckets_task, 1); 861 860 dioprintk((KERN_INFO MYNAM ": post_buckets queued on " ··· 1189 1188 /* Simple SGE's only at the moment */ 1190 1189 1191 1190 static void 1192 - mpt_lan_post_receive_buckets(void *dev_id) 1191 + mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv) 1193 1192 { 1194 - struct net_device *dev = dev_id; 1195 - struct mpt_lan_priv *priv = dev->priv; 1193 + struct 
net_device *dev = priv->dev; 1196 1194 MPT_ADAPTER *mpt_dev = priv->mpt_dev; 1197 1195 MPT_FRAME_HDR *mf; 1198 1196 LANReceivePostRequest_t *pRecvReq; ··· 1335 1335 clear_bit(0, &priv->post_buckets_active); 1336 1336 } 1337 1337 1338 + static void 1339 + mpt_lan_post_receive_buckets_work(struct work_struct *work) 1340 + { 1341 + mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv, 1342 + post_buckets_task.work)); 1343 + } 1344 + 1338 1345 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1339 1346 static struct net_device * 1340 1347 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) ··· 1357 1350 1358 1351 priv = netdev_priv(dev); 1359 1352 1353 + priv->dev = dev; 1360 1354 priv->mpt_dev = mpt_dev; 1361 1355 priv->pnum = pnum; 1362 1356 1363 - memset(&priv->post_buckets_task, 0, sizeof(struct work_struct)); 1364 - INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev); 1357 + memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task)); 1358 + INIT_DELAYED_WORK(&priv->post_buckets_task, 1359 + mpt_lan_post_receive_buckets_work); 1365 1360 priv->post_buckets_active = 0; 1366 1361 1367 1362 dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
+13 -12
drivers/message/fusion/mptsas.c
··· 2006 2006 *(Mutex LOCKED) 2007 2007 */ 2008 2008 static void 2009 - mptsas_discovery_work(void * arg) 2009 + mptsas_discovery_work(struct work_struct *work) 2010 2010 { 2011 - struct mptsas_discovery_event *ev = arg; 2011 + struct mptsas_discovery_event *ev = 2012 + container_of(work, struct mptsas_discovery_event, work); 2012 2013 MPT_ADAPTER *ioc = ev->ioc; 2013 2014 2014 2015 mutex_lock(&ioc->sas_discovery_mutex); ··· 2069 2068 * Work queue thread to clear the persitency table 2070 2069 */ 2071 2070 static void 2072 - mptsas_persist_clear_table(void * arg) 2071 + mptsas_persist_clear_table(struct work_struct *work) 2073 2072 { 2074 - MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 2073 + MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task); 2075 2074 2076 2075 mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT); 2077 2076 } ··· 2094 2093 * Work queue thread to handle SAS hotplug events 2095 2094 */ 2096 2095 static void 2097 - mptsas_hotplug_work(void *arg) 2096 + mptsas_hotplug_work(struct work_struct *work) 2098 2097 { 2099 - struct mptsas_hotplug_event *ev = arg; 2098 + struct mptsas_hotplug_event *ev = 2099 + container_of(work, struct mptsas_hotplug_event, work); 2100 2100 MPT_ADAPTER *ioc = ev->ioc; 2101 2101 struct mptsas_phyinfo *phy_info; 2102 2102 struct sas_rphy *rphy; ··· 2343 2341 break; 2344 2342 } 2345 2343 2346 - INIT_WORK(&ev->work, mptsas_hotplug_work, ev); 2344 + INIT_WORK(&ev->work, mptsas_hotplug_work); 2347 2345 ev->ioc = ioc; 2348 2346 ev->handle = le16_to_cpu(sas_event_data->DevHandle); 2349 2347 ev->parent_handle = ··· 2368 2366 * Persistent table is full. 
2369 2367 */ 2370 2368 INIT_WORK(&ioc->sas_persist_task, 2371 - mptsas_persist_clear_table, (void *)ioc); 2369 + mptsas_persist_clear_table); 2372 2370 schedule_work(&ioc->sas_persist_task); 2373 2371 break; 2374 2372 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: ··· 2397 2395 return; 2398 2396 } 2399 2397 2400 - INIT_WORK(&ev->work, mptsas_hotplug_work, ev); 2398 + INIT_WORK(&ev->work, mptsas_hotplug_work); 2401 2399 ev->ioc = ioc; 2402 2400 ev->id = raid_event_data->VolumeID; 2403 2401 ev->event_type = MPTSAS_IGNORE_EVENT; ··· 2476 2474 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 2477 2475 if (!ev) 2478 2476 return; 2479 - INIT_WORK(&ev->work, mptsas_discovery_work, ev); 2477 + INIT_WORK(&ev->work, mptsas_discovery_work); 2480 2478 ev->ioc = ioc; 2481 2479 schedule_work(&ev->work); 2482 2480 }; ··· 2513 2511 break; 2514 2512 case MPI_EVENT_PERSISTENT_TABLE_FULL: 2515 2513 INIT_WORK(&ioc->sas_persist_task, 2516 - mptsas_persist_clear_table, 2517 - (void *)ioc); 2514 + mptsas_persist_clear_table); 2518 2515 schedule_work(&ioc->sas_persist_task); 2519 2516 break; 2520 2517 case MPI_EVENT_SAS_DISCOVERY:
+8 -6
drivers/message/fusion/mptspi.c
··· 646 646 int disk; 647 647 }; 648 648 649 - static void mpt_work_wrapper(void *data) 649 + static void mpt_work_wrapper(struct work_struct *work) 650 650 { 651 - struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; 651 + struct work_queue_wrapper *wqw = 652 + container_of(work, struct work_queue_wrapper, work); 652 653 struct _MPT_SCSI_HOST *hd = wqw->hd; 653 654 struct Scsi_Host *shost = hd->ioc->sh; 654 655 struct scsi_device *sdev; ··· 696 695 disk); 697 696 return; 698 697 } 699 - INIT_WORK(&wqw->work, mpt_work_wrapper, wqw); 698 + INIT_WORK(&wqw->work, mpt_work_wrapper); 700 699 wqw->hd = hd; 701 700 wqw->disk = disk; 702 701 ··· 785 784 * renegotiate for a given target 786 785 */ 787 786 static void 788 - mptspi_dv_renegotiate_work(void *data) 787 + mptspi_dv_renegotiate_work(struct work_struct *work) 789 788 { 790 - struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; 789 + struct work_queue_wrapper *wqw = 790 + container_of(work, struct work_queue_wrapper, work); 791 791 struct _MPT_SCSI_HOST *hd = wqw->hd; 792 792 struct scsi_device *sdev; 793 793 ··· 806 804 if (!wqw) 807 805 return; 808 806 809 - INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw); 807 + INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work); 810 808 wqw->hd = hd; 811 809 812 810 schedule_work(&wqw->work);
+1 -1
drivers/message/i2o/driver.c
··· 232 232 break; 233 233 } 234 234 235 - INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt); 235 + INIT_WORK(&evt->work, drv->event); 236 236 queue_work(drv->event_queue, &evt->work); 237 237 return 1; 238 238 }
+8 -5
drivers/message/i2o/exec-osm.c
··· 371 371 * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY 372 372 * again, otherwise send LCT NOTIFY to get informed on next LCT change. 373 373 */ 374 - static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work) 374 + static void i2o_exec_lct_modified(struct work_struct *_work) 375 375 { 376 + struct i2o_exec_lct_notify_work *work = 377 + container_of(_work, struct i2o_exec_lct_notify_work, work); 376 378 u32 change_ind = 0; 377 379 struct i2o_controller *c = work->c; 378 380 ··· 441 439 442 440 work->c = c; 443 441 444 - INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified, 445 - work); 442 + INIT_WORK(&work->work, i2o_exec_lct_modified); 446 443 queue_work(i2o_exec_driver.event_queue, &work->work); 447 444 return 1; 448 445 } ··· 461 460 462 461 /** 463 462 * i2o_exec_event - Event handling function 464 - * @evt: Event which occurs 463 + * @work: Work item in occurring event 465 464 * 466 465 * Handles events send by the Executive device. At the moment does not do 467 466 * anything useful. 468 467 */ 469 - static void i2o_exec_event(struct i2o_event *evt) 468 + static void i2o_exec_event(struct work_struct *work) 470 469 { 470 + struct i2o_event *evt = container_of(work, struct i2o_event, work); 471 + 471 472 if (likely(evt->i2o_dev)) 472 473 osm_debug("Event received from device: %d\n", 473 474 evt->i2o_dev->lct_data.tid);
+9 -6
drivers/message/i2o/i2o_block.c
··· 419 419 420 420 /** 421 421 * i2o_block_delayed_request_fn - delayed request queue function 422 - * delayed_request: the delayed request with the queue to start 422 + * @work: the delayed request with the queue to start 423 423 * 424 424 * If the request queue is stopped for a disk, and there is no open 425 425 * request, a new event is created, which calls this function to start 426 426 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never 427 427 * be started again. 428 428 */ 429 - static void i2o_block_delayed_request_fn(void *delayed_request) 429 + static void i2o_block_delayed_request_fn(struct work_struct *work) 430 430 { 431 - struct i2o_block_delayed_request *dreq = delayed_request; 431 + struct i2o_block_delayed_request *dreq = 432 + container_of(work, struct i2o_block_delayed_request, 433 + work.work); 432 434 struct request_queue *q = dreq->queue; 433 435 unsigned long flags; 434 436 ··· 540 538 return 1; 541 539 }; 542 540 543 - static void i2o_block_event(struct i2o_event *evt) 541 + static void i2o_block_event(struct work_struct *work) 544 542 { 543 + struct i2o_event *evt = container_of(work, struct i2o_event, work); 545 544 osm_debug("event received\n"); 546 545 kfree(evt); 547 546 }; ··· 941 938 continue; 942 939 943 940 dreq->queue = q; 944 - INIT_WORK(&dreq->work, i2o_block_delayed_request_fn, 945 - dreq); 941 + INIT_DELAYED_WORK(&dreq->work, 942 + i2o_block_delayed_request_fn); 946 943 947 944 if (!queue_delayed_work(i2o_block_driver.event_queue, 948 945 &dreq->work,
+1 -1
drivers/message/i2o/i2o_block.h
··· 96 96 97 97 /* I2O Block device delayed request */ 98 98 struct i2o_block_delayed_request { 99 - struct work_struct work; 99 + struct delayed_work work; 100 100 struct request_queue *queue; 101 101 }; 102 102
+10 -8
drivers/misc/tifm_7xx1.c
··· 33 33 spin_unlock_irqrestore(&fm->lock, flags); 34 34 } 35 35 36 - static void tifm_7xx1_remove_media(void *adapter) 36 + static void tifm_7xx1_remove_media(struct work_struct *work) 37 37 { 38 - struct tifm_adapter *fm = adapter; 38 + struct tifm_adapter *fm = 39 + container_of(work, struct tifm_adapter, media_remover); 39 40 unsigned long flags; 40 41 int cnt; 41 42 struct tifm_dev *sock; ··· 170 169 return base_addr + ((sock_num + 1) << 10); 171 170 } 172 171 173 - static void tifm_7xx1_insert_media(void *adapter) 172 + static void tifm_7xx1_insert_media(struct work_struct *work) 174 173 { 175 - struct tifm_adapter *fm = adapter; 174 + struct tifm_adapter *fm = 175 + container_of(work, struct tifm_adapter, media_inserter); 176 176 unsigned long flags; 177 177 tifm_media_id media_id; 178 178 char *card_name = "xx"; ··· 263 261 spin_unlock_irqrestore(&fm->lock, flags); 264 262 flush_workqueue(fm->wq); 265 263 266 - tifm_7xx1_remove_media(fm); 264 + tifm_7xx1_remove_media(&fm->media_remover); 267 265 268 266 pci_set_power_state(dev, PCI_D3hot); 269 267 pci_disable_device(dev); ··· 330 328 if (!fm->sockets) 331 329 goto err_out_free; 332 330 333 - INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm); 334 - INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm); 331 + INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media); 332 + INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media); 335 333 fm->eject = tifm_7xx1_eject; 336 334 pci_set_drvdata(dev, fm); 337 335 ··· 386 384 387 385 flush_workqueue(fm->wq); 388 386 389 - tifm_7xx1_remove_media(fm); 387 + tifm_7xx1_remove_media(&fm->media_remover); 390 388 391 389 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 392 390 free_irq(dev->irq, fm);
+6 -8
drivers/mmc/mmc.c
··· 1165 1165 */ 1166 1166 void mmc_detect_change(struct mmc_host *host, unsigned long delay) 1167 1167 { 1168 - if (delay) 1169 - mmc_schedule_delayed_work(&host->detect, delay); 1170 - else 1171 - mmc_schedule_work(&host->detect); 1168 + mmc_schedule_delayed_work(&host->detect, delay); 1172 1169 } 1173 1170 1174 1171 EXPORT_SYMBOL(mmc_detect_change); 1175 1172 1176 1173 1177 - static void mmc_rescan(void *data) 1174 + static void mmc_rescan(struct work_struct *work) 1178 1175 { 1179 - struct mmc_host *host = data; 1176 + struct mmc_host *host = 1177 + container_of(work, struct mmc_host, detect.work); 1180 1178 struct list_head *l, *n; 1181 1179 unsigned char power_mode; 1182 1180 ··· 1257 1259 spin_lock_init(&host->lock); 1258 1260 init_waitqueue_head(&host->wq); 1259 1261 INIT_LIST_HEAD(&host->cards); 1260 - INIT_WORK(&host->detect, mmc_rescan, host); 1262 + INIT_DELAYED_WORK(&host->detect, mmc_rescan); 1261 1263 1262 1264 /* 1263 1265 * By default, hosts do not support SGIO or large requests. ··· 1355 1357 */ 1356 1358 int mmc_resume_host(struct mmc_host *host) 1357 1359 { 1358 - mmc_rescan(host); 1360 + mmc_rescan(&host->detect.work); 1359 1361 1360 1362 return 0; 1361 1363 }
+1 -1
drivers/mmc/mmc.h
··· 20 20 void mmc_free_host_sysfs(struct mmc_host *host); 21 21 22 22 int mmc_schedule_work(struct work_struct *work); 23 - int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay); 23 + int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay); 24 24 void mmc_flush_scheduled_work(void); 25 25 #endif
+1 -9
drivers/mmc/mmc_sysfs.c
··· 321 321 static struct workqueue_struct *workqueue; 322 322 323 323 /* 324 - * Internal function. Schedule work in the MMC work queue. 325 - */ 326 - int mmc_schedule_work(struct work_struct *work) 327 - { 328 - return queue_work(workqueue, work); 329 - } 330 - 331 - /* 332 324 * Internal function. Schedule delayed work in the MMC work queue. 333 325 */ 334 - int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay) 326 + int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) 335 327 { 336 328 return queue_delayed_work(workqueue, work, delay); 337 329 }
+15 -13
drivers/mmc/tifm_sd.c
··· 99 99 100 100 struct mmc_request *req; 101 101 struct work_struct cmd_handler; 102 - struct work_struct abort_handler; 102 + struct delayed_work abort_handler; 103 103 wait_queue_head_t can_eject; 104 104 105 105 size_t written_blocks; ··· 496 496 mmc_request_done(mmc, mrq); 497 497 } 498 498 499 - static void tifm_sd_end_cmd(void *data) 499 + static void tifm_sd_end_cmd(struct work_struct *work) 500 500 { 501 - struct tifm_sd *host = data; 501 + struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 502 502 struct tifm_dev *sock = host->dev; 503 503 struct mmc_host *mmc = tifm_get_drvdata(sock); 504 504 struct mmc_request *mrq; ··· 608 608 mmc_request_done(mmc, mrq); 609 609 } 610 610 611 - static void tifm_sd_end_cmd_nodma(void *data) 611 + static void tifm_sd_end_cmd_nodma(struct work_struct *work) 612 612 { 613 - struct tifm_sd *host = (struct tifm_sd*)data; 613 + struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 614 614 struct tifm_dev *sock = host->dev; 615 615 struct mmc_host *mmc = tifm_get_drvdata(sock); 616 616 struct mmc_request *mrq; ··· 661 661 mmc_request_done(mmc, mrq); 662 662 } 663 663 664 - static void tifm_sd_abort(void *data) 664 + static void tifm_sd_abort(struct work_struct *work) 665 665 { 666 + struct tifm_sd *host = 667 + container_of(work, struct tifm_sd, abort_handler.work); 668 + 666 669 printk(KERN_ERR DRIVER_NAME 667 670 ": card failed to respond for a long period of time"); 668 - tifm_eject(((struct tifm_sd*)data)->dev); 671 + tifm_eject(host->dev); 669 672 } 670 673 671 674 static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) ··· 765 762 .get_ro = tifm_sd_ro 766 763 }; 767 764 768 - static void tifm_sd_register_host(void *data) 765 + static void tifm_sd_register_host(struct work_struct *work) 769 766 { 770 - struct tifm_sd *host = (struct tifm_sd*)data; 767 + struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 771 768 struct tifm_dev *sock = host->dev; 772 
769 struct mmc_host *mmc = tifm_get_drvdata(sock); 773 770 unsigned long flags; ··· 775 772 spin_lock_irqsave(&sock->lock, flags); 776 773 host->flags |= HOST_REG; 777 774 PREPARE_WORK(&host->cmd_handler, 778 - no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd, 779 - data); 775 + no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd); 780 776 spin_unlock_irqrestore(&sock->lock, flags); 781 777 dev_dbg(&sock->dev, "adding host\n"); 782 778 mmc_add_host(mmc); ··· 801 799 host->dev = sock; 802 800 host->clk_div = 61; 803 801 init_waitqueue_head(&host->can_eject); 804 - INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host); 805 - INIT_WORK(&host->abort_handler, tifm_sd_abort, host); 802 + INIT_WORK(&host->cmd_handler, tifm_sd_register_host); 803 + INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort); 806 804 807 805 tifm_set_drvdata(sock, mmc); 808 806 sock->signal_irq = tifm_sd_signal_irq;
+14 -12
drivers/net/8139too.c
··· 594 594 u32 rx_config; 595 595 struct rtl_extra_stats xstats; 596 596 597 - struct work_struct thread; 597 + struct delayed_work thread; 598 598 599 599 struct mii_if_info mii; 600 600 unsigned int regs_len; ··· 636 636 static void rtl8139_set_rx_mode (struct net_device *dev); 637 637 static void __set_rx_mode (struct net_device *dev); 638 638 static void rtl8139_hw_start (struct net_device *dev); 639 - static void rtl8139_thread (void *_data); 640 - static void rtl8139_tx_timeout_task(void *_data); 639 + static void rtl8139_thread (struct work_struct *work); 640 + static void rtl8139_tx_timeout_task(struct work_struct *work); 641 641 static const struct ethtool_ops rtl8139_ethtool_ops; 642 642 643 643 /* write MMIO register, with flush */ ··· 1010 1010 (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); 1011 1011 spin_lock_init (&tp->lock); 1012 1012 spin_lock_init (&tp->rx_lock); 1013 - INIT_WORK(&tp->thread, rtl8139_thread, dev); 1013 + INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); 1014 1014 tp->mii.dev = dev; 1015 1015 tp->mii.mdio_read = mdio_read; 1016 1016 tp->mii.mdio_write = mdio_write; ··· 1596 1596 RTL_R8 (Config1)); 1597 1597 } 1598 1598 1599 - static void rtl8139_thread (void *_data) 1599 + static void rtl8139_thread (struct work_struct *work) 1600 1600 { 1601 - struct net_device *dev = _data; 1602 - struct rtl8139_private *tp = netdev_priv(dev); 1601 + struct rtl8139_private *tp = 1602 + container_of(work, struct rtl8139_private, thread.work); 1603 + struct net_device *dev = tp->mii.dev; 1603 1604 unsigned long thr_delay = next_tick; 1604 1605 1605 1606 if (tp->watchdog_fired) { 1606 1607 tp->watchdog_fired = 0; 1607 - rtl8139_tx_timeout_task(_data); 1608 + rtl8139_tx_timeout_task(work); 1608 1609 } else if (rtnl_trylock()) { 1609 1610 rtl8139_thread_iter (dev, tp, tp->mmio_addr); 1610 1611 rtnl_unlock (); ··· 1647 1646 /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ 1648 1647 } 1649 1648 1650 - static void 
rtl8139_tx_timeout_task (void *_data) 1649 + static void rtl8139_tx_timeout_task (struct work_struct *work) 1651 1650 { 1652 - struct net_device *dev = _data; 1653 - struct rtl8139_private *tp = netdev_priv(dev); 1651 + struct rtl8139_private *tp = 1652 + container_of(work, struct rtl8139_private, thread.work); 1653 + struct net_device *dev = tp->mii.dev; 1654 1654 void __iomem *ioaddr = tp->mmio_addr; 1655 1655 int i; 1656 1656 u8 tmp8; ··· 1697 1695 struct rtl8139_private *tp = netdev_priv(dev); 1698 1696 1699 1697 if (!tp->have_thread) { 1700 - INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev); 1698 + INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task); 1701 1699 schedule_delayed_work(&tp->thread, next_tick); 1702 1700 } else 1703 1701 tp->watchdog_fired = 1;
+3 -3
drivers/net/bnx2.c
··· 4339 4339 } 4340 4340 4341 4341 static void 4342 - bnx2_reset_task(void *data) 4342 + bnx2_reset_task(struct work_struct *work) 4343 4343 { 4344 - struct bnx2 *bp = data; 4344 + struct bnx2 *bp = container_of(work, struct bnx2, reset_task); 4345 4345 4346 4346 if (!netif_running(bp->dev)) 4347 4347 return; ··· 5630 5630 bp->pdev = pdev; 5631 5631 5632 5632 spin_lock_init(&bp->phy_lock); 5633 - INIT_WORK(&bp->reset_task, bnx2_reset_task, bp); 5633 + INIT_WORK(&bp->reset_task, bnx2_reset_task); 5634 5634 5635 5635 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 5636 5636 mem_len = MB_GET_CID_ADDR(17);
+3 -3
drivers/net/cassini.c
··· 4066 4066 return 0; 4067 4067 } 4068 4068 4069 - static void cas_reset_task(void *data) 4069 + static void cas_reset_task(struct work_struct *work) 4070 4070 { 4071 - struct cas *cp = (struct cas *) data; 4071 + struct cas *cp = container_of(work, struct cas, reset_task); 4072 4072 #if 0 4073 4073 int pending = atomic_read(&cp->reset_task_pending); 4074 4074 #else ··· 5006 5006 atomic_set(&cp->reset_task_pending_spare, 0); 5007 5007 atomic_set(&cp->reset_task_pending_mtu, 0); 5008 5008 #endif 5009 - INIT_WORK(&cp->reset_task, cas_reset_task, cp); 5009 + INIT_WORK(&cp->reset_task, cas_reset_task); 5010 5010 5011 5011 /* Default link parameters */ 5012 5012 if (link_mode >= 0 && link_mode <= 6)
+1 -1
drivers/net/chelsio/common.h
··· 209 209 struct peespi *espi; 210 210 211 211 struct port_info port[MAX_NPORTS]; 212 - struct work_struct stats_update_task; 212 + struct delayed_work stats_update_task; 213 213 struct timer_list stats_update_timer; 214 214 215 215 struct semaphore mib_mutex;
+9 -7
drivers/net/chelsio/cxgb2.c
··· 927 927 * Periodic accumulation of MAC statistics. This is used only if the MAC 928 928 * does not have any other way to prevent stats counter overflow. 929 929 */ 930 - static void mac_stats_task(void *data) 930 + static void mac_stats_task(struct work_struct *work) 931 931 { 932 932 int i; 933 - struct adapter *adapter = data; 933 + struct adapter *adapter = 934 + container_of(work, struct adapter, stats_update_task.work); 934 935 935 936 for_each_port(adapter, i) { 936 937 struct port_info *p = &adapter->port[i]; ··· 952 951 /* 953 952 * Processes elmer0 external interrupts in process context. 954 953 */ 955 - static void ext_intr_task(void *data) 954 + static void ext_intr_task(struct work_struct *work) 956 955 { 957 - struct adapter *adapter = data; 956 + struct adapter *adapter = 957 + container_of(work, struct adapter, ext_intr_handler_task); 958 958 959 959 elmer0_ext_intr_handler(adapter); 960 960 ··· 1089 1087 spin_lock_init(&adapter->async_lock); 1090 1088 1091 1089 INIT_WORK(&adapter->ext_intr_handler_task, 1092 - ext_intr_task, adapter); 1093 - INIT_WORK(&adapter->stats_update_task, mac_stats_task, 1094 - adapter); 1090 + ext_intr_task); 1091 + INIT_DELAYED_WORK(&adapter->stats_update_task, 1092 + mac_stats_task); 1095 1093 #ifdef work_struct 1096 1094 init_timer(&adapter->stats_update_timer); 1097 1095 adapter->stats_update_timer.function = mac_stats_timer;
+4 -4
drivers/net/e100.c
··· 2102 2102 schedule_work(&nic->tx_timeout_task); 2103 2103 } 2104 2104 2105 - static void e100_tx_timeout_task(struct net_device *netdev) 2105 + static void e100_tx_timeout_task(struct work_struct *work) 2106 2106 { 2107 - struct nic *nic = netdev_priv(netdev); 2107 + struct nic *nic = container_of(work, struct nic, tx_timeout_task); 2108 + struct net_device *netdev = nic->netdev; 2108 2109 2109 2110 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", 2110 2111 readb(&nic->csr->scb.status)); ··· 2638 2637 nic->blink_timer.function = e100_blink_led; 2639 2638 nic->blink_timer.data = (unsigned long)nic; 2640 2639 2641 - INIT_WORK(&nic->tx_timeout_task, 2642 - (void (*)(void *))e100_tx_timeout_task, netdev); 2640 + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); 2643 2641 2644 2642 if((err = e100_alloc(nic))) { 2645 2643 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
+5 -4
drivers/net/ehea/ehea_main.c
··· 2224 2224 return ret; 2225 2225 } 2226 2226 2227 - static void ehea_reset_port(void *data) 2227 + static void ehea_reset_port(struct work_struct *work) 2228 2228 { 2229 2229 int ret; 2230 - struct net_device *dev = data; 2231 - struct ehea_port *port = netdev_priv(dev); 2230 + struct ehea_port *port = 2231 + container_of(work, struct ehea_port, reset_task); 2232 + struct net_device *dev = port->netdev; 2232 2233 2233 2234 port->resets++; 2234 2235 down(&port->port_lock); ··· 2380 2379 dev->tx_timeout = &ehea_tx_watchdog; 2381 2380 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 2382 2381 2383 - INIT_WORK(&port->reset_task, ehea_reset_port, dev); 2382 + INIT_WORK(&port->reset_task, ehea_reset_port); 2384 2383 2385 2384 ehea_set_ethtool_ops(dev); 2386 2385
+9 -5
drivers/net/hamradio/baycom_epp.c
··· 168 168 int magic; 169 169 170 170 struct pardevice *pdev; 171 + struct net_device *dev; 171 172 unsigned int work_running; 172 - struct work_struct run_work; 173 + struct delayed_work run_work; 173 174 unsigned int modem; 174 175 unsigned int bitrate; 175 176 unsigned char stat; ··· 660 659 #define GETTICK(x) 661 660 #endif /* __i386__ */ 662 661 663 - static void epp_bh(struct net_device *dev) 662 + static void epp_bh(struct work_struct *work) 664 663 { 664 + struct net_device *dev; 665 665 struct baycom_state *bc; 666 666 struct parport *pp; 667 667 unsigned char stat; 668 668 unsigned char tmp[2]; 669 669 unsigned int time1 = 0, time2 = 0, time3 = 0; 670 670 int cnt, cnt2; 671 - 672 - bc = netdev_priv(dev); 671 + 672 + bc = container_of(work, struct baycom_state, run_work.work); 673 + dev = bc->dev; 673 674 if (!bc->work_running) 674 675 return; 675 676 baycom_int_freq(bc); ··· 892 889 return -EBUSY; 893 890 } 894 891 dev->irq = /*pp->irq*/ 0; 895 - INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev); 892 + INIT_DELAYED_WORK(&bc->run_work, epp_bh); 896 893 bc->work_running = 1; 897 894 bc->modem = EPP_CONVENTIONAL; 898 895 if (eppconfig(bc)) ··· 1216 1213 /* 1217 1214 * initialize part of the baycom_state struct 1218 1215 */ 1216 + bc->dev = dev; 1219 1217 bc->magic = BAYCOM_MAGIC; 1220 1218 bc->cfg.fclk = 19666600; 1221 1219 bc->cfg.bps = 9600;
+3 -3
drivers/net/irda/mcs7780.c
··· 560 560 return ret; 561 561 } 562 562 563 - static void mcs_speed_work(void *arg) 563 + static void mcs_speed_work(struct work_struct *work) 564 564 { 565 - struct mcs_cb *mcs = arg; 565 + struct mcs_cb *mcs = container_of(work, struct mcs_cb, work); 566 566 struct net_device *netdev = mcs->netdev; 567 567 568 568 mcs_speed_change(mcs); ··· 927 927 irda_qos_bits_to_value(&mcs->qos); 928 928 929 929 /* Speed change work initialisation*/ 930 - INIT_WORK(&mcs->work, mcs_speed_work, mcs); 930 + INIT_WORK(&mcs->work, mcs_speed_work); 931 931 932 932 /* Override the network functions we need to use */ 933 933 ndev->hard_start_xmit = mcs_hard_xmit;
+1 -1
drivers/net/irda/sir-dev.h
··· 22 22 23 23 struct sir_fsm { 24 24 struct semaphore sem; 25 - struct work_struct work; 25 + struct delayed_work work; 26 26 unsigned state, substate; 27 27 int param; 28 28 int result;
+4 -4
drivers/net/irda/sir_dev.c
··· 100 100 * Both must be unlocked/restarted on completion - but only on final exit. 101 101 */ 102 102 103 - static void sirdev_config_fsm(void *data) 103 + static void sirdev_config_fsm(struct work_struct *work) 104 104 { 105 - struct sir_dev *dev = data; 105 + struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work); 106 106 struct sir_fsm *fsm = &dev->fsm; 107 107 int next_state; 108 108 int ret = -1; ··· 309 309 fsm->param = param; 310 310 fsm->result = 0; 311 311 312 - INIT_WORK(&fsm->work, sirdev_config_fsm, dev); 313 - queue_work(irda_sir_wq, &fsm->work); 312 + INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm); 313 + queue_delayed_work(irda_sir_wq, &fsm->work, 0); 314 314 return 0; 315 315 } 316 316
+7 -5
drivers/net/iseries_veth.c
··· 166 166 167 167 struct veth_lpar_connection { 168 168 HvLpIndex remote_lp; 169 - struct work_struct statemachine_wq; 169 + struct delayed_work statemachine_wq; 170 170 struct veth_msg *msgs; 171 171 int num_events; 172 172 struct veth_cap_data local_caps; ··· 456 456 457 457 static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx) 458 458 { 459 - schedule_work(&cnx->statemachine_wq); 459 + schedule_delayed_work(&cnx->statemachine_wq, 0); 460 460 } 461 461 462 462 static void veth_take_cap(struct veth_lpar_connection *cnx, ··· 638 638 } 639 639 640 640 /* FIXME: The gotos here are a bit dubious */ 641 - static void veth_statemachine(void *p) 641 + static void veth_statemachine(struct work_struct *work) 642 642 { 643 - struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p; 643 + struct veth_lpar_connection *cnx = 644 + container_of(work, struct veth_lpar_connection, 645 + statemachine_wq.work); 644 646 int rlp = cnx->remote_lp; 645 647 int rc; 646 648 ··· 829 827 830 828 cnx->remote_lp = rlp; 831 829 spin_lock_init(&cnx->lock); 832 - INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx); 830 + INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine); 833 831 834 832 init_timer(&cnx->ack_timer); 835 833 cnx->ack_timer.function = veth_timed_ack;
+5 -5
drivers/net/ixgb/ixgb_main.c
··· 106 106 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter); 107 107 void ixgb_set_ethtool_ops(struct net_device *netdev); 108 108 static void ixgb_tx_timeout(struct net_device *dev); 109 - static void ixgb_tx_timeout_task(struct net_device *dev); 109 + static void ixgb_tx_timeout_task(struct work_struct *work); 110 110 static void ixgb_vlan_rx_register(struct net_device *netdev, 111 111 struct vlan_group *grp); 112 112 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); ··· 489 489 adapter->watchdog_timer.function = &ixgb_watchdog; 490 490 adapter->watchdog_timer.data = (unsigned long)adapter; 491 491 492 - INIT_WORK(&adapter->tx_timeout_task, 493 - (void (*)(void *))ixgb_tx_timeout_task, netdev); 492 + INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); 494 493 495 494 strcpy(netdev->name, "eth%d"); 496 495 if((err = register_netdev(netdev))) ··· 1492 1493 } 1493 1494 1494 1495 static void 1495 - ixgb_tx_timeout_task(struct net_device *netdev) 1496 + ixgb_tx_timeout_task(struct work_struct *work) 1496 1497 { 1497 - struct ixgb_adapter *adapter = netdev_priv(netdev); 1498 + struct ixgb_adapter *adapter = 1499 + container_of(work, struct ixgb_adapter, tx_timeout_task); 1498 1500 1499 1501 adapter->tx_timeout_count++; 1500 1502 ixgb_down(adapter, TRUE);
+4 -3
drivers/net/myri10ge/myri10ge.c
··· 2615 2615 * This watchdog is used to check whether the board has suffered 2616 2616 * from a parity error and needs to be recovered. 2617 2617 */ 2618 - static void myri10ge_watchdog(void *arg) 2618 + static void myri10ge_watchdog(struct work_struct *work) 2619 2619 { 2620 - struct myri10ge_priv *mgp = arg; 2620 + struct myri10ge_priv *mgp = 2621 + container_of(work, struct myri10ge_priv, watchdog_work); 2621 2622 u32 reboot; 2622 2623 int status; 2623 2624 u16 cmd, vendor; ··· 2888 2887 (unsigned long)mgp); 2889 2888 2890 2889 SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); 2891 - INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp); 2890 + INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog); 2892 2891 status = register_netdev(netdev); 2893 2892 if (status != 0) { 2894 2893 dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
+6 -4
drivers/net/ns83820.c
··· 427 427 u8 __iomem *base; 428 428 429 429 struct pci_dev *pci_dev; 430 + struct net_device *ndev; 430 431 431 432 #ifdef NS83820_VLAN_ACCEL_SUPPORT 432 433 struct vlan_group *vlgrp; ··· 632 631 } 633 632 634 633 /* REFILL */ 635 - static inline void queue_refill(void *_dev) 634 + static inline void queue_refill(struct work_struct *work) 636 635 { 637 - struct net_device *ndev = _dev; 638 - struct ns83820 *dev = PRIV(ndev); 636 + struct ns83820 *dev = container_of(work, struct ns83820, tq_refill); 637 + struct net_device *ndev = dev->ndev; 639 638 640 639 rx_refill(ndev, GFP_KERNEL); 641 640 if (dev->rx_info.up) ··· 1842 1841 1843 1842 ndev = alloc_etherdev(sizeof(struct ns83820)); 1844 1843 dev = PRIV(ndev); 1844 + dev->ndev = ndev; 1845 1845 err = -ENOMEM; 1846 1846 if (!dev) 1847 1847 goto out; ··· 1855 1853 SET_MODULE_OWNER(ndev); 1856 1854 SET_NETDEV_DEV(ndev, &pci_dev->dev); 1857 1855 1858 - INIT_WORK(&dev->tq_refill, queue_refill, ndev); 1856 + INIT_WORK(&dev->tq_refill, queue_refill); 1859 1857 tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev); 1860 1858 1861 1859 err = pci_enable_device(pci_dev);
+8 -4
drivers/net/pcmcia/xirc2ps_cs.c
··· 332 332 */ 333 333 334 334 typedef struct local_info_t { 335 + struct net_device *dev; 335 336 struct pcmcia_device *p_dev; 336 337 dev_node_t node; 337 338 struct net_device_stats stats; ··· 354 353 */ 355 354 static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); 356 355 static void do_tx_timeout(struct net_device *dev); 357 - static void xirc2ps_tx_timeout_task(void *data); 356 + static void xirc2ps_tx_timeout_task(struct work_struct *work); 358 357 static struct net_device_stats *do_get_stats(struct net_device *dev); 359 358 static void set_addresses(struct net_device *dev); 360 359 static void set_multicast_list(struct net_device *dev); ··· 568 567 if (!dev) 569 568 return -ENOMEM; 570 569 local = netdev_priv(dev); 570 + local->dev = dev; 571 571 local->p_dev = link; 572 572 link->priv = dev; 573 573 ··· 593 591 #ifdef HAVE_TX_TIMEOUT 594 592 dev->tx_timeout = do_tx_timeout; 595 593 dev->watchdog_timeo = TX_TIMEOUT; 596 - INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev); 594 + INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task); 597 595 #endif 598 596 599 597 return xirc2ps_config(link); ··· 1346 1344 /*====================================================================*/ 1347 1345 1348 1346 static void 1349 - xirc2ps_tx_timeout_task(void *data) 1347 + xirc2ps_tx_timeout_task(struct work_struct *work) 1350 1348 { 1351 - struct net_device *dev = data; 1349 + local_info_t *local = 1350 + container_of(work, local_info_t, tx_timeout_task); 1351 + struct net_device *dev = local->dev; 1352 1352 /* reset the card */ 1353 1353 do_reset(dev,1); 1354 1354 dev->trans_start = jiffies;
+5 -4
drivers/net/phy/phy.c
··· 394 394 EXPORT_SYMBOL(phy_start_aneg); 395 395 396 396 397 - static void phy_change(void *data); 397 + static void phy_change(struct work_struct *work); 398 398 static void phy_timer(unsigned long data); 399 399 400 400 /* phy_start_machine: ··· 549 549 { 550 550 int err = 0; 551 551 552 - INIT_WORK(&phydev->phy_queue, phy_change, phydev); 552 + INIT_WORK(&phydev->phy_queue, phy_change); 553 553 554 554 if (request_irq(phydev->irq, phy_interrupt, 555 555 IRQF_SHARED, ··· 585 585 586 586 587 587 /* Scheduled by the phy_interrupt/timer to handle PHY changes */ 588 - static void phy_change(void *data) 588 + static void phy_change(struct work_struct *work) 589 589 { 590 590 int err; 591 - struct phy_device *phydev = data; 591 + struct phy_device *phydev = 592 + container_of(work, struct phy_device, phy_queue); 592 593 593 594 err = phy_disable_interrupts(phydev); 594 595
+21 -17
drivers/net/plip.c
··· 138 138 #define PLIP_NIBBLE_WAIT 3000 139 139 140 140 /* Bottom halves */ 141 - static void plip_kick_bh(struct net_device *dev); 142 - static void plip_bh(struct net_device *dev); 143 - static void plip_timer_bh(struct net_device *dev); 141 + static void plip_kick_bh(struct work_struct *work); 142 + static void plip_bh(struct work_struct *work); 143 + static void plip_timer_bh(struct work_struct *work); 144 144 145 145 /* Interrupt handler */ 146 146 static void plip_interrupt(int irq, void *dev_id); ··· 207 207 208 208 struct net_local { 209 209 struct net_device_stats enet_stats; 210 + struct net_device *dev; 210 211 struct work_struct immediate; 211 - struct work_struct deferred; 212 - struct work_struct timer; 212 + struct delayed_work deferred; 213 + struct delayed_work timer; 213 214 struct plip_local snd_data; 214 215 struct plip_local rcv_data; 215 216 struct pardevice *pardev; ··· 307 306 nl->nibble = PLIP_NIBBLE_WAIT; 308 307 309 308 /* Initialize task queue structures */ 310 - INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev); 311 - INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev); 309 + INIT_WORK(&nl->immediate, plip_bh); 310 + INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh); 312 311 313 312 if (dev->irq == -1) 314 - INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev); 313 + INIT_DELAYED_WORK(&nl->timer, plip_timer_bh); 315 314 316 315 spin_lock_init(&nl->lock); 317 316 } ··· 320 319 This routine is kicked by do_timer(). 321 320 Request `plip_bh' to be invoked. */ 322 321 static void 323 - plip_kick_bh(struct net_device *dev) 322 + plip_kick_bh(struct work_struct *work) 324 323 { 325 - struct net_local *nl = netdev_priv(dev); 324 + struct net_local *nl = 325 + container_of(work, struct net_local, deferred.work); 326 326 327 327 if (nl->is_deferred) 328 328 schedule_work(&nl->immediate); ··· 364 362 365 363 /* Bottom half handler of PLIP. 
*/ 366 364 static void 367 - plip_bh(struct net_device *dev) 365 + plip_bh(struct work_struct *work) 368 366 { 369 - struct net_local *nl = netdev_priv(dev); 367 + struct net_local *nl = container_of(work, struct net_local, immediate); 370 368 struct plip_local *snd = &nl->snd_data; 371 369 struct plip_local *rcv = &nl->rcv_data; 372 370 plip_func f; ··· 374 372 375 373 nl->is_deferred = 0; 376 374 f = connection_state_table[nl->connection]; 377 - if ((r = (*f)(dev, nl, snd, rcv)) != OK 378 - && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) { 375 + if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK 376 + && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) { 379 377 nl->is_deferred = 1; 380 378 schedule_delayed_work(&nl->deferred, 1); 381 379 } 382 380 } 383 381 384 382 static void 385 - plip_timer_bh(struct net_device *dev) 383 + plip_timer_bh(struct work_struct *work) 386 384 { 387 - struct net_local *nl = netdev_priv(dev); 385 + struct net_local *nl = 386 + container_of(work, struct net_local, timer.work); 388 387 389 388 if (!(atomic_read (&nl->kill_timer))) { 390 - plip_interrupt (-1, dev); 389 + plip_interrupt (-1, nl->dev); 391 390 392 391 schedule_delayed_work(&nl->timer, 1); 393 392 } ··· 1287 1284 } 1288 1285 1289 1286 nl = netdev_priv(dev); 1287 + nl->dev = dev; 1290 1288 nl->pardev = parport_register_device(port, name, plip_preempt, 1291 1289 plip_wakeup, plip_interrupt, 1292 1290 0, dev);
+12 -8
drivers/net/qla3xxx.c
··· 2008 2008 "%s: Another function issued a reset to the " 2009 2009 "chip. ISR value = %x.\n", ndev->name, value); 2010 2010 } 2011 - queue_work(qdev->workqueue, &qdev->reset_work); 2011 + queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2012 2012 spin_unlock(&qdev->adapter_lock); 2013 2013 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2014 2014 ql_disable_interrupts(qdev); ··· 3182 3182 /* 3183 3183 * Wake up the worker to process this event. 3184 3184 */ 3185 - queue_work(qdev->workqueue, &qdev->tx_timeout_work); 3185 + queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); 3186 3186 } 3187 3187 3188 - static void ql_reset_work(struct ql3_adapter *qdev) 3188 + static void ql_reset_work(struct work_struct *work) 3189 3189 { 3190 + struct ql3_adapter *qdev = 3191 + container_of(work, struct ql3_adapter, reset_work.work); 3190 3192 struct net_device *ndev = qdev->ndev; 3191 3193 u32 value; 3192 3194 struct ql_tx_buf_cb *tx_cb; ··· 3280 3278 } 3281 3279 } 3282 3280 3283 - static void ql_tx_timeout_work(struct ql3_adapter *qdev) 3281 + static void ql_tx_timeout_work(struct work_struct *work) 3284 3282 { 3285 - ql_cycle_adapter(qdev,QL_DO_RESET); 3283 + struct ql3_adapter *qdev = 3284 + container_of(work, struct ql3_adapter, tx_timeout_work.work); 3285 + 3286 + ql_cycle_adapter(qdev, QL_DO_RESET); 3286 3287 } 3287 3288 3288 3289 static void ql_get_board_info(struct ql3_adapter *qdev) ··· 3464 3459 netif_stop_queue(ndev); 3465 3460 3466 3461 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3467 - INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev); 3468 - INIT_WORK(&qdev->tx_timeout_work, 3469 - (void (*)(void *))ql_tx_timeout_work, qdev); 3462 + INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); 3463 + INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); 3470 3464 3471 3465 init_timer(&qdev->adapter_timer); 3472 3466 qdev->adapter_timer.function = ql3xxx_timer;
+2 -2
drivers/net/qla3xxx.h
··· 1186 1186 u32 numPorts; 1187 1187 struct net_device_stats stats; 1188 1188 struct workqueue_struct *workqueue; 1189 - struct work_struct reset_work; 1190 - struct work_struct tx_timeout_work; 1189 + struct delayed_work reset_work; 1190 + struct delayed_work tx_timeout_work; 1191 1191 u32 max_frame_size; 1192 1192 }; 1193 1193
+14 -9
drivers/net/r8169.c
··· 424 424 struct rtl8169_private { 425 425 void __iomem *mmio_addr; /* memory map physical address */ 426 426 struct pci_dev *pci_dev; /* Index of PCI device */ 427 + struct net_device *dev; 427 428 struct net_device_stats stats; /* statistics of net device */ 428 429 spinlock_t lock; /* spin lock flag */ 429 430 u32 msg_enable; ··· 456 455 void (*phy_reset_enable)(void __iomem *); 457 456 unsigned int (*phy_reset_pending)(void __iomem *); 458 457 unsigned int (*link_ok)(void __iomem *); 459 - struct work_struct task; 458 + struct delayed_work task; 460 459 unsigned wol_enabled : 1; 461 460 }; 462 461 ··· 1493 1492 SET_MODULE_OWNER(dev); 1494 1493 SET_NETDEV_DEV(dev, &pdev->dev); 1495 1494 tp = netdev_priv(dev); 1495 + tp->dev = dev; 1496 1496 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); 1497 1497 1498 1498 /* enable device (incl. PCI PM wakeup and hotplug setup) */ ··· 1766 1764 if (retval < 0) 1767 1765 goto err_free_rx; 1768 1766 1769 - INIT_WORK(&tp->task, NULL, dev); 1767 + INIT_DELAYED_WORK(&tp->task, NULL); 1770 1768 1771 1769 rtl8169_hw_start(dev); 1772 1770 ··· 2089 2087 tp->cur_tx = tp->dirty_tx = 0; 2090 2088 } 2091 2089 2092 - static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *)) 2090 + static void rtl8169_schedule_work(struct net_device *dev, work_func_t task) 2093 2091 { 2094 2092 struct rtl8169_private *tp = netdev_priv(dev); 2095 2093 2096 - PREPARE_WORK(&tp->task, task, dev); 2094 + PREPARE_DELAYED_WORK(&tp->task, task); 2097 2095 schedule_delayed_work(&tp->task, 4); 2098 2096 } 2099 2097 ··· 2112 2110 netif_poll_enable(dev); 2113 2111 } 2114 2112 2115 - static void rtl8169_reinit_task(void *_data) 2113 + static void rtl8169_reinit_task(struct work_struct *work) 2116 2114 { 2117 - struct net_device *dev = _data; 2115 + struct rtl8169_private *tp = 2116 + container_of(work, struct rtl8169_private, task.work); 2117 + struct net_device *dev = tp->dev; 2118 2118 int ret; 2119 2119 2120 2120 if 
(netif_running(dev)) { ··· 2139 2135 } 2140 2136 } 2141 2137 2142 - static void rtl8169_reset_task(void *_data) 2138 + static void rtl8169_reset_task(struct work_struct *work) 2143 2139 { 2144 - struct net_device *dev = _data; 2145 - struct rtl8169_private *tp = netdev_priv(dev); 2140 + struct rtl8169_private *tp = 2141 + container_of(work, struct rtl8169_private, task.work); 2142 + struct net_device *dev = tp->dev; 2146 2143 2147 2144 if (!netif_running(dev)) 2148 2145 return;
+7 -9
drivers/net/s2io.c
··· 5872 5872 * Description: Sets the link status for the adapter 5873 5873 */ 5874 5874 5875 - static void s2io_set_link(unsigned long data) 5875 + static void s2io_set_link(struct work_struct *work) 5876 5876 { 5877 - nic_t *nic = (nic_t *) data; 5877 + nic_t *nic = container_of(work, nic_t, set_link_task); 5878 5878 struct net_device *dev = nic->dev; 5879 5879 XENA_dev_config_t __iomem *bar0 = nic->bar0; 5880 5880 register u64 val64; ··· 6379 6379 * spin lock. 6380 6380 */ 6381 6381 6382 - static void s2io_restart_nic(unsigned long data) 6382 + static void s2io_restart_nic(struct work_struct *work) 6383 6383 { 6384 - struct net_device *dev = (struct net_device *) data; 6385 - nic_t *sp = dev->priv; 6384 + nic_t *sp = container_of(work, nic_t, rst_timer_task); 6385 + struct net_device *dev = sp->dev; 6386 6386 6387 6387 s2io_card_down(sp); 6388 6388 if (s2io_card_up(sp)) { ··· 6992 6992 6993 6993 dev->tx_timeout = &s2io_tx_watchdog; 6994 6994 dev->watchdog_timeo = WATCH_DOG_TIMEOUT; 6995 - INIT_WORK(&sp->rst_timer_task, 6996 - (void (*)(void *)) s2io_restart_nic, dev); 6997 - INIT_WORK(&sp->set_link_task, 6998 - (void (*)(void *)) s2io_set_link, sp); 6995 + INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); 6996 + INIT_WORK(&sp->set_link_task, s2io_set_link); 6999 6997 7000 6998 pci_save_state(sp->pdev); 7001 6999
+1 -1
drivers/net/s2io.h
··· 1000 1000 static irqreturn_t s2io_isr(int irq, void *dev_id); 1001 1001 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); 1002 1002 static const struct ethtool_ops netdev_ethtool_ops; 1003 - static void s2io_set_link(unsigned long data); 1003 + static void s2io_set_link(struct work_struct *work); 1004 1004 static int s2io_set_swapper(nic_t * sp); 1005 1005 static void s2io_card_down(nic_t *nic); 1006 1006 static int s2io_card_up(nic_t *nic);
+8 -5
drivers/net/sis190.c
··· 280 280 struct sis190_private { 281 281 void __iomem *mmio_addr; 282 282 struct pci_dev *pci_dev; 283 + struct net_device *dev; 283 284 struct net_device_stats stats; 284 285 spinlock_t lock; 285 286 u32 rx_buf_sz; ··· 898 897 netif_start_queue(dev); 899 898 } 900 899 901 - static void sis190_phy_task(void * data) 900 + static void sis190_phy_task(struct work_struct *work) 902 901 { 903 - struct net_device *dev = data; 904 - struct sis190_private *tp = netdev_priv(dev); 902 + struct sis190_private *tp = 903 + container_of(work, struct sis190_private, phy_task); 904 + struct net_device *dev = tp->dev; 905 905 void __iomem *ioaddr = tp->mmio_addr; 906 906 int phy_id = tp->mii_if.phy_id; 907 907 u16 val; ··· 1049 1047 if (rc < 0) 1050 1048 goto err_free_rx_1; 1051 1049 1052 - INIT_WORK(&tp->phy_task, sis190_phy_task, dev); 1050 + INIT_WORK(&tp->phy_task, sis190_phy_task); 1053 1051 1054 1052 sis190_request_timer(dev); 1055 1053 ··· 1438 1436 SET_NETDEV_DEV(dev, &pdev->dev); 1439 1437 1440 1438 tp = netdev_priv(dev); 1439 + tp->dev = dev; 1441 1440 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); 1442 1441 1443 1442 rc = pci_enable_device(pdev); ··· 1801 1798 1802 1799 sis190_init_rxfilter(dev); 1803 1800 1804 - INIT_WORK(&tp->phy_task, sis190_phy_task, dev); 1801 + INIT_WORK(&tp->phy_task, sis190_phy_task); 1805 1802 1806 1803 dev->open = sis190_open; 1807 1804 dev->stop = sis190_close;
+8 -7
drivers/net/skge.c
··· 1327 1327 * Since internal PHY is wired to a level triggered pin, can't 1328 1328 * get an interrupt when carrier is detected. 1329 1329 */ 1330 - static void xm_link_timer(void *arg) 1330 + static void xm_link_timer(struct work_struct *work) 1331 1331 { 1332 - struct net_device *dev = arg; 1333 - struct skge_port *skge = netdev_priv(arg); 1332 + struct skge_port *skge = 1333 + container_of(work, struct skge_port, link_thread.work); 1334 + struct net_device *dev = skge->netdev; 1334 1335 struct skge_hw *hw = skge->hw; 1335 1336 int port = skge->port; 1336 1337 ··· 3073 3072 * because accessing phy registers requires spin wait which might 3074 3073 * cause excess interrupt latency. 3075 3074 */ 3076 - static void skge_extirq(void *arg) 3075 + static void skge_extirq(struct work_struct *work) 3077 3076 { 3078 - struct skge_hw *hw = arg; 3077 + struct skge_hw *hw = container_of(work, struct skge_hw, phy_work); 3079 3078 int port; 3080 3079 3081 3080 mutex_lock(&hw->phy_mutex); ··· 3457 3456 skge->port = port; 3458 3457 3459 3458 /* Only used for Genesis XMAC */ 3460 - INIT_WORK(&skge->link_thread, xm_link_timer, dev); 3459 + INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer); 3461 3460 3462 3461 if (hw->chip_id != CHIP_ID_GENESIS) { 3463 3462 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; ··· 3544 3543 3545 3544 hw->pdev = pdev; 3546 3545 mutex_init(&hw->phy_mutex); 3547 - INIT_WORK(&hw->phy_work, skge_extirq, hw); 3546 + INIT_WORK(&hw->phy_work, skge_extirq); 3548 3547 spin_lock_init(&hw->hw_lock); 3549 3548 3550 3549 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+1 -1
drivers/net/skge.h
··· 2456 2456 2457 2457 struct net_device_stats net_stats; 2458 2458 2459 - struct work_struct link_thread; 2459 + struct delayed_work link_thread; 2460 2460 enum pause_control flow_control; 2461 2461 enum pause_status flow_status; 2462 2462 u8 rx_csum;
+5 -4
drivers/net/spider_net.c
··· 1945 1945 * called as task when tx hangs, resets interface (if interface is up) 1946 1946 */ 1947 1947 static void 1948 - spider_net_tx_timeout_task(void *data) 1948 + spider_net_tx_timeout_task(struct work_struct *work) 1949 1949 { 1950 - struct net_device *netdev = data; 1951 - struct spider_net_card *card = netdev_priv(netdev); 1950 + struct spider_net_card *card = 1951 + container_of(work, struct spider_net_card, tx_timeout_task); 1952 + struct net_device *netdev = card->netdev; 1952 1953 1953 1954 if (!(netdev->flags & IFF_UP)) 1954 1955 goto out; ··· 2123 2122 card = netdev_priv(netdev); 2124 2123 card->netdev = netdev; 2125 2124 card->msg_enable = SPIDER_NET_DEFAULT_MSG; 2126 - INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev); 2125 + INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task); 2127 2126 init_waitqueue_head(&card->waitq); 2128 2127 atomic_set(&card->tx_timeout_task_counter, 0); 2129 2128
+3 -3
drivers/net/sungem.c
··· 2281 2281 } 2282 2282 } 2283 2283 2284 - static void gem_reset_task(void *data) 2284 + static void gem_reset_task(struct work_struct *work) 2285 2285 { 2286 - struct gem *gp = (struct gem *) data; 2286 + struct gem *gp = container_of(work, struct gem, reset_task); 2287 2287 2288 2288 mutex_lock(&gp->pm_mutex); 2289 2289 ··· 3043 3043 gp->link_timer.function = gem_link_timer; 3044 3044 gp->link_timer.data = (unsigned long) gp; 3045 3045 3046 - INIT_WORK(&gp->reset_task, gem_reset_task, gp); 3046 + INIT_WORK(&gp->reset_task, gem_reset_task); 3047 3047 3048 3048 gp->lstate = link_down; 3049 3049 gp->timer_ticks = 0;
+3 -3
drivers/net/tg3.c
··· 3654 3654 } 3655 3655 #endif 3656 3656 3657 - static void tg3_reset_task(void *_data) 3657 + static void tg3_reset_task(struct work_struct *work) 3658 3658 { 3659 - struct tg3 *tp = _data; 3659 + struct tg3 *tp = container_of(work, struct tg3, reset_task); 3660 3660 unsigned int restart_timer; 3661 3661 3662 3662 tg3_full_lock(tp, 0); ··· 11734 11734 #endif 11735 11735 spin_lock_init(&tp->lock); 11736 11736 spin_lock_init(&tp->indirect_lock); 11737 - INIT_WORK(&tp->reset_task, tg3_reset_task, tp); 11737 + INIT_WORK(&tp->reset_task, tg3_reset_task); 11738 11738 11739 11739 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); 11740 11740 if (tp->regs == 0UL) {
+22 -1
drivers/net/tlan.c
··· 296 296 static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 297 297 static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); 298 298 static void TLan_tx_timeout( struct net_device *dev); 299 + static void TLan_tx_timeout_work(struct work_struct *work); 299 300 static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); 300 301 301 302 static u32 TLan_HandleInvalid( struct net_device *, u16 ); ··· 563 562 priv = netdev_priv(dev); 564 563 565 564 priv->pciDev = pdev; 565 + priv->dev = dev; 566 566 567 567 /* Is this a PCI device? */ 568 568 if (pdev) { ··· 636 634 637 635 /* This will be used when we get an adapter error from 638 636 * within our irq handler */ 639 - INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev); 637 + INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work); 640 638 641 639 spin_lock_init(&priv->lock); 642 640 ··· 1039 1037 dev->trans_start = jiffies; 1040 1038 netif_wake_queue( dev ); 1041 1039 1040 + } 1041 + 1042 + 1043 + /*************************************************************** 1044 + * TLan_tx_timeout_work 1045 + * 1046 + * Returns: nothing 1047 + * 1048 + * Params: 1049 + * work work item of device which timed out 1050 + * 1051 + **************************************************************/ 1052 + 1053 + static void TLan_tx_timeout_work(struct work_struct *work) 1054 + { 1055 + TLanPrivateInfo *priv = 1056 + container_of(work, TLanPrivateInfo, tlan_tqueue); 1057 + 1058 + TLan_tx_timeout(priv->dev); 1042 1059 } 1043 1060 1044 1061
+1
drivers/net/tlan.h
··· 170 170 typedef struct tlan_private_tag { 171 171 struct net_device *nextDevice; 172 172 struct pci_dev *pciDev; 173 + struct net_device *dev; 173 174 void *dmaStorage; 174 175 dma_addr_t dmaStorageDMA; 175 176 unsigned int dmaSize;
+4 -3
drivers/net/tulip/21142.c
··· 26 26 27 27 /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list 28 28 of available transceivers. */ 29 - void t21142_media_task(void *data) 29 + void t21142_media_task(struct work_struct *work) 30 30 { 31 - struct net_device *dev = data; 32 - struct tulip_private *tp = netdev_priv(dev); 31 + struct tulip_private *tp = 32 + container_of(work, struct tulip_private, media_work); 33 + struct net_device *dev = tp->dev; 33 34 void __iomem *ioaddr = tp->base_addr; 34 35 int csr12 = ioread32(ioaddr + CSR12); 35 36 int next_tick = 60*HZ;
+4 -3
drivers/net/tulip/timer.c
··· 18 18 #include "tulip.h" 19 19 20 20 21 - void tulip_media_task(void *data) 21 + void tulip_media_task(struct work_struct *work) 22 22 { 23 - struct net_device *dev = data; 24 - struct tulip_private *tp = netdev_priv(dev); 23 + struct tulip_private *tp = 24 + container_of(work, struct tulip_private, media_work); 25 + struct net_device *dev = tp->dev; 25 26 void __iomem *ioaddr = tp->base_addr; 26 27 u32 csr12 = ioread32(ioaddr + CSR12); 27 28 int next_tick = 2*HZ;
+4 -3
drivers/net/tulip/tulip.h
··· 44 44 int valid_intrs; /* CSR7 interrupt enable settings */ 45 45 int flags; 46 46 void (*media_timer) (unsigned long); 47 - void (*media_task) (void *); 47 + work_func_t media_task; 48 48 }; 49 49 50 50 ··· 392 392 int csr12_shadow; 393 393 int pad0; /* Used for 8-byte alignment */ 394 394 struct work_struct media_work; 395 + struct net_device *dev; 395 396 }; 396 397 397 398 ··· 407 406 408 407 /* 21142.c */ 409 408 extern u16 t21142_csr14[]; 410 - void t21142_media_task(void *data); 409 + void t21142_media_task(struct work_struct *work); 411 410 void t21142_start_nway(struct net_device *dev); 412 411 void t21142_lnk_change(struct net_device *dev, int csr5); 413 412 ··· 445 444 void pnic_timer(unsigned long data); 446 445 447 446 /* timer.c */ 448 - void tulip_media_task(void *data); 447 + void tulip_media_task(struct work_struct *work); 449 448 void mxic_timer(unsigned long data); 450 449 void comet_timer(unsigned long data); 451 450
+2 -1
drivers/net/tulip/tulip_core.c
··· 1367 1367 * it is zeroed and aligned in alloc_etherdev 1368 1368 */ 1369 1369 tp = netdev_priv(dev); 1370 + tp->dev = dev; 1370 1371 1371 1372 tp->rx_ring = pci_alloc_consistent(pdev, 1372 1373 sizeof(struct tulip_rx_desc) * RX_RING_SIZE + ··· 1390 1389 tp->timer.data = (unsigned long)dev; 1391 1390 tp->timer.function = tulip_tbl[tp->chip_id].media_timer; 1392 1391 1393 - INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev); 1392 + INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); 1394 1393 1395 1394 dev->base_addr = (unsigned long)ioaddr; 1396 1395
+13 -10
drivers/net/wan/pc300_tty.c
··· 125 125 static int cpc_tty_chars_in_buffer(struct tty_struct *tty); 126 126 static void cpc_tty_flush_buffer(struct tty_struct *tty); 127 127 static void cpc_tty_hangup(struct tty_struct *tty); 128 - static void cpc_tty_rx_work(void *data); 129 - static void cpc_tty_tx_work(void *data); 128 + static void cpc_tty_rx_work(struct work_struct *work); 129 + static void cpc_tty_tx_work(struct work_struct *work); 130 130 static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len); 131 131 static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); 132 132 static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); ··· 261 261 cpc_tty->tty_minor = port + CPC_TTY_MINOR_START; 262 262 cpc_tty->pc300dev = pc300dev; 263 263 264 - INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty); 265 - INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port); 264 + INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work); 265 + INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work); 266 266 267 267 cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL; 268 268 ··· 659 659 * o call the line disc. 
read 660 660 * o free memory 661 661 */ 662 - static void cpc_tty_rx_work(void * data) 662 + static void cpc_tty_rx_work(struct work_struct *work) 663 663 { 664 + st_cpc_tty_area *cpc_tty; 664 665 unsigned long port; 665 666 int i, j; 666 - st_cpc_tty_area *cpc_tty; 667 667 volatile st_cpc_rx_buf *buf; 668 668 char flags=0,flg_rx=1; 669 669 struct tty_ldisc *ld; 670 670 671 671 if (cpc_tty_cnt == 0) return; 672 - 673 672 674 673 for (i=0; (i < 4) && flg_rx ; i++) { 675 674 flg_rx = 0; 676 - port = (unsigned long)data; 675 + 676 + cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work); 677 + port = cpc_tty - cpc_tty_area; 678 + 677 679 for (j=0; j < CPC_TTY_NPORTS; j++) { 678 680 cpc_tty = &cpc_tty_area[port]; 679 681 ··· 884 882 * o if need call line discipline wakeup 885 883 * o call wake_up_interruptible 886 884 */ 887 - static void cpc_tty_tx_work(void *data) 885 + static void cpc_tty_tx_work(struct work_struct *work) 888 886 { 889 - st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; 887 + st_cpc_tty_area *cpc_tty = 888 + container_of(work, st_cpc_tty_area, tty_tx_work); 890 889 struct tty_struct *tty; 891 890 892 891 CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
+1 -1
drivers/net/wireless/bcm43xx/bcm43xx.h
··· 787 787 struct tasklet_struct isr_tasklet; 788 788 789 789 /* Periodic tasks */ 790 - struct work_struct periodic_work; 790 + struct delayed_work periodic_work; 791 791 unsigned int periodic_state; 792 792 793 793 struct work_struct restart_work;
+11 -9
drivers/net/wireless/bcm43xx/bcm43xx_main.c
··· 3177 3177 return badness; 3178 3178 } 3179 3179 3180 - static void bcm43xx_periodic_work_handler(void *d) 3180 + static void bcm43xx_periodic_work_handler(struct work_struct *work) 3181 3181 { 3182 - struct bcm43xx_private *bcm = d; 3182 + struct bcm43xx_private *bcm = 3183 + container_of(work, struct bcm43xx_private, periodic_work.work); 3183 3184 struct net_device *net_dev = bcm->net_dev; 3184 3185 unsigned long flags; 3185 3186 u32 savedirqs = 0; ··· 3243 3242 3244 3243 void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) 3245 3244 { 3246 - struct work_struct *work = &(bcm->periodic_work); 3245 + struct delayed_work *work = &bcm->periodic_work; 3247 3246 3248 3247 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED); 3249 - INIT_WORK(work, bcm43xx_periodic_work_handler, bcm); 3250 - schedule_work(work); 3248 + INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler); 3249 + schedule_delayed_work(work, 0); 3251 3250 } 3252 3251 3253 3252 static void bcm43xx_security_init(struct bcm43xx_private *bcm) ··· 3599 3598 bcm43xx_periodic_tasks_setup(bcm); 3600 3599 3601 3600 /*FIXME: This should be handled by softmac instead. */ 3602 - schedule_work(&bcm->softmac->associnfo.work); 3601 + schedule_delayed_work(&bcm->softmac->associnfo.work, 0); 3603 3602 3604 3603 out: 3605 3604 mutex_unlock(&(bcm)->mutex); ··· 4150 4149 /* Hard-reset the chip. Do not call this directly. 
4151 4150 * Use bcm43xx_controller_restart() 4152 4151 */ 4153 - static void bcm43xx_chip_reset(void *_bcm) 4152 + static void bcm43xx_chip_reset(struct work_struct *work) 4154 4153 { 4155 - struct bcm43xx_private *bcm = _bcm; 4154 + struct bcm43xx_private *bcm = 4155 + container_of(work, struct bcm43xx_private, restart_work); 4156 4156 struct bcm43xx_phyinfo *phy; 4157 4157 int err = -ENODEV; 4158 4158 ··· 4180 4178 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) 4181 4179 return; 4182 4180 printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason); 4183 - INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm); 4181 + INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset); 4184 4182 schedule_work(&bcm->restart_work); 4185 4183 } 4186 4184
+1 -1
drivers/net/wireless/hostap/hostap.h
··· 35 35 struct net_device_stats *hostap_get_stats(struct net_device *dev); 36 36 void hostap_setup_dev(struct net_device *dev, local_info_t *local, 37 37 int main_dev); 38 - void hostap_set_multicast_list_queue(void *data); 38 + void hostap_set_multicast_list_queue(struct work_struct *work); 39 39 int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked); 40 40 int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked); 41 41 void hostap_cleanup(local_info_t *local);
+11 -8
drivers/net/wireless/hostap/hostap_ap.c
··· 49 49 static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta); 50 50 static void hostap_event_expired_sta(struct net_device *dev, 51 51 struct sta_info *sta); 52 - static void handle_add_proc_queue(void *data); 52 + static void handle_add_proc_queue(struct work_struct *work); 53 53 54 54 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 55 - static void handle_wds_oper_queue(void *data); 55 + static void handle_wds_oper_queue(struct work_struct *work); 56 56 static void prism2_send_mgmt(struct net_device *dev, 57 57 u16 type_subtype, char *body, 58 58 int body_len, u8 *addr, u16 tx_cb_idx); ··· 807 807 INIT_LIST_HEAD(&ap->sta_list); 808 808 809 809 /* Initialize task queue structure for AP management */ 810 - INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap); 810 + INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue); 811 811 812 812 ap->tx_callback_idx = 813 813 hostap_tx_callback_register(local, hostap_ap_tx_cb, ap); ··· 815 815 printk(KERN_WARNING "%s: failed to register TX callback for " 816 816 "AP\n", local->dev->name); 817 817 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 818 - INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local); 818 + INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue); 819 819 820 820 ap->tx_callback_auth = 821 821 hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap); ··· 1062 1062 } 1063 1063 1064 1064 1065 - static void handle_add_proc_queue(void *data) 1065 + static void handle_add_proc_queue(struct work_struct *work) 1066 1066 { 1067 - struct ap_data *ap = (struct ap_data *) data; 1067 + struct ap_data *ap = container_of(work, struct ap_data, 1068 + add_sta_proc_queue); 1068 1069 struct sta_info *sta; 1069 1070 char name[20]; 1070 1071 struct add_sta_proc_data *entry, *prev; ··· 1953 1952 1954 1953 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 1955 1954 1956 - static void handle_wds_oper_queue(void *data) 1955 + static void handle_wds_oper_queue(struct work_struct *work) 1957 1956 { 
1958 - local_info_t *local = data; 1957 + struct ap_data *ap = container_of(work, struct ap_data, 1958 + wds_oper_queue); 1959 + local_info_t *local = ap->local; 1959 1960 struct wds_oper_data *entry, *prev; 1960 1961 1961 1962 spin_lock_bh(&local->lock);
+11 -10
drivers/net/wireless/hostap/hostap_hw.c
··· 1645 1645 1646 1646 /* Called only as scheduled task after noticing card timeout in interrupt 1647 1647 * context */ 1648 - static void handle_reset_queue(void *data) 1648 + static void handle_reset_queue(struct work_struct *work) 1649 1649 { 1650 - local_info_t *local = (local_info_t *) data; 1650 + local_info_t *local = container_of(work, local_info_t, reset_queue); 1651 1651 1652 1652 printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name); 1653 1653 prism2_hw_reset(local->dev); ··· 2896 2896 2897 2897 /* Called only as a scheduled task when communications quality values should 2898 2898 * be updated. */ 2899 - static void handle_comms_qual_update(void *data) 2899 + static void handle_comms_qual_update(struct work_struct *work) 2900 2900 { 2901 - local_info_t *local = data; 2901 + local_info_t *local = 2902 + container_of(work, local_info_t, comms_qual_update); 2902 2903 prism2_update_comms_qual(local->dev); 2903 2904 } 2904 2905 ··· 3051 3050 } 3052 3051 3053 3052 3054 - static void handle_set_tim_queue(void *data) 3053 + static void handle_set_tim_queue(struct work_struct *work) 3055 3054 { 3056 - local_info_t *local = (local_info_t *) data; 3055 + local_info_t *local = container_of(work, local_info_t, set_tim_queue); 3057 3056 struct set_tim_data *entry; 3058 3057 u16 val; 3059 3058 ··· 3210 3209 local->scan_channel_mask = 0xffff; 3211 3210 3212 3211 /* Initialize task queue structures */ 3213 - INIT_WORK(&local->reset_queue, handle_reset_queue, local); 3212 + INIT_WORK(&local->reset_queue, handle_reset_queue); 3214 3213 INIT_WORK(&local->set_multicast_list_queue, 3215 - hostap_set_multicast_list_queue, local->dev); 3214 + hostap_set_multicast_list_queue); 3216 3215 3217 - INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local); 3216 + INIT_WORK(&local->set_tim_queue, handle_set_tim_queue); 3218 3217 INIT_LIST_HEAD(&local->set_tim_list); 3219 3218 spin_lock_init(&local->set_tim_lock); 3220 3219 3221 - INIT_WORK(&local->comms_qual_update, 
handle_comms_qual_update, local); 3220 + INIT_WORK(&local->comms_qual_update, handle_comms_qual_update); 3222 3221 3223 3222 /* Initialize tasklets for handling hardware IRQ related operations 3224 3223 * outside hw IRQ handler */
+3 -3
drivers/net/wireless/hostap/hostap_info.c
··· 474 474 475 475 /* Called only as scheduled task after receiving info frames (used to avoid 476 476 * pending too much time in HW IRQ handler). */ 477 - static void handle_info_queue(void *data) 477 + static void handle_info_queue(struct work_struct *work) 478 478 { 479 - local_info_t *local = (local_info_t *) data; 479 + local_info_t *local = container_of(work, local_info_t, info_queue); 480 480 481 481 if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS, 482 482 &local->pending_info)) ··· 493 493 { 494 494 skb_queue_head_init(&local->info_list); 495 495 #ifndef PRISM2_NO_STATION_MODES 496 - INIT_WORK(&local->info_queue, handle_info_queue, local); 496 + INIT_WORK(&local->info_queue, handle_info_queue); 497 497 #endif /* PRISM2_NO_STATION_MODES */ 498 498 } 499 499
+4 -4
drivers/net/wireless/hostap/hostap_main.c
··· 767 767 768 768 /* TODO: to be further implemented as soon as Prism2 fully supports 769 769 * GroupAddresses and correct documentation is available */ 770 - void hostap_set_multicast_list_queue(void *data) 770 + void hostap_set_multicast_list_queue(struct work_struct *work) 771 771 { 772 - struct net_device *dev = (struct net_device *) data; 772 + local_info_t *local = 773 + container_of(work, local_info_t, set_multicast_list_queue); 774 + struct net_device *dev = local->dev; 773 775 struct hostap_interface *iface; 774 - local_info_t *local; 775 776 776 777 iface = netdev_priv(dev); 777 - local = iface->local; 778 778 if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, 779 779 local->is_promisc)) { 780 780 printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
+27 -20
drivers/net/wireless/ipw2100.c
··· 316 316 struct ipw2100_fw *fw); 317 317 static int ipw2100_ucode_download(struct ipw2100_priv *priv, 318 318 struct ipw2100_fw *fw); 319 - static void ipw2100_wx_event_work(struct ipw2100_priv *priv); 319 + static void ipw2100_wx_event_work(struct work_struct *work); 320 320 static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); 321 321 static struct iw_handler_def ipw2100_wx_handler_def; 322 322 ··· 679 679 queue_delayed_work(priv->workqueue, &priv->reset_work, 680 680 priv->reset_backoff * HZ); 681 681 else 682 - queue_work(priv->workqueue, &priv->reset_work); 682 + queue_delayed_work(priv->workqueue, &priv->reset_work, 683 + 0); 683 684 684 685 if (priv->reset_backoff < MAX_RESET_BACKOFF) 685 686 priv->reset_backoff++; ··· 1874 1873 netif_stop_queue(priv->net_dev); 1875 1874 } 1876 1875 1877 - static void ipw2100_reset_adapter(struct ipw2100_priv *priv) 1876 + static void ipw2100_reset_adapter(struct work_struct *work) 1878 1877 { 1878 + struct ipw2100_priv *priv = 1879 + container_of(work, struct ipw2100_priv, reset_work.work); 1879 1880 unsigned long flags; 1880 1881 union iwreq_data wrqu = { 1881 1882 .ap_addr = { ··· 2074 2071 return; 2075 2072 2076 2073 if (priv->status & STATUS_SECURITY_UPDATED) 2077 - queue_work(priv->workqueue, &priv->security_work); 2074 + queue_delayed_work(priv->workqueue, &priv->security_work, 0); 2078 2075 2079 - queue_work(priv->workqueue, &priv->wx_event_work); 2076 + queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0); 2080 2077 } 2081 2078 2082 2079 static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) ··· 5527 5524 return err; 5528 5525 } 5529 5526 5530 - static void ipw2100_security_work(struct ipw2100_priv *priv) 5527 + static void ipw2100_security_work(struct work_struct *work) 5531 5528 { 5529 + struct ipw2100_priv *priv = 5530 + container_of(work, struct ipw2100_priv, security_work.work); 5531 + 5532 5532 /* If we happen to have reconnected before we get a chance to 
5533 5533 * process this, then update the security settings--which causes 5534 5534 * a disassociation to occur */ ··· 5754 5748 5755 5749 priv->reset_backoff = 0; 5756 5750 mutex_unlock(&priv->action_mutex); 5757 - ipw2100_reset_adapter(priv); 5751 + ipw2100_reset_adapter(&priv->reset_work.work); 5758 5752 return 0; 5759 5753 5760 5754 done: ··· 5929 5923 .get_drvinfo = ipw_ethtool_get_drvinfo, 5930 5924 }; 5931 5925 5932 - static void ipw2100_hang_check(void *adapter) 5926 + static void ipw2100_hang_check(struct work_struct *work) 5933 5927 { 5934 - struct ipw2100_priv *priv = adapter; 5928 + struct ipw2100_priv *priv = 5929 + container_of(work, struct ipw2100_priv, hang_check.work); 5935 5930 unsigned long flags; 5936 5931 u32 rtc = 0xa5a5a5a5; 5937 5932 u32 len = sizeof(rtc); ··· 5972 5965 spin_unlock_irqrestore(&priv->low_lock, flags); 5973 5966 } 5974 5967 5975 - static void ipw2100_rf_kill(void *adapter) 5968 + static void ipw2100_rf_kill(struct work_struct *work) 5976 5969 { 5977 - struct ipw2100_priv *priv = adapter; 5970 + struct ipw2100_priv *priv = 5971 + container_of(work, struct ipw2100_priv, rf_kill.work); 5978 5972 unsigned long flags; 5979 5973 5980 5974 spin_lock_irqsave(&priv->low_lock, flags); ··· 6125 6117 6126 6118 priv->workqueue = create_workqueue(DRV_NAME); 6127 6119 6128 - INIT_WORK(&priv->reset_work, 6129 - (void (*)(void *))ipw2100_reset_adapter, priv); 6130 - INIT_WORK(&priv->security_work, 6131 - (void (*)(void *))ipw2100_security_work, priv); 6132 - INIT_WORK(&priv->wx_event_work, 6133 - (void (*)(void *))ipw2100_wx_event_work, priv); 6134 - INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv); 6135 - INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv); 6120 + INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter); 6121 + INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work); 6122 + INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); 6123 + INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check); 6124 + 
INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill); 6136 6125 6137 6126 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 6138 6127 ipw2100_irq_tasklet, (unsigned long)priv); ··· 8295 8290 .get_wireless_stats = ipw2100_wx_wireless_stats, 8296 8291 }; 8297 8292 8298 - static void ipw2100_wx_event_work(struct ipw2100_priv *priv) 8293 + static void ipw2100_wx_event_work(struct work_struct *work) 8299 8294 { 8295 + struct ipw2100_priv *priv = 8296 + container_of(work, struct ipw2100_priv, wx_event_work.work); 8300 8297 union iwreq_data wrqu; 8301 8298 int len = ETH_ALEN; 8302 8299
+5 -5
drivers/net/wireless/ipw2100.h
··· 583 583 struct tasklet_struct irq_tasklet; 584 584 585 585 struct workqueue_struct *workqueue; 586 - struct work_struct reset_work; 587 - struct work_struct security_work; 588 - struct work_struct wx_event_work; 589 - struct work_struct hang_check; 590 - struct work_struct rf_kill; 586 + struct delayed_work reset_work; 587 + struct delayed_work security_work; 588 + struct delayed_work wx_event_work; 589 + struct delayed_work hang_check; 590 + struct delayed_work rf_kill; 591 591 592 592 u32 interrupts; 593 593 int tx_interrupts;
+127 -108
drivers/net/wireless/ipw2200.c
··· 187 187 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); 188 188 static void ipw_rx_queue_replenish(void *); 189 189 static int ipw_up(struct ipw_priv *); 190 - static void ipw_bg_up(void *); 190 + static void ipw_bg_up(struct work_struct *work); 191 191 static void ipw_down(struct ipw_priv *); 192 - static void ipw_bg_down(void *); 192 + static void ipw_bg_down(struct work_struct *work); 193 193 static int ipw_config(struct ipw_priv *); 194 194 static int init_supported_rates(struct ipw_priv *priv, 195 195 struct ipw_supported_rates *prates); ··· 862 862 spin_unlock_irqrestore(&priv->lock, flags); 863 863 } 864 864 865 - static void ipw_bg_led_link_on(void *data) 865 + static void ipw_bg_led_link_on(struct work_struct *work) 866 866 { 867 - struct ipw_priv *priv = data; 867 + struct ipw_priv *priv = 868 + container_of(work, struct ipw_priv, led_link_on.work); 868 869 mutex_lock(&priv->mutex); 869 - ipw_led_link_on(data); 870 + ipw_led_link_on(priv); 870 871 mutex_unlock(&priv->mutex); 871 872 } 872 873 ··· 907 906 spin_unlock_irqrestore(&priv->lock, flags); 908 907 } 909 908 910 - static void ipw_bg_led_link_off(void *data) 909 + static void ipw_bg_led_link_off(struct work_struct *work) 911 910 { 912 - struct ipw_priv *priv = data; 911 + struct ipw_priv *priv = 912 + container_of(work, struct ipw_priv, led_link_off.work); 913 913 mutex_lock(&priv->mutex); 914 - ipw_led_link_off(data); 914 + ipw_led_link_off(priv); 915 915 mutex_unlock(&priv->mutex); 916 916 } 917 917 ··· 987 985 spin_unlock_irqrestore(&priv->lock, flags); 988 986 } 989 987 990 - static void ipw_bg_led_activity_off(void *data) 988 + static void ipw_bg_led_activity_off(struct work_struct *work) 991 989 { 992 - struct ipw_priv *priv = data; 990 + struct ipw_priv *priv = 991 + container_of(work, struct ipw_priv, led_act_off.work); 993 992 mutex_lock(&priv->mutex); 994 - ipw_led_activity_off(data); 993 + ipw_led_activity_off(priv); 995 994 mutex_unlock(&priv->mutex); 996 995 
} 997 996 ··· 2231 2228 } 2232 2229 } 2233 2230 2234 - static void ipw_bg_adapter_restart(void *data) 2231 + static void ipw_bg_adapter_restart(struct work_struct *work) 2235 2232 { 2236 - struct ipw_priv *priv = data; 2233 + struct ipw_priv *priv = 2234 + container_of(work, struct ipw_priv, adapter_restart); 2237 2235 mutex_lock(&priv->mutex); 2238 - ipw_adapter_restart(data); 2236 + ipw_adapter_restart(priv); 2239 2237 mutex_unlock(&priv->mutex); 2240 2238 } 2241 2239 ··· 2253 2249 } 2254 2250 } 2255 2251 2256 - static void ipw_bg_scan_check(void *data) 2252 + static void ipw_bg_scan_check(struct work_struct *work) 2257 2253 { 2258 - struct ipw_priv *priv = data; 2254 + struct ipw_priv *priv = 2255 + container_of(work, struct ipw_priv, scan_check.work); 2259 2256 mutex_lock(&priv->mutex); 2260 - ipw_scan_check(data); 2257 + ipw_scan_check(priv); 2261 2258 mutex_unlock(&priv->mutex); 2262 2259 } 2263 2260 ··· 3836 3831 return 1; 3837 3832 } 3838 3833 3839 - static void ipw_bg_disassociate(void *data) 3834 + static void ipw_bg_disassociate(struct work_struct *work) 3840 3835 { 3841 - struct ipw_priv *priv = data; 3836 + struct ipw_priv *priv = 3837 + container_of(work, struct ipw_priv, disassociate); 3842 3838 mutex_lock(&priv->mutex); 3843 - ipw_disassociate(data); 3839 + ipw_disassociate(priv); 3844 3840 mutex_unlock(&priv->mutex); 3845 3841 } 3846 3842 3847 - static void ipw_system_config(void *data) 3843 + static void ipw_system_config(struct work_struct *work) 3848 3844 { 3849 - struct ipw_priv *priv = data; 3845 + struct ipw_priv *priv = 3846 + container_of(work, struct ipw_priv, system_config); 3850 3847 3851 3848 #ifdef CONFIG_IPW2200_PROMISCUOUS 3852 3849 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { ··· 4215 4208 IPW_STATS_INTERVAL); 4216 4209 } 4217 4210 4218 - static void ipw_bg_gather_stats(void *data) 4211 + static void ipw_bg_gather_stats(struct work_struct *work) 4219 4212 { 4220 - struct ipw_priv *priv = data; 4213 + struct 
ipw_priv *priv = 4214 + container_of(work, struct ipw_priv, gather_stats.work); 4221 4215 mutex_lock(&priv->mutex); 4222 - ipw_gather_stats(data); 4216 + ipw_gather_stats(priv); 4223 4217 mutex_unlock(&priv->mutex); 4224 4218 } 4225 4219 ··· 4276 4268 if (!(priv->status & STATUS_ROAMING)) { 4277 4269 priv->status |= STATUS_ROAMING; 4278 4270 if (!(priv->status & STATUS_SCANNING)) 4279 - queue_work(priv->workqueue, 4280 - &priv->request_scan); 4271 + queue_delayed_work(priv->workqueue, 4272 + &priv->request_scan, 0); 4281 4273 } 4282 4274 return; 4283 4275 } ··· 4615 4607 #ifdef CONFIG_IPW2200_MONITOR 4616 4608 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 4617 4609 priv->status |= STATUS_SCAN_FORCED; 4618 - queue_work(priv->workqueue, 4619 - &priv->request_scan); 4610 + queue_delayed_work(priv->workqueue, 4611 + &priv->request_scan, 0); 4620 4612 break; 4621 4613 } 4622 4614 priv->status &= ~STATUS_SCAN_FORCED; ··· 4639 4631 /* Don't schedule if we aborted the scan */ 4640 4632 priv->status &= ~STATUS_ROAMING; 4641 4633 } else if (priv->status & STATUS_SCAN_PENDING) 4642 - queue_work(priv->workqueue, 4643 - &priv->request_scan); 4634 + queue_delayed_work(priv->workqueue, 4635 + &priv->request_scan, 0); 4644 4636 else if (priv->config & CFG_BACKGROUND_SCAN 4645 4637 && priv->status & STATUS_ASSOCIATED) 4646 4638 queue_delayed_work(priv->workqueue, ··· 5063 5055 ipw_rx_queue_restock(priv); 5064 5056 } 5065 5057 5066 - static void ipw_bg_rx_queue_replenish(void *data) 5058 + static void ipw_bg_rx_queue_replenish(struct work_struct *work) 5067 5059 { 5068 - struct ipw_priv *priv = data; 5060 + struct ipw_priv *priv = 5061 + container_of(work, struct ipw_priv, rx_replenish); 5069 5062 mutex_lock(&priv->mutex); 5070 - ipw_rx_queue_replenish(data); 5063 + ipw_rx_queue_replenish(priv); 5071 5064 mutex_unlock(&priv->mutex); 5072 5065 } 5073 5066 ··· 5498 5489 return 1; 5499 5490 } 5500 5491 5501 - static void ipw_merge_adhoc_network(void *data) 5492 + static void 
ipw_merge_adhoc_network(struct work_struct *work) 5502 5493 { 5503 - struct ipw_priv *priv = data; 5494 + struct ipw_priv *priv = 5495 + container_of(work, struct ipw_priv, merge_networks); 5504 5496 struct ieee80211_network *network = NULL; 5505 5497 struct ipw_network_match match = { 5506 5498 .network = priv->assoc_network ··· 5958 5948 priv->assoc_request.beacon_interval); 5959 5949 } 5960 5950 5961 - static void ipw_bg_adhoc_check(void *data) 5951 + static void ipw_bg_adhoc_check(struct work_struct *work) 5962 5952 { 5963 - struct ipw_priv *priv = data; 5953 + struct ipw_priv *priv = 5954 + container_of(work, struct ipw_priv, adhoc_check.work); 5964 5955 mutex_lock(&priv->mutex); 5965 - ipw_adhoc_check(data); 5956 + ipw_adhoc_check(priv); 5966 5957 mutex_unlock(&priv->mutex); 5967 5958 } 5968 5959 ··· 6310 6299 return err; 6311 6300 } 6312 6301 6313 - static int ipw_request_passive_scan(struct ipw_priv *priv) { 6314 - return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); 6315 - } 6316 - 6317 - static int ipw_request_scan(struct ipw_priv *priv) { 6318 - return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); 6319 - } 6320 - 6321 - static void ipw_bg_abort_scan(void *data) 6302 + static void ipw_request_passive_scan(struct work_struct *work) 6322 6303 { 6323 - struct ipw_priv *priv = data; 6304 + struct ipw_priv *priv = 6305 + container_of(work, struct ipw_priv, request_passive_scan); 6306 + ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); 6307 + } 6308 + 6309 + static void ipw_request_scan(struct work_struct *work) 6310 + { 6311 + struct ipw_priv *priv = 6312 + container_of(work, struct ipw_priv, request_scan.work); 6313 + ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); 6314 + } 6315 + 6316 + static void ipw_bg_abort_scan(struct work_struct *work) 6317 + { 6318 + struct ipw_priv *priv = 6319 + container_of(work, struct ipw_priv, abort_scan); 6324 6320 mutex_lock(&priv->mutex); 6325 - ipw_abort_scan(data); 6321 + ipw_abort_scan(priv); 6326 6322 
mutex_unlock(&priv->mutex); 6327 6323 } 6328 6324 ··· 7102 7084 /* 7103 7085 * background support to run QoS activate functionality 7104 7086 */ 7105 - static void ipw_bg_qos_activate(void *data) 7087 + static void ipw_bg_qos_activate(struct work_struct *work) 7106 7088 { 7107 - struct ipw_priv *priv = data; 7089 + struct ipw_priv *priv = 7090 + container_of(work, struct ipw_priv, qos_activate); 7108 7091 7109 7092 if (priv == NULL) 7110 7093 return; ··· 7413 7394 priv->status &= ~STATUS_ROAMING; 7414 7395 } 7415 7396 7416 - static void ipw_bg_roam(void *data) 7397 + static void ipw_bg_roam(struct work_struct *work) 7417 7398 { 7418 - struct ipw_priv *priv = data; 7399 + struct ipw_priv *priv = 7400 + container_of(work, struct ipw_priv, roam); 7419 7401 mutex_lock(&priv->mutex); 7420 - ipw_roam(data); 7402 + ipw_roam(priv); 7421 7403 mutex_unlock(&priv->mutex); 7422 7404 } 7423 7405 ··· 7499 7479 &priv->request_scan, 7500 7480 SCAN_INTERVAL); 7501 7481 else 7502 - queue_work(priv->workqueue, 7503 - &priv->request_scan); 7482 + queue_delayed_work(priv->workqueue, 7483 + &priv->request_scan, 0); 7504 7484 } 7505 7485 7506 7486 return 0; ··· 7511 7491 return 1; 7512 7492 } 7513 7493 7514 - static void ipw_bg_associate(void *data) 7494 + static void ipw_bg_associate(struct work_struct *work) 7515 7495 { 7516 - struct ipw_priv *priv = data; 7496 + struct ipw_priv *priv = 7497 + container_of(work, struct ipw_priv, associate); 7517 7498 mutex_lock(&priv->mutex); 7518 - ipw_associate(data); 7499 + ipw_associate(priv); 7519 7500 mutex_unlock(&priv->mutex); 7520 7501 } 7521 7502 ··· 9431 9410 9432 9411 IPW_DEBUG_WX("Start scan\n"); 9433 9412 9434 - queue_work(priv->workqueue, &priv->request_scan); 9413 + queue_delayed_work(priv->workqueue, &priv->request_scan, 0); 9435 9414 9436 9415 return 0; 9437 9416 } ··· 10568 10547 spin_unlock_irqrestore(&priv->lock, flags); 10569 10548 } 10570 10549 10571 - static void ipw_bg_rf_kill(void *data) 10550 + static void 
ipw_bg_rf_kill(struct work_struct *work) 10572 10551 { 10573 - struct ipw_priv *priv = data; 10552 + struct ipw_priv *priv = 10553 + container_of(work, struct ipw_priv, rf_kill.work); 10574 10554 mutex_lock(&priv->mutex); 10575 - ipw_rf_kill(data); 10555 + ipw_rf_kill(priv); 10576 10556 mutex_unlock(&priv->mutex); 10577 10557 } 10578 10558 ··· 10604 10582 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); 10605 10583 } 10606 10584 10607 - static void ipw_bg_link_up(void *data) 10585 + static void ipw_bg_link_up(struct work_struct *work) 10608 10586 { 10609 - struct ipw_priv *priv = data; 10587 + struct ipw_priv *priv = 10588 + container_of(work, struct ipw_priv, link_up); 10610 10589 mutex_lock(&priv->mutex); 10611 - ipw_link_up(data); 10590 + ipw_link_up(priv); 10612 10591 mutex_unlock(&priv->mutex); 10613 10592 } 10614 10593 ··· 10629 10606 10630 10607 if (!(priv->status & STATUS_EXIT_PENDING)) { 10631 10608 /* Queue up another scan... */ 10632 - queue_work(priv->workqueue, &priv->request_scan); 10609 + queue_delayed_work(priv->workqueue, &priv->request_scan, 0); 10633 10610 } 10634 10611 } 10635 10612 10636 - static void ipw_bg_link_down(void *data) 10613 + static void ipw_bg_link_down(struct work_struct *work) 10637 10614 { 10638 - struct ipw_priv *priv = data; 10615 + struct ipw_priv *priv = 10616 + container_of(work, struct ipw_priv, link_down); 10639 10617 mutex_lock(&priv->mutex); 10640 - ipw_link_down(data); 10618 + ipw_link_down(priv); 10641 10619 mutex_unlock(&priv->mutex); 10642 10620 } 10643 10621 ··· 10650 10626 init_waitqueue_head(&priv->wait_command_queue); 10651 10627 init_waitqueue_head(&priv->wait_state); 10652 10628 10653 - INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv); 10654 - INIT_WORK(&priv->associate, ipw_bg_associate, priv); 10655 - INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv); 10656 - INIT_WORK(&priv->system_config, ipw_system_config, priv); 10657 - INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, 
priv); 10658 - INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv); 10659 - INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv); 10660 - INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv); 10661 - INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv); 10662 - INIT_WORK(&priv->request_scan, 10663 - (void (*)(void *))ipw_request_scan, priv); 10664 - INIT_WORK(&priv->request_passive_scan, 10665 - (void (*)(void *))ipw_request_passive_scan, priv); 10666 - INIT_WORK(&priv->gather_stats, 10667 - (void (*)(void *))ipw_bg_gather_stats, priv); 10668 - INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv); 10669 - INIT_WORK(&priv->roam, ipw_bg_roam, priv); 10670 - INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv); 10671 - INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv); 10672 - INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv); 10673 - INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on, 10674 - priv); 10675 - INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off, 10676 - priv); 10677 - INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off, 10678 - priv); 10679 - INIT_WORK(&priv->merge_networks, 10680 - (void (*)(void *))ipw_merge_adhoc_network, priv); 10629 + INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check); 10630 + INIT_WORK(&priv->associate, ipw_bg_associate); 10631 + INIT_WORK(&priv->disassociate, ipw_bg_disassociate); 10632 + INIT_WORK(&priv->system_config, ipw_system_config); 10633 + INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish); 10634 + INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart); 10635 + INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill); 10636 + INIT_WORK(&priv->up, ipw_bg_up); 10637 + INIT_WORK(&priv->down, ipw_bg_down); 10638 + INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); 10639 + INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan); 10640 + INIT_DELAYED_WORK(&priv->gather_stats, 
ipw_bg_gather_stats); 10641 + INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); 10642 + INIT_WORK(&priv->roam, ipw_bg_roam); 10643 + INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check); 10644 + INIT_WORK(&priv->link_up, ipw_bg_link_up); 10645 + INIT_WORK(&priv->link_down, ipw_bg_link_down); 10646 + INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on); 10647 + INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off); 10648 + INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off); 10649 + INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network); 10681 10650 10682 10651 #ifdef CONFIG_IPW2200_QOS 10683 - INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate, 10684 - priv); 10652 + INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); 10685 10653 #endif /* CONFIG_IPW2200_QOS */ 10686 10654 10687 10655 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) ··· 11206 11190 11207 11191 /* If configure to try and auto-associate, kick 11208 11192 * off a scan. 
*/ 11209 - queue_work(priv->workqueue, &priv->request_scan); 11193 + queue_delayed_work(priv->workqueue, 11194 + &priv->request_scan, 0); 11210 11195 11211 11196 return 0; 11212 11197 } ··· 11228 11211 return -EIO; 11229 11212 } 11230 11213 11231 - static void ipw_bg_up(void *data) 11214 + static void ipw_bg_up(struct work_struct *work) 11232 11215 { 11233 - struct ipw_priv *priv = data; 11216 + struct ipw_priv *priv = 11217 + container_of(work, struct ipw_priv, up); 11234 11218 mutex_lock(&priv->mutex); 11235 - ipw_up(data); 11219 + ipw_up(priv); 11236 11220 mutex_unlock(&priv->mutex); 11237 11221 } 11238 11222 ··· 11300 11282 ipw_led_radio_off(priv); 11301 11283 } 11302 11284 11303 - static void ipw_bg_down(void *data) 11285 + static void ipw_bg_down(struct work_struct *work) 11304 11286 { 11305 - struct ipw_priv *priv = data; 11287 + struct ipw_priv *priv = 11288 + container_of(work, struct ipw_priv, down); 11306 11289 mutex_lock(&priv->mutex); 11307 - ipw_down(data); 11290 + ipw_down(priv); 11308 11291 mutex_unlock(&priv->mutex); 11309 11292 } 11310 11293
+8 -8
drivers/net/wireless/ipw2200.h
··· 1290 1290 1291 1291 struct workqueue_struct *workqueue; 1292 1292 1293 - struct work_struct adhoc_check; 1293 + struct delayed_work adhoc_check; 1294 1294 struct work_struct associate; 1295 1295 struct work_struct disassociate; 1296 1296 struct work_struct system_config; 1297 1297 struct work_struct rx_replenish; 1298 - struct work_struct request_scan; 1298 + struct delayed_work request_scan; 1299 1299 struct work_struct request_passive_scan; 1300 1300 struct work_struct adapter_restart; 1301 - struct work_struct rf_kill; 1301 + struct delayed_work rf_kill; 1302 1302 struct work_struct up; 1303 1303 struct work_struct down; 1304 - struct work_struct gather_stats; 1304 + struct delayed_work gather_stats; 1305 1305 struct work_struct abort_scan; 1306 1306 struct work_struct roam; 1307 - struct work_struct scan_check; 1307 + struct delayed_work scan_check; 1308 1308 struct work_struct link_up; 1309 1309 struct work_struct link_down; 1310 1310 ··· 1319 1319 u32 led_ofdm_on; 1320 1320 u32 led_ofdm_off; 1321 1321 1322 - struct work_struct led_link_on; 1323 - struct work_struct led_link_off; 1324 - struct work_struct led_act_off; 1322 + struct delayed_work led_link_on; 1323 + struct delayed_work led_link_off; 1324 + struct delayed_work led_act_off; 1325 1325 struct work_struct merge_networks; 1326 1326 1327 1327 struct ipw_cmd_log *cmdlog;
+17 -11
drivers/net/wireless/orinoco.c
··· 980 980 } 981 981 982 982 /* Search scan results for requested BSSID, join it if found */ 983 - static void orinoco_join_ap(struct net_device *dev) 983 + static void orinoco_join_ap(struct work_struct *work) 984 984 { 985 - struct orinoco_private *priv = netdev_priv(dev); 985 + struct orinoco_private *priv = 986 + container_of(work, struct orinoco_private, join_work); 987 + struct net_device *dev = priv->ndev; 986 988 struct hermes *hw = &priv->hw; 987 989 int err; 988 990 unsigned long flags; ··· 1057 1055 } 1058 1056 1059 1057 /* Send new BSSID to userspace */ 1060 - static void orinoco_send_wevents(struct net_device *dev) 1058 + static void orinoco_send_wevents(struct work_struct *work) 1061 1059 { 1062 - struct orinoco_private *priv = netdev_priv(dev); 1060 + struct orinoco_private *priv = 1061 + container_of(work, struct orinoco_private, wevent_work); 1062 + struct net_device *dev = priv->ndev; 1063 1063 struct hermes *hw = &priv->hw; 1064 1064 union iwreq_data wrqu; 1065 1065 int err; ··· 1868 1864 1869 1865 /* This must be called from user context, without locks held - use 1870 1866 * schedule_work() */ 1871 - static void orinoco_reset(struct net_device *dev) 1867 + static void orinoco_reset(struct work_struct *work) 1872 1868 { 1873 - struct orinoco_private *priv = netdev_priv(dev); 1869 + struct orinoco_private *priv = 1870 + container_of(work, struct orinoco_private, reset_work); 1871 + struct net_device *dev = priv->ndev; 1874 1872 struct hermes *hw = &priv->hw; 1875 1873 int err; 1876 1874 unsigned long flags; ··· 2440 2434 priv->hw_unavailable = 1; /* orinoco_init() must clear this 2441 2435 * before anything else touches the 2442 2436 * hardware */ 2443 - INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev); 2444 - INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev); 2445 - INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev); 2437 + INIT_WORK(&priv->reset_work, orinoco_reset); 2438 + 
INIT_WORK(&priv->join_work, orinoco_join_ap); 2439 + INIT_WORK(&priv->wevent_work, orinoco_send_wevents); 2446 2440 2447 2441 netif_carrier_off(dev); 2448 2442 priv->last_linkstatus = 0xffff; ··· 3614 3608 printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name); 3615 3609 3616 3610 /* Firmware reset */ 3617 - orinoco_reset(dev); 3611 + orinoco_reset(&priv->reset_work); 3618 3612 } else { 3619 3613 printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); 3620 3614 ··· 4160 4154 return 0; 4161 4155 4162 4156 if (priv->broken_disableport) { 4163 - orinoco_reset(dev); 4157 + orinoco_reset(&priv->reset_work); 4164 4158 return 0; 4165 4159 } 4166 4160
+5 -3
drivers/net/wireless/prism54/isl_ioctl.c
··· 158 158 * schedule_work(), thus we can as well use sleeping semaphore 159 159 * locking */ 160 160 void 161 - prism54_update_stats(islpci_private *priv) 161 + prism54_update_stats(struct work_struct *work) 162 162 { 163 + islpci_private *priv = container_of(work, islpci_private, stats_work); 163 164 char *data; 164 165 int j; 165 166 struct obj_bss bss, *bss2; ··· 2495 2494 * interrupt context, no locks held. 2496 2495 */ 2497 2496 void 2498 - prism54_process_trap(void *data) 2497 + prism54_process_trap(struct work_struct *work) 2499 2498 { 2500 - struct islpci_mgmtframe *frame = data; 2499 + struct islpci_mgmtframe *frame = 2500 + container_of(work, struct islpci_mgmtframe, ws); 2501 2501 struct net_device *ndev = frame->ndev; 2502 2502 enum oid_num_t n = mgt_oidtonum(frame->header->oid); 2503 2503
+2 -2
drivers/net/wireless/prism54/isl_ioctl.h
··· 32 32 void prism54_mib_init(islpci_private *); 33 33 34 34 struct iw_statistics *prism54_get_wireless_stats(struct net_device *); 35 - void prism54_update_stats(islpci_private *); 35 + void prism54_update_stats(struct work_struct *); 36 36 37 37 void prism54_acl_init(struct islpci_acl *); 38 38 void prism54_acl_clean(struct islpci_acl *); 39 39 40 - void prism54_process_trap(void *); 40 + void prism54_process_trap(struct work_struct *); 41 41 42 42 void prism54_wpa_bss_ie_init(islpci_private *priv); 43 43 void prism54_wpa_bss_ie_clean(islpci_private *priv);
+2 -3
drivers/net/wireless/prism54/islpci_dev.c
··· 861 861 priv->state_off = 1; 862 862 863 863 /* initialize workqueue's */ 864 - INIT_WORK(&priv->stats_work, 865 - (void (*)(void *)) prism54_update_stats, priv); 864 + INIT_WORK(&priv->stats_work, prism54_update_stats); 866 865 priv->stats_timestamp = 0; 867 866 868 - INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv); 867 + INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake); 869 868 priv->reset_task_pending = 0; 870 869 871 870 /* allocate various memory areas */
+2 -2
drivers/net/wireless/prism54/islpci_eth.c
··· 482 482 } 483 483 484 484 void 485 - islpci_do_reset_and_wake(void *data) 485 + islpci_do_reset_and_wake(struct work_struct *work) 486 486 { 487 - islpci_private *priv = (islpci_private *) data; 487 + islpci_private *priv = container_of(work, islpci_private, reset_task); 488 488 islpci_reset(priv, 1); 489 489 netif_wake_queue(priv->ndev); 490 490 priv->reset_task_pending = 0;
+1 -1
drivers/net/wireless/prism54/islpci_eth.h
··· 68 68 int islpci_eth_transmit(struct sk_buff *, struct net_device *); 69 69 int islpci_eth_receive(islpci_private *); 70 70 void islpci_eth_tx_timeout(struct net_device *); 71 - void islpci_do_reset_and_wake(void *data); 71 + void islpci_do_reset_and_wake(struct work_struct *); 72 72 73 73 #endif /* _ISL_GEN_H */
+1 -1
drivers/net/wireless/prism54/islpci_mgt.c
··· 387 387 388 388 /* Create work to handle trap out of interrupt 389 389 * context. */ 390 - INIT_WORK(&frame->ws, prism54_process_trap, frame); 390 + INIT_WORK(&frame->ws, prism54_process_trap); 391 391 schedule_work(&frame->ws); 392 392 393 393 } else {
+4 -3
drivers/net/wireless/zd1211rw/zd_mac.c
··· 1090 1090 1091 1091 #define LINK_LED_WORK_DELAY HZ 1092 1092 1093 - static void link_led_handler(void *p) 1093 + static void link_led_handler(struct work_struct *work) 1094 1094 { 1095 - struct zd_mac *mac = p; 1095 + struct zd_mac *mac = 1096 + container_of(work, struct zd_mac, housekeeping.link_led_work.work); 1096 1097 struct zd_chip *chip = &mac->chip; 1097 1098 struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev); 1098 1099 int is_associated; ··· 1114 1113 1115 1114 static void housekeeping_init(struct zd_mac *mac) 1116 1115 { 1117 - INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac); 1116 + INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler); 1118 1117 } 1119 1118 1120 1119 static void housekeeping_enable(struct zd_mac *mac)
+1 -1
drivers/net/wireless/zd1211rw/zd_mac.h
··· 121 121 }; 122 122 123 123 struct housekeeping { 124 - struct work_struct link_led_work; 124 + struct delayed_work link_led_work; 125 125 }; 126 126 127 127 #define ZD_MAC_STATS_BUFFER_SIZE 16
+5 -4
drivers/oprofile/cpu_buffer.c
··· 29 29 30 30 struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned; 31 31 32 - static void wq_sync_buffer(void *); 32 + static void wq_sync_buffer(struct work_struct *work); 33 33 34 34 #define DEFAULT_TIMER_EXPIRE (HZ / 10) 35 35 static int work_enabled; ··· 65 65 b->sample_received = 0; 66 66 b->sample_lost_overflow = 0; 67 67 b->cpu = i; 68 - INIT_WORK(&b->work, wq_sync_buffer, b); 68 + INIT_DELAYED_WORK(&b->work, wq_sync_buffer); 69 69 } 70 70 return 0; 71 71 ··· 282 282 * By using schedule_delayed_work_on and then schedule_delayed_work 283 283 * we guarantee this will stay on the correct cpu 284 284 */ 285 - static void wq_sync_buffer(void * data) 285 + static void wq_sync_buffer(struct work_struct *work) 286 286 { 287 - struct oprofile_cpu_buffer * b = data; 287 + struct oprofile_cpu_buffer * b = 288 + container_of(work, struct oprofile_cpu_buffer, work.work); 288 289 if (b->cpu != smp_processor_id()) { 289 290 printk("WQ on CPU%d, prefer CPU%d\n", 290 291 smp_processor_id(), b->cpu);
+1 -1
drivers/oprofile/cpu_buffer.h
··· 43 43 unsigned long sample_lost_overflow; 44 44 unsigned long backtrace_aborted; 45 45 int cpu; 46 - struct work_struct work; 46 + struct delayed_work work; 47 47 } ____cacheline_aligned; 48 48 49 49 extern struct oprofile_cpu_buffer cpu_buffer[];
+2 -2
drivers/pci/hotplug/shpchp.h
··· 70 70 struct hotplug_slot *hotplug_slot; 71 71 struct list_head slot_list; 72 72 char name[SLOT_NAME_SIZE]; 73 - struct work_struct work; /* work for button event */ 73 + struct delayed_work work; /* work for button event */ 74 74 struct mutex lock; 75 75 }; 76 76 ··· 187 187 extern int shpchp_unconfigure_device(struct slot *p_slot); 188 188 extern void shpchp_remove_ctrl_files(struct controller *ctrl); 189 189 extern void cleanup_slots(struct controller *ctrl); 190 - extern void queue_pushbutton_work(void *data); 190 + extern void queue_pushbutton_work(struct work_struct *work); 191 191 192 192 193 193 #ifdef CONFIG_ACPI
+1 -1
drivers/pci/hotplug/shpchp_core.c
··· 159 159 goto error_info; 160 160 161 161 slot->number = sun; 162 - INIT_WORK(&slot->work, queue_pushbutton_work, slot); 162 + INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work); 163 163 164 164 /* register this slot with the hotplug pci core */ 165 165 hotplug_slot->private = slot;
+10 -9
drivers/pci/hotplug/shpchp_ctrl.c
··· 36 36 #include "../pci.h" 37 37 #include "shpchp.h" 38 38 39 - static void interrupt_event_handler(void *data); 39 + static void interrupt_event_handler(struct work_struct *work); 40 40 static int shpchp_enable_slot(struct slot *p_slot); 41 41 static int shpchp_disable_slot(struct slot *p_slot); 42 42 ··· 50 50 51 51 info->event_type = event_type; 52 52 info->p_slot = p_slot; 53 - INIT_WORK(&info->work, interrupt_event_handler, info); 53 + INIT_WORK(&info->work, interrupt_event_handler); 54 54 55 55 schedule_work(&info->work); 56 56 ··· 408 408 * Handles all pending events and exits. 409 409 * 410 410 */ 411 - static void shpchp_pushbutton_thread(void *data) 411 + static void shpchp_pushbutton_thread(struct work_struct *work) 412 412 { 413 - struct pushbutton_work_info *info = data; 413 + struct pushbutton_work_info *info = 414 + container_of(work, struct pushbutton_work_info, work); 414 415 struct slot *p_slot = info->p_slot; 415 416 416 417 mutex_lock(&p_slot->lock); ··· 437 436 kfree(info); 438 437 } 439 438 440 - void queue_pushbutton_work(void *data) 439 + void queue_pushbutton_work(struct work_struct *work) 441 440 { 442 - struct slot *p_slot = data; 441 + struct slot *p_slot = container_of(work, struct slot, work.work); 443 442 struct pushbutton_work_info *info; 444 443 445 444 info = kmalloc(sizeof(*info), GFP_KERNEL); ··· 448 447 return; 449 448 } 450 449 info->p_slot = p_slot; 451 - INIT_WORK(&info->work, shpchp_pushbutton_thread, info); 450 + INIT_WORK(&info->work, shpchp_pushbutton_thread); 452 451 453 452 mutex_lock(&p_slot->lock); 454 453 switch (p_slot->state) { ··· 542 541 } 543 542 } 544 543 545 - static void interrupt_event_handler(void *data) 544 + static void interrupt_event_handler(struct work_struct *work) 546 545 { 547 - struct event_info *info = data; 546 + struct event_info *info = container_of(work, struct event_info, work); 548 547 struct slot *p_slot = info->p_slot; 549 548 550 549 mutex_lock(&p_slot->lock);
+4 -3
drivers/pcmcia/ds.c
··· 698 698 } 699 699 700 700 701 - static void pcmcia_delayed_add_pseudo_device(void *data) 701 + static void pcmcia_delayed_add_pseudo_device(struct work_struct *work) 702 702 { 703 - struct pcmcia_socket *s = data; 703 + struct pcmcia_socket *s = 704 + container_of(work, struct pcmcia_socket, device_add); 704 705 pcmcia_device_add(s, 0); 705 706 s->pcmcia_state.device_add_pending = 0; 706 707 } ··· 1247 1246 init_waitqueue_head(&socket->queue); 1248 1247 #endif 1249 1248 INIT_LIST_HEAD(&socket->devices_list); 1250 - INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device, socket); 1249 + INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device); 1251 1250 memset(&socket->pcmcia_state, 0, sizeof(u8)); 1252 1251 socket->device_count = 0; 1253 1252
+4 -3
drivers/rtc/rtc-dev.c
··· 53 53 * Routine to poll RTC seconds field for change as often as possible, 54 54 * after first RTC_UIE use timer to reduce polling 55 55 */ 56 - static void rtc_uie_task(void *data) 56 + static void rtc_uie_task(struct work_struct *work) 57 57 { 58 - struct rtc_device *rtc = data; 58 + struct rtc_device *rtc = 59 + container_of(work, struct rtc_device, uie_task); 59 60 struct rtc_time tm; 60 61 int num = 0; 61 62 int err; ··· 399 398 spin_lock_init(&rtc->irq_lock); 400 399 init_waitqueue_head(&rtc->irq_queue); 401 400 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 402 - INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc); 401 + INIT_WORK(&rtc->uie_task, rtc_uie_task); 403 402 setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); 404 403 #endif 405 404
+6 -5
drivers/scsi/NCR5380.c
··· 849 849 hostdata->issue_queue = NULL; 850 850 hostdata->disconnected_queue = NULL; 851 851 852 - INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata); 852 + INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main); 853 853 854 854 #ifdef NCR5380_STATS 855 855 for (i = 0; i < 8; ++i) { ··· 1016 1016 1017 1017 /* Run the coroutine if it isn't already running. */ 1018 1018 /* Kick off command processing */ 1019 - schedule_work(&hostdata->coroutine); 1019 + schedule_delayed_work(&hostdata->coroutine, 0); 1020 1020 return 0; 1021 1021 } 1022 1022 ··· 1033 1033 * host lock and called routines may take the isa dma lock. 1034 1034 */ 1035 1035 1036 - static void NCR5380_main(void *p) 1036 + static void NCR5380_main(struct work_struct *work) 1037 1037 { 1038 - struct NCR5380_hostdata *hostdata = p; 1038 + struct NCR5380_hostdata *hostdata = 1039 + container_of(work, struct NCR5380_hostdata, coroutine.work); 1039 1040 struct Scsi_Host *instance = hostdata->host; 1040 1041 Scsi_Cmnd *tmp, *prev; 1041 1042 int done; ··· 1222 1221 } /* if BASR_IRQ */ 1223 1222 spin_unlock_irqrestore(instance->host_lock, flags); 1224 1223 if(!done) 1225 - schedule_work(&hostdata->coroutine); 1224 + schedule_delayed_work(&hostdata->coroutine, 0); 1226 1225 } while (!done); 1227 1226 return IRQ_HANDLED; 1228 1227 }
+2 -2
drivers/scsi/NCR5380.h
··· 271 271 unsigned long time_expires; /* in jiffies, set prior to sleeping */ 272 272 int select_time; /* timer in select for target response */ 273 273 volatile Scsi_Cmnd *selecting; 274 - struct work_struct coroutine; /* our co-routine */ 274 + struct delayed_work coroutine; /* our co-routine */ 275 275 #ifdef NCR5380_STATS 276 276 unsigned timebase; /* Base for time calcs */ 277 277 long time_read[8]; /* time to do reads */ ··· 298 298 #ifndef DONT_USE_INTR 299 299 static irqreturn_t NCR5380_intr(int irq, void *dev_id); 300 300 #endif 301 - static void NCR5380_main(void *ptr); 301 + static void NCR5380_main(struct work_struct *work); 302 302 static void NCR5380_print_options(struct Scsi_Host *instance); 303 303 #ifdef NDEBUG 304 304 static void NCR5380_print_phase(struct Scsi_Host *instance);
+2 -2
drivers/scsi/aha152x.c
··· 1443 1443 * Run service completions on the card with interrupts enabled. 1444 1444 * 1445 1445 */ 1446 - static void run(void) 1446 + static void run(struct work_struct *work) 1447 1447 { 1448 1448 struct aha152x_hostdata *hd; 1449 1449 ··· 1499 1499 HOSTDATA(shpnt)->service=1; 1500 1500 1501 1501 /* Poke the BH handler */ 1502 - INIT_WORK(&aha152x_tq, (void *) run, NULL); 1502 + INIT_WORK(&aha152x_tq, run); 1503 1503 schedule_work(&aha152x_tq); 1504 1504 } 1505 1505 DO_UNLOCK(flags);
+5 -7
drivers/scsi/imm.c
··· 36 36 int base_hi; /* Hi Base address for ECP-ISA chipset */ 37 37 int mode; /* Transfer mode */ 38 38 struct scsi_cmnd *cur_cmd; /* Current queued command */ 39 - struct work_struct imm_tq; /* Polling interrupt stuff */ 39 + struct delayed_work imm_tq; /* Polling interrupt stuff */ 40 40 unsigned long jstart; /* Jiffies at start */ 41 41 unsigned failed:1; /* Failure flag */ 42 42 unsigned dp:1; /* Data phase present */ ··· 733 733 * the scheduler's task queue to generate a stream of call-backs and 734 734 * complete the request when the drive is ready. 735 735 */ 736 - static void imm_interrupt(void *data) 736 + static void imm_interrupt(struct work_struct *work) 737 737 { 738 - imm_struct *dev = (imm_struct *) data; 738 + imm_struct *dev = container_of(work, imm_struct, imm_tq.work); 739 739 struct scsi_cmnd *cmd = dev->cur_cmd; 740 740 struct Scsi_Host *host = cmd->device->host; 741 741 unsigned long flags; ··· 745 745 return; 746 746 } 747 747 if (imm_engine(dev, cmd)) { 748 - INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev); 749 748 schedule_delayed_work(&dev->imm_tq, 1); 750 749 return; 751 750 } ··· 952 953 cmd->result = DID_ERROR << 16; /* default return code */ 953 954 cmd->SCp.phase = 0; /* bus free */ 954 955 955 - INIT_WORK(&dev->imm_tq, imm_interrupt, dev); 956 - schedule_work(&dev->imm_tq); 956 + schedule_delayed_work(&dev->imm_tq, 0); 957 957 958 958 imm_pb_claim(dev); 959 959 ··· 1223 1225 else 1224 1226 ports = 8; 1225 1227 1226 - INIT_WORK(&dev->imm_tq, imm_interrupt, dev); 1228 + INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt); 1227 1229 1228 1230 err = -ENOMEM; 1229 1231 host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
+5 -4
drivers/scsi/ipr.c
··· 2093 2093 2094 2094 /** 2095 2095 * ipr_worker_thread - Worker thread 2096 - * @data: ioa config struct 2096 + * @work: ioa config struct 2097 2097 * 2098 2098 * Called at task level from a work thread. This function takes care 2099 2099 * of adding and removing device from the mid-layer as configuration ··· 2102 2102 * Return value: 2103 2103 * nothing 2104 2104 **/ 2105 - static void ipr_worker_thread(void *data) 2105 + static void ipr_worker_thread(struct work_struct *work) 2106 2106 { 2107 2107 unsigned long lock_flags; 2108 2108 struct ipr_resource_entry *res; 2109 2109 struct scsi_device *sdev; 2110 2110 struct ipr_dump *dump; 2111 - struct ipr_ioa_cfg *ioa_cfg = data; 2111 + struct ipr_ioa_cfg *ioa_cfg = 2112 + container_of(work, struct ipr_ioa_cfg, work_q); 2112 2113 u8 bus, target, lun; 2113 2114 int did_work; 2114 2115 ··· 6927 6926 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); 6928 6927 INIT_LIST_HEAD(&ioa_cfg->free_res_q); 6929 6928 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 6930 - INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); 6929 + INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 6931 6930 init_waitqueue_head(&ioa_cfg->reset_wait_q); 6932 6931 ioa_cfg->sdt_state = INACTIVE; 6933 6932 if (ipr_enable_cache)
+4 -3
drivers/scsi/libiscsi.c
··· 719 719 return rc; 720 720 } 721 721 722 - static void iscsi_xmitworker(void *data) 722 + static void iscsi_xmitworker(struct work_struct *work) 723 723 { 724 - struct iscsi_conn *conn = data; 724 + struct iscsi_conn *conn = 725 + container_of(work, struct iscsi_conn, xmitwork); 725 726 int rc; 726 727 /* 727 728 * serialize Xmit worker on a per-connection basis. ··· 1513 1512 if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) 1514 1513 goto mgmtqueue_alloc_fail; 1515 1514 1516 - INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn); 1515 + INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 1517 1516 1518 1517 /* allocate login_mtask used for the login/text sequences */ 1519 1518 spin_lock_bh(&session->lock);
+14 -8
drivers/scsi/libsas/sas_discover.c
··· 647 647 * Discover process only interrogates devices in order to discover the 648 648 * domain. 649 649 */ 650 - static void sas_discover_domain(void *data) 650 + static void sas_discover_domain(struct work_struct *work) 651 651 { 652 652 int error = 0; 653 - struct asd_sas_port *port = data; 653 + struct sas_discovery_event *ev = 654 + container_of(work, struct sas_discovery_event, work); 655 + struct asd_sas_port *port = ev->port; 654 656 655 657 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, 656 658 &port->disc.pending); ··· 694 692 current->pid, error); 695 693 } 696 694 697 - static void sas_revalidate_domain(void *data) 695 + static void sas_revalidate_domain(struct work_struct *work) 698 696 { 699 697 int res = 0; 700 - struct asd_sas_port *port = data; 698 + struct sas_discovery_event *ev = 699 + container_of(work, struct sas_discovery_event, work); 700 + struct asd_sas_port *port = ev->port; 701 701 702 702 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, 703 703 &port->disc.pending); ··· 726 722 BUG_ON(ev >= DISC_NUM_EVENTS); 727 723 728 724 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, 729 - &disc->disc_work[ev], port->ha->core.shost); 725 + &disc->disc_work[ev].work, port->ha->core.shost); 730 726 731 727 return 0; 732 728 } ··· 741 737 { 742 738 int i; 743 739 744 - static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = { 740 + static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = { 745 741 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, 746 742 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, 747 743 }; 748 744 749 745 spin_lock_init(&disc->disc_event_lock); 750 746 disc->pending = 0; 751 - for (i = 0; i < DISC_NUM_EVENTS; i++) 752 - INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port); 747 + for (i = 0; i < DISC_NUM_EVENTS; i++) { 748 + INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]); 749 + disc->disc_work[i].port = port; 750 + } 753 751 }
+8 -6
drivers/scsi/libsas/sas_event.c
··· 31 31 BUG_ON(event >= HA_NUM_EVENTS); 32 32 33 33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, 34 - &sas_ha->ha_events[event], sas_ha->core.shost); 34 + &sas_ha->ha_events[event].work, sas_ha->core.shost); 35 35 } 36 36 37 37 static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) ··· 41 41 BUG_ON(event >= PORT_NUM_EVENTS); 42 42 43 43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, 44 - &phy->port_events[event], ha->core.shost); 44 + &phy->port_events[event].work, ha->core.shost); 45 45 } 46 46 47 47 static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) ··· 51 51 BUG_ON(event >= PHY_NUM_EVENTS); 52 52 53 53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, 54 - &phy->phy_events[event], ha->core.shost); 54 + &phy->phy_events[event].work, ha->core.shost); 55 55 } 56 56 57 57 int sas_init_events(struct sas_ha_struct *sas_ha) 58 58 { 59 - static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = { 59 + static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = { 60 60 [HAE_RESET] = sas_hae_reset, 61 61 }; 62 62 ··· 64 64 65 65 spin_lock_init(&sas_ha->event_lock); 66 66 67 - for (i = 0; i < HA_NUM_EVENTS; i++) 68 - INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha); 67 + for (i = 0; i < HA_NUM_EVENTS; i++) { 68 + INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); 69 + sas_ha->ha_events[i].ha = sas_ha; 70 + } 69 71 70 72 sas_ha->notify_ha_event = notify_ha_event; 71 73 sas_ha->notify_port_event = notify_port_event;
+4 -2
drivers/scsi/libsas/sas_init.c
··· 65 65 66 66 /* ---------- HA events ---------- */ 67 67 68 - void sas_hae_reset(void *data) 68 + void sas_hae_reset(struct work_struct *work) 69 69 { 70 - struct sas_ha_struct *ha = data; 70 + struct sas_ha_event *ev = 71 + container_of(work, struct sas_ha_event, work); 72 + struct sas_ha_struct *ha = ev->ha; 71 73 72 74 sas_begin_event(HAE_RESET, &ha->event_lock, 73 75 &ha->pending);
+6 -6
drivers/scsi/libsas/sas_internal.h
··· 60 60 61 61 void sas_deform_port(struct asd_sas_phy *phy); 62 62 63 - void sas_porte_bytes_dmaed(void *); 64 - void sas_porte_broadcast_rcvd(void *); 65 - void sas_porte_link_reset_err(void *); 66 - void sas_porte_timer_event(void *); 67 - void sas_porte_hard_reset(void *); 63 + void sas_porte_bytes_dmaed(struct work_struct *work); 64 + void sas_porte_broadcast_rcvd(struct work_struct *work); 65 + void sas_porte_link_reset_err(struct work_struct *work); 66 + void sas_porte_timer_event(struct work_struct *work); 67 + void sas_porte_hard_reset(struct work_struct *work); 68 68 69 69 int sas_notify_lldd_dev_found(struct domain_device *); 70 70 void sas_notify_lldd_dev_gone(struct domain_device *); ··· 75 75 76 76 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); 77 77 78 - void sas_hae_reset(void *); 78 + void sas_hae_reset(struct work_struct *work); 79 79 80 80 static inline void sas_queue_event(int event, spinlock_t *lock, 81 81 unsigned long *pending,
+29 -16
drivers/scsi/libsas/sas_phy.c
··· 30 30 31 31 /* ---------- Phy events ---------- */ 32 32 33 - static void sas_phye_loss_of_signal(void *data) 33 + static void sas_phye_loss_of_signal(struct work_struct *work) 34 34 { 35 - struct asd_sas_phy *phy = data; 35 + struct asd_sas_event *ev = 36 + container_of(work, struct asd_sas_event, work); 37 + struct asd_sas_phy *phy = ev->phy; 36 38 37 39 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, 38 40 &phy->phy_events_pending); ··· 42 40 sas_deform_port(phy); 43 41 } 44 42 45 - static void sas_phye_oob_done(void *data) 43 + static void sas_phye_oob_done(struct work_struct *work) 46 44 { 47 - struct asd_sas_phy *phy = data; 45 + struct asd_sas_event *ev = 46 + container_of(work, struct asd_sas_event, work); 47 + struct asd_sas_phy *phy = ev->phy; 48 48 49 49 sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, 50 50 &phy->phy_events_pending); 51 51 phy->error = 0; 52 52 } 53 53 54 - static void sas_phye_oob_error(void *data) 54 + static void sas_phye_oob_error(struct work_struct *work) 55 55 { 56 - struct asd_sas_phy *phy = data; 56 + struct asd_sas_event *ev = 57 + container_of(work, struct asd_sas_event, work); 58 + struct asd_sas_phy *phy = ev->phy; 57 59 struct sas_ha_struct *sas_ha = phy->ha; 58 60 struct asd_sas_port *port = phy->port; 59 61 struct sas_internal *i = ··· 86 80 } 87 81 } 88 82 89 - static void sas_phye_spinup_hold(void *data) 83 + static void sas_phye_spinup_hold(struct work_struct *work) 90 84 { 91 - struct asd_sas_phy *phy = data; 85 + struct asd_sas_event *ev = 86 + container_of(work, struct asd_sas_event, work); 87 + struct asd_sas_phy *phy = ev->phy; 92 88 struct sas_ha_struct *sas_ha = phy->ha; 93 89 struct sas_internal *i = 94 90 to_sas_internal(sas_ha->core.shost->transportt); ··· 108 100 { 109 101 int i; 110 102 111 - static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = { 103 + static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = { 112 104 [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, 113 105 
[PHYE_OOB_DONE] = sas_phye_oob_done, 114 106 [PHYE_OOB_ERROR] = sas_phye_oob_error, 115 107 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, 116 108 }; 117 109 118 - static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = { 110 + static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = { 119 111 [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, 120 112 [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, 121 113 [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, ··· 130 122 131 123 phy->error = 0; 132 124 INIT_LIST_HEAD(&phy->port_phy_el); 133 - for (k = 0; k < PORT_NUM_EVENTS; k++) 134 - INIT_WORK(&phy->port_events[k], sas_port_event_fns[k], 135 - phy); 125 + for (k = 0; k < PORT_NUM_EVENTS; k++) { 126 + INIT_WORK(&phy->port_events[k].work, 127 + sas_port_event_fns[k]); 128 + phy->port_events[k].phy = phy; 129 + } 136 130 137 - for (k = 0; k < PHY_NUM_EVENTS; k++) 138 - INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k], 139 - phy); 131 + for (k = 0; k < PHY_NUM_EVENTS; k++) { 132 + INIT_WORK(&phy->phy_events[k].work, 133 + sas_phy_event_fns[k]); 134 + phy->phy_events[k].phy = phy; 135 + } 136 + 140 137 phy->port = NULL; 141 138 phy->ha = sas_ha; 142 139 spin_lock_init(&phy->frame_rcvd_lock);
+20 -10
drivers/scsi/libsas/sas_port.c
··· 181 181 182 182 /* ---------- SAS port events ---------- */ 183 183 184 - void sas_porte_bytes_dmaed(void *data) 184 + void sas_porte_bytes_dmaed(struct work_struct *work) 185 185 { 186 - struct asd_sas_phy *phy = data; 186 + struct asd_sas_event *ev = 187 + container_of(work, struct asd_sas_event, work); 188 + struct asd_sas_phy *phy = ev->phy; 187 189 188 190 sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, 189 191 &phy->port_events_pending); ··· 193 191 sas_form_port(phy); 194 192 } 195 193 196 - void sas_porte_broadcast_rcvd(void *data) 194 + void sas_porte_broadcast_rcvd(struct work_struct *work) 197 195 { 196 + struct asd_sas_event *ev = 197 + container_of(work, struct asd_sas_event, work); 198 + struct asd_sas_phy *phy = ev->phy; 198 199 unsigned long flags; 199 200 u32 prim; 200 - struct asd_sas_phy *phy = data; 201 201 202 202 sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, 203 203 &phy->port_events_pending); ··· 212 208 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); 213 209 } 214 210 215 - void sas_porte_link_reset_err(void *data) 211 + void sas_porte_link_reset_err(struct work_struct *work) 216 212 { 217 - struct asd_sas_phy *phy = data; 213 + struct asd_sas_event *ev = 214 + container_of(work, struct asd_sas_event, work); 215 + struct asd_sas_phy *phy = ev->phy; 218 216 219 217 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, 220 218 &phy->port_events_pending); ··· 224 218 sas_deform_port(phy); 225 219 } 226 220 227 - void sas_porte_timer_event(void *data) 221 + void sas_porte_timer_event(struct work_struct *work) 228 222 { 229 - struct asd_sas_phy *phy = data; 223 + struct asd_sas_event *ev = 224 + container_of(work, struct asd_sas_event, work); 225 + struct asd_sas_phy *phy = ev->phy; 230 226 231 227 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, 232 228 &phy->port_events_pending); ··· 236 228 sas_deform_port(phy); 237 229 } 238 230 239 - void sas_porte_hard_reset(void *data) 231 + void sas_porte_hard_reset(struct work_struct *work) 240 232 { 241 - struct asd_sas_phy *phy = data; 233 + struct asd_sas_event *ev = 234 + container_of(work, struct asd_sas_event, work); 235 + struct asd_sas_phy *phy = ev->phy; 242 236 243 237 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, 244 238 &phy->port_events_pending);
+5 -7
drivers/scsi/ppa.c
··· 31 31 int base; /* Actual port address */ 32 32 int mode; /* Transfer mode */ 33 33 struct scsi_cmnd *cur_cmd; /* Current queued command */ 34 - struct work_struct ppa_tq; /* Polling interrupt stuff */ 34 + struct delayed_work ppa_tq; /* Polling interrupt stuff */ 35 35 unsigned long jstart; /* Jiffies at start */ 36 36 unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ 37 37 unsigned int failed:1; /* Failure flag */ ··· 627 627 * the scheduler's task queue to generate a stream of call-backs and 628 628 * complete the request when the drive is ready. 629 629 */ 630 - static void ppa_interrupt(void *data) 630 + static void ppa_interrupt(struct work_struct *work) 631 631 { 632 - ppa_struct *dev = (ppa_struct *) data; 632 + ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work); 633 633 struct scsi_cmnd *cmd = dev->cur_cmd; 634 634 635 635 if (!cmd) { ··· 637 637 return; 638 638 } 639 639 if (ppa_engine(dev, cmd)) { 640 - dev->ppa_tq.data = (void *) dev; 641 640 schedule_delayed_work(&dev->ppa_tq, 1); 642 641 return; 643 642 } ··· 821 822 cmd->result = DID_ERROR << 16; /* default return code */ 822 823 cmd->SCp.phase = 0; /* bus free */ 823 824 824 - dev->ppa_tq.data = dev; 825 - schedule_work(&dev->ppa_tq); 825 + schedule_delayed_work(&dev->ppa_tq, 0); 826 826 827 827 ppa_pb_claim(dev); 828 828 ··· 1084 1086 else 1085 1087 ports = 8; 1086 1088 1087 - INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev); 1089 + INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt); 1088 1090 1089 1091 err = -ENOMEM; 1090 1092 host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
+4 -3
drivers/scsi/qla4xxx/ql4_os.c
··· 1011 1011 * the mid-level tries to sleep when it reaches the driver threshold 1012 1012 * "host->can_queue". This can cause a panic if we were in our interrupt code. 1013 1013 **/ 1014 - static void qla4xxx_do_dpc(void *data) 1014 + static void qla4xxx_do_dpc(struct work_struct *work) 1015 1015 { 1016 - struct scsi_qla_host *ha = (struct scsi_qla_host *) data; 1016 + struct scsi_qla_host *ha = 1017 + container_of(work, struct scsi_qla_host, dpc_work); 1017 1018 struct ddb_entry *ddb_entry, *dtemp; 1018 1019 1019 1020 DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n", ··· 1316 1315 ret = -ENODEV; 1317 1316 goto probe_failed; 1318 1317 } 1319 - INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha); 1318 + INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); 1320 1319 1321 1320 ret = request_irq(pdev->irq, qla4xxx_intr_handler, 1322 1321 SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
+31 -29
drivers/scsi/scsi_transport_fc.c
··· 241 241 #define FC_MGMTSRVR_PORTID 0x00000a 242 242 243 243 244 - static void fc_timeout_deleted_rport(void *data); 245 - static void fc_timeout_fail_rport_io(void *data); 246 - static void fc_scsi_scan_rport(void *data); 244 + static void fc_timeout_deleted_rport(struct work_struct *work); 245 + static void fc_timeout_fail_rport_io(struct work_struct *work); 246 + static void fc_scsi_scan_rport(struct work_struct *work); 247 247 248 248 /* 249 249 * Attribute counts pre object type... ··· 1613 1613 * 1 on success / 0 already queued / < 0 for error 1614 1614 **/ 1615 1615 static int 1616 - fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, 1616 + fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, 1617 1617 unsigned long delay) 1618 1618 { 1619 1619 if (unlikely(!fc_host_devloss_work_q(shost))) { ··· 1624 1624 1625 1625 return -EINVAL; 1626 1626 } 1627 - 1628 - if (delay == 0) 1629 - return queue_work(fc_host_devloss_work_q(shost), work); 1630 1627 1631 1628 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); 1632 1629 } ··· 1709 1712 * fc_starget_delete - called to delete the scsi decendents of an rport 1710 1713 * (target and all sdevs) 1711 1714 * 1712 - * @data: remote port to be operated on. 1715 + * @work: remote port to be operated on. 1713 1716 **/ 1714 1717 static void 1715 - fc_starget_delete(void *data) 1718 + fc_starget_delete(struct work_struct *work) 1716 1719 { 1717 - struct fc_rport *rport = (struct fc_rport *)data; 1720 + struct fc_rport *rport = 1721 + container_of(work, struct fc_rport, stgt_delete_work); 1718 1722 struct Scsi_Host *shost = rport_to_shost(rport); 1719 1723 unsigned long flags; 1720 1724 struct fc_internal *i = to_fc_internal(shost->transportt); ··· 1749 1751 /** 1750 1752 * fc_rport_final_delete - finish rport termination and delete it. 1751 1753 * 1752 - * @data: remote port to be deleted. 1754 + * @work: remote port to be deleted.
1753 1755 **/ 1754 1756 static void 1755 - fc_rport_final_delete(void *data) 1757 + fc_rport_final_delete(struct work_struct *work) 1756 1758 { 1757 - struct fc_rport *rport = (struct fc_rport *)data; 1759 + struct fc_rport *rport = 1760 + container_of(work, struct fc_rport, rport_delete_work); 1758 1761 struct device *dev = &rport->dev; 1759 1762 struct Scsi_Host *shost = rport_to_shost(rport); 1760 1763 struct fc_internal *i = to_fc_internal(shost->transportt); ··· 1769 1770 1770 1771 /* Delete SCSI target and sdevs */ 1771 1772 if (rport->scsi_target_id != -1) 1772 - fc_starget_delete(data); 1773 + fc_starget_delete(&rport->stgt_delete_work); 1773 1774 else if (i->f->dev_loss_tmo_callbk) 1774 1775 i->f->dev_loss_tmo_callbk(rport); 1775 1776 else if (i->f->terminate_rport_io) ··· 1828 1829 rport->channel = channel; 1829 1830 rport->fast_io_fail_tmo = -1; 1830 1831 1831 - INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); 1832 - INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport); 1833 - INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); 1834 - INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); 1835 - INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); 1832 + INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport); 1833 + INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io); 1834 + INIT_WORK(&rport->scan_work, fc_scsi_scan_rport); 1835 + INIT_WORK(&rport->stgt_delete_work, fc_starget_delete); 1836 + INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete); 1836 1837 1837 1838 spin_lock_irqsave(shost->host_lock, flags); ··· 1962 1963 } 1963 1964 1964 1965 if (match) { 1965 - struct work_struct *work = 1966 + struct delayed_work *work = 1966 1967 &rport->dev_loss_work; 1967 1968 1968 1969 memcpy(&rport->node_name, &ids->node_name, ··· 2266 2267 * was a SCSI target (thus was blocked), and failed 2267 2268 * to return in the alloted time.
2268 2269 * 2269 - * @data: rport target that failed to reappear in the alloted time. 2270 + * @work: rport target that failed to reappear in the alloted time. 2270 2271 **/ 2271 2272 static void 2272 - fc_timeout_deleted_rport(void *data) 2273 + fc_timeout_deleted_rport(struct work_struct *work) 2273 2274 { 2274 - struct fc_rport *rport = (struct fc_rport *)data; 2275 + struct fc_rport *rport = 2276 + container_of(work, struct fc_rport, dev_loss_work.work); 2275 2277 struct Scsi_Host *shost = rport_to_shost(rport); 2276 2278 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2277 2279 unsigned long flags; ··· 2366 2366 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a 2367 2367 * disconnected SCSI target. 2368 2368 * 2369 - * @data: rport to terminate io on. 2369 + * @work: rport to terminate io on. 2370 2370 * 2371 2371 * Notes: Only requests the failure of the io, not that all are flushed 2372 2372 * prior to returning. 2373 2373 **/ 2374 2374 static void 2375 - fc_timeout_fail_rport_io(void *data) 2375 + fc_timeout_fail_rport_io(struct work_struct *work) 2376 2376 { 2377 - struct fc_rport *rport = (struct fc_rport *)data; 2377 + struct fc_rport *rport = 2378 + container_of(work, struct fc_rport, fail_io_work.work); 2378 2379 struct Scsi_Host *shost = rport_to_shost(rport); 2379 2380 struct fc_internal *i = to_fc_internal(shost->transportt); 2380 2381 ··· 2388 2387 /** 2389 2388 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. 2390 2389 * 2391 - * @data: remote port to be scanned. 2390 + * @work: remote port to be scanned. 2392 2391 **/ 2393 2392 static void 2394 - fc_scsi_scan_rport(void *data) 2393 + fc_scsi_scan_rport(struct work_struct *work) 2395 2394 { 2396 - struct fc_rport *rport = (struct fc_rport *)data; 2395 + struct fc_rport *rport = 2396 + container_of(work, struct fc_rport, scan_work); 2397 2397 struct Scsi_Host *shost = rport_to_shost(rport); 2398 2398 unsigned long flags; 2399 2399
+5 -3
drivers/scsi/scsi_transport_iscsi.c
··· 234 234 return 0; 235 235 } 236 236 237 - static void session_recovery_timedout(void *data) 237 + static void session_recovery_timedout(struct work_struct *work) 238 238 { 239 - struct iscsi_cls_session *session = data; 239 + struct iscsi_cls_session *session = 240 + container_of(work, struct iscsi_cls_session, 241 + recovery_work.work); 240 242 241 243 dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " 242 244 "out after %d secs\n", session->recovery_tmo); ··· 278 276 279 277 session->transport = transport; 280 278 session->recovery_tmo = 120; 281 - INIT_WORK(&session->recovery_work, session_recovery_timedout, session); 279 + INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); 282 280 INIT_LIST_HEAD(&session->host_list); 283 281 INIT_LIST_HEAD(&session->sess_list); 284 282
+4 -3
drivers/scsi/scsi_transport_spi.c
··· 964 964 }; 965 965 966 966 static void 967 - spi_dv_device_work_wrapper(void *data) 967 + spi_dv_device_work_wrapper(struct work_struct *work) 968 968 { 969 - struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; 969 + struct work_queue_wrapper *wqw = 970 + container_of(work, struct work_queue_wrapper, work); 970 971 struct scsi_device *sdev = wqw->sdev; 971 972 972 973 kfree(wqw); ··· 1007 1006 return; 1008 1007 } 1009 1008 1010 - INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw); 1009 + INIT_WORK(&wqw->work, spi_dv_device_work_wrapper); 1011 1010 wqw->sdev = sdev; 1012 1011 1013 1012 schedule_work(&wqw->work);
+4 -3
drivers/spi/spi_bitbang.c
··· 265 265 * Drivers can provide word-at-a-time i/o primitives, or provide 266 266 * transfer-at-a-time ones to leverage dma or fifo hardware. 267 267 */ 268 - static void bitbang_work(void *_bitbang) 268 + static void bitbang_work(struct work_struct *work) 269 269 { 270 - struct spi_bitbang *bitbang = _bitbang; 270 + struct spi_bitbang *bitbang = 271 + container_of(work, struct spi_bitbang, work); 271 272 unsigned long flags; 272 273 273 274 spin_lock_irqsave(&bitbang->lock, flags); ··· 457 456 if (!bitbang->master || !bitbang->chipselect) 458 457 return -EINVAL; 459 458 460 - INIT_WORK(&bitbang->work, bitbang_work, bitbang); 459 + INIT_WORK(&bitbang->work, bitbang_work); 461 460 spin_lock_init(&bitbang->lock); 462 461 INIT_LIST_HEAD(&bitbang->queue); 463 462
+7 -5
drivers/usb/atm/cxacru.c
··· 158 158 const struct cxacru_modem_type *modem_type; 159 159 160 160 int line_status; 161 - struct work_struct poll_work; 161 + struct delayed_work poll_work; 162 162 163 163 /* contol handles */ 164 164 struct mutex cm_serialize; ··· 347 347 return 0; 348 348 } 349 349 350 - static void cxacru_poll_status(struct cxacru_data *instance); 350 + static void cxacru_poll_status(struct work_struct *work); 351 351 352 352 static int cxacru_atm_start(struct usbatm_data *usbatm_instance, 353 353 struct atm_dev *atm_dev) ··· 376 376 } 377 377 378 378 /* Start status polling */ 379 - cxacru_poll_status(instance); 379 + cxacru_poll_status(&instance->poll_work.work); 380 380 return 0; 381 381 } 382 382 383 - static void cxacru_poll_status(struct cxacru_data *instance) 383 + static void cxacru_poll_status(struct work_struct *work) 384 384 { 385 + struct cxacru_data *instance = 386 + container_of(work, struct cxacru_data, poll_work.work); 385 387 u32 buf[CXINF_MAX] = {}; 386 388 struct usbatm_data *usbatm = instance->usbatm; 387 389 struct atm_dev *atm_dev = usbatm->atm_dev; ··· 722 720 723 721 mutex_init(&instance->cm_serialize); 724 722 725 - INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance); 723 + INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status); 726 724 727 725 usbatm_instance->driver_data = instance; 728 726
+9 -6
drivers/usb/atm/speedtch.c
··· 142 142 143 143 struct speedtch_params params; /* set in probe, constant afterwards */ 144 144 145 - struct work_struct status_checker; 145 + struct delayed_work status_checker; 146 146 147 147 unsigned char last_status; 148 148 ··· 498 498 return ret; 499 499 } 500 500 501 - static void speedtch_check_status(struct speedtch_instance_data *instance) 501 + static void speedtch_check_status(struct work_struct *work) 502 502 { 503 + struct speedtch_instance_data *instance = 504 + container_of(work, struct speedtch_instance_data, 505 + status_checker.work); 503 506 struct usbatm_data *usbatm = instance->usbatm; 504 507 struct atm_dev *atm_dev = usbatm->atm_dev; 505 508 unsigned char *buf = instance->scratch_buffer; ··· 579 576 { 580 577 struct speedtch_instance_data *instance = (void *)data; 581 578 582 - schedule_work(&instance->status_checker); 579 + schedule_delayed_work(&instance->status_checker, 0); 583 580 584 581 /* The following check is racy, but the race is harmless */ 585 582 if (instance->poll_delay < MAX_POLL_DELAY) ··· 599 596 if (int_urb) { 600 597 ret = usb_submit_urb(int_urb, GFP_ATOMIC); 601 598 if (!ret) 602 - schedule_work(&instance->status_checker); 599 + schedule_delayed_work(&instance->status_checker, 0); 603 600 else { 604 601 atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); 605 602 mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); ··· 643 640 644 641 if ((int_urb = instance->int_urb)) { 645 642 ret = usb_submit_urb(int_urb, GFP_ATOMIC); 646 - schedule_work(&instance->status_checker); 643 + schedule_delayed_work(&instance->status_checker, 0); 647 644 if (ret < 0) { 648 645 atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); 649 646 goto fail; ··· 858 855 859 856 usbatm->flags |= (use_isoc ? 
UDSL_USE_ISOC : 0); 860 857 861 - INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance); 858 + INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status); 862 859 863 860 instance->status_checker.timer.function = speedtch_status_poll; 864 861 instance->status_checker.timer.data = (unsigned long)instance;
+3 -3
drivers/usb/atm/ueagle-atm.c
··· 658 658 /* 659 659 * The uea_load_page() function must be called within a process context 660 660 */ 661 - static void uea_load_page(void *xsc) 661 + static void uea_load_page(struct work_struct *work) 662 662 { 663 - struct uea_softc *sc = xsc; 663 + struct uea_softc *sc = container_of(work, struct uea_softc, task); 664 664 u16 pageno = sc->pageno; 665 665 u16 ovl = sc->ovl; 666 666 struct block_info bi; ··· 1352 1352 1353 1353 uea_enters(INS_TO_USBDEV(sc)); 1354 1354 1355 - INIT_WORK(&sc->task, uea_load_page, sc); 1355 + INIT_WORK(&sc->task, uea_load_page); 1356 1356 init_waitqueue_head(&sc->sync_q); 1357 1357 init_waitqueue_head(&sc->cmv_ack_wait); 1358 1358
+3 -3
drivers/usb/class/cdc-acm.c
··· 421 421 schedule_work(&acm->work); 422 422 } 423 423 424 - static void acm_softint(void *private) 424 + static void acm_softint(struct work_struct *work) 425 425 { 426 - struct acm *acm = private; 426 + struct acm *acm = container_of(work, struct acm, work); 427 427 dbg("Entering acm_softint."); 428 428 429 429 if (!ACM_READY(acm)) ··· 927 927 acm->rx_buflimit = num_rx_buf; 928 928 acm->urb_task.func = acm_rx_tasklet; 929 929 acm->urb_task.data = (unsigned long) acm; 930 - INIT_WORK(&acm->work, acm_softint, acm); 930 + INIT_WORK(&acm->work, acm_softint); 931 931 spin_lock_init(&acm->throttle_lock); 932 932 spin_lock_init(&acm->write_lock); 933 933 spin_lock_init(&acm->read_lock);
+10 -8
drivers/usb/core/hub.c
··· 167 167 168 168 #define LED_CYCLE_PERIOD ((2*HZ)/3) 169 169 170 - static void led_work (void *__hub) 170 + static void led_work (struct work_struct *work) 171 171 { 172 - struct usb_hub *hub = __hub; 172 + struct usb_hub *hub = 173 + container_of(work, struct usb_hub, leds.work); 173 174 struct usb_device *hdev = hub->hdev; 174 175 unsigned i; 175 176 unsigned changed = 0; ··· 352 351 * talking to TTs must queue control transfers (not just bulk and iso), so 353 352 * both can talk to the same hub concurrently. 354 353 */ 355 - static void hub_tt_kevent (void *arg) 354 + static void hub_tt_kevent (struct work_struct *work) 356 355 { 357 - struct usb_hub *hub = arg; 356 + struct usb_hub *hub = 357 + container_of(work, struct usb_hub, tt.kevent); 358 358 unsigned long flags; 359 359 360 360 spin_lock_irqsave (&hub->tt.lock, flags); ··· 643 641 644 642 spin_lock_init (&hub->tt.lock); 645 643 INIT_LIST_HEAD (&hub->tt.clear_list); 646 - INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub); 644 + INIT_WORK (&hub->tt.kevent, hub_tt_kevent); 647 645 switch (hdev->descriptor.bDeviceProtocol) { 648 646 case 0: 649 647 break; ··· 882 880 INIT_LIST_HEAD(&hub->event_list); 883 881 hub->intfdev = &intf->dev; 884 882 hub->hdev = hdev; 885 - INIT_WORK(&hub->leds, led_work, hub); 883 + INIT_DELAYED_WORK(&hub->leds, led_work); 886 884 887 885 usb_set_intfdata (intf, hub); 888 886 ··· 2283 2281 /* hub LEDs are probably harder to miss than syslog */ 2284 2282 if (hub->has_indicators) { 2285 2283 hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; 2286 - schedule_work (&hub->leds); 2284 + schedule_delayed_work (&hub->leds, 0); 2287 2285 } 2288 2286 } 2289 2287 kfree(qual); ··· 2457 2455 if (hub->has_indicators) { 2458 2456 hub->indicator[port1-1] = 2459 2457 INDICATOR_AMBER_BLINK; 2460 - schedule_work (&hub->leds); 2458 + schedule_delayed_work (&hub->leds, 0); 2461 2459 } 2462 2460 status = -ENOTCONN; /* Don't retry */ 2463 2461 goto loop_disable;
+1 -1
drivers/usb/core/hub.h
··· 230 230 231 231 unsigned has_indicators:1; 232 232 enum hub_led_mode indicator[USB_MAXCHILDREN]; 233 - struct work_struct leds; 233 + struct delayed_work leds; 234 234 }; 235 235 236 236 #endif /* __LINUX_HUB_H */
+4 -3
drivers/usb/core/message.c
··· 1501 1501 }; 1502 1502 1503 1503 /* Worker routine for usb_driver_set_configuration() */ 1504 - static void driver_set_config_work(void *_req) 1504 + static void driver_set_config_work(struct work_struct *work) 1505 1505 { 1506 - struct set_config_request *req = _req; 1506 + struct set_config_request *req = 1507 + container_of(work, struct set_config_request, work); 1507 1508 1508 1509 usb_lock_device(req->udev); 1509 1510 usb_set_configuration(req->udev, req->config); ··· 1542 1541 return -ENOMEM; 1543 1542 req->udev = udev; 1544 1543 req->config = config; 1545 - INIT_WORK(&req->work, driver_set_config_work, req); 1544 + INIT_WORK(&req->work, driver_set_config_work); 1546 1545 1547 1546 usb_get_dev(udev); 1548 1547 if (!schedule_work(&req->work)) {
+5 -4
drivers/usb/core/usb.c
··· 210 210 #ifdef CONFIG_USB_SUSPEND 211 211 212 212 /* usb_autosuspend_work - callback routine to autosuspend a USB device */ 213 - static void usb_autosuspend_work(void *_udev) 213 + static void usb_autosuspend_work(struct work_struct *work) 214 214 { 215 - struct usb_device *udev = _udev; 215 + struct usb_device *udev = 216 + container_of(work, struct usb_device, autosuspend.work); 216 217 217 218 usb_pm_lock(udev); 218 219 udev->auto_pm = 1; ··· 223 222 224 223 #else 225 224 226 - static void usb_autosuspend_work(void *_udev) 225 + static void usb_autosuspend_work(struct work_struct *work) 227 226 {} 228 227 229 228 #endif ··· 305 304 306 305 #ifdef CONFIG_PM 307 306 mutex_init(&dev->pm_mutex); 308 - INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev); 307 + INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work); 309 308 #endif 310 309 return dev; 311 310 }
+3 -3
drivers/usb/gadget/ether.c
··· 1833 1833 spin_unlock_irqrestore(&dev->req_lock, flags); 1834 1834 } 1835 1835 1836 - static void eth_work (void *_dev) 1836 + static void eth_work (struct work_struct *work) 1837 1837 { 1838 - struct eth_dev *dev = _dev; 1838 + struct eth_dev *dev = container_of(work, struct eth_dev, work); 1839 1839 1840 1840 if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) { 1841 1841 if (netif_running (dev->net)) ··· 2398 2398 dev = netdev_priv(net); 2399 2399 spin_lock_init (&dev->lock); 2400 2400 spin_lock_init (&dev->req_lock); 2401 - INIT_WORK (&dev->work, eth_work, dev); 2401 + INIT_WORK (&dev->work, eth_work); 2402 2402 INIT_LIST_HEAD (&dev->tx_reqs); 2403 2403 INIT_LIST_HEAD (&dev->rx_reqs); 2404 2404
+24 -38
drivers/usb/host/u132-hcd.c
··· 163 163 u16 queue_next; 164 164 struct urb *urb_list[ENDP_QUEUE_SIZE]; 165 165 struct list_head urb_more; 166 - struct work_struct scheduler; 166 + struct delayed_work scheduler; 167 167 }; 168 168 struct u132_ring { 169 169 unsigned in_use:1; ··· 171 171 u8 number; 172 172 struct u132 *u132; 173 173 struct u132_endp *curr_endp; 174 - struct work_struct scheduler; 174 + struct delayed_work scheduler; 175 175 }; 176 176 #define OHCI_QUIRK_AMD756 0x01 177 177 #define OHCI_QUIRK_SUPERIO 0x02 ··· 198 198 u32 hc_roothub_portstatus[MAX_ROOT_PORTS]; 199 199 int flags; 200 200 unsigned long next_statechange; 201 - struct work_struct monitor; 201 + struct delayed_work monitor; 202 202 int num_endpoints; 203 203 struct u132_addr addr[MAX_U132_ADDRS]; 204 204 struct u132_udev udev[MAX_U132_UDEVS]; ··· 314 314 if (delta > 0) { 315 315 if (queue_delayed_work(workqueue, &ring->scheduler, delta)) 316 316 return; 317 - } else if (queue_work(workqueue, &ring->scheduler)) 317 + } else if (queue_delayed_work(workqueue, &ring->scheduler, 0)) 318 318 return; 319 319 kref_put(&u132->kref, u132_hcd_delete); 320 320 return; ··· 393 393 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp, 394 394 unsigned int delta) 395 395 { 396 - if (delta > 0) { 397 - if (queue_delayed_work(workqueue, &endp->scheduler, delta)) 398 - kref_get(&endp->kref); 399 - } else if (queue_work(workqueue, &endp->scheduler)) 400 - kref_get(&endp->kref); 401 - return; 396 + if (queue_delayed_work(workqueue, &endp->scheduler, delta)) 397 + kref_get(&endp->kref); 402 398 } 403 399 404 400 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp) ··· 410 414 411 415 static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta) 412 416 { 413 - if (delta > 0) { 414 - if (queue_delayed_work(workqueue, &u132->monitor, delta)) { 415 - kref_get(&u132->kref); 416 - } 417 - } else if (queue_work(workqueue, &u132->monitor)) 418 - kref_get(&u132->kref); 419 - return; 417 
+ if (queue_delayed_work(workqueue, &u132->monitor, delta)) 418 + kref_get(&u132->kref); 420 419 } 421 420 422 421 static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta) 423 422 { 424 - if (delta > 0) { 425 - if (queue_delayed_work(workqueue, &u132->monitor, delta)) 426 - return; 427 - } else if (queue_work(workqueue, &u132->monitor)) 428 - return; 429 - kref_put(&u132->kref, u132_hcd_delete); 430 - return; 423 + if (!queue_delayed_work(workqueue, &u132->monitor, delta)) 424 + kref_put(&u132->kref, u132_hcd_delete); 431 425 } 432 426 433 427 static void u132_monitor_cancel_work(struct u132 *u132) ··· 479 493 return 0; 480 494 } 481 495 482 - static void u132_hcd_monitor_work(void *data) 496 + static void u132_hcd_monitor_work(struct work_struct *work) 483 497 { 484 - struct u132 *u132 = data; 498 + struct u132 *u132 = container_of(work, struct u132, monitor.work); 485 499 if (u132->going > 1) { 486 500 dev_err(&u132->platform_dev->dev, "device has been removed %d\n" 487 501 , u132->going); ··· 1305 1319 } 1306 1320 } 1307 1321 1308 - static void u132_hcd_ring_work_scheduler(void *data); 1309 - static void u132_hcd_endp_work_scheduler(void *data); 1310 1322 /* 1311 1323 * this work function is only executed from the work queue 1312 1324 * 1313 1325 */ 1314 - static void u132_hcd_ring_work_scheduler(void *data) 1326 + static void u132_hcd_ring_work_scheduler(struct work_struct *work) 1315 1327 { 1316 - struct u132_ring *ring = data; 1328 + struct u132_ring *ring = 1329 + container_of(work, struct u132_ring, scheduler.work); 1317 1330 struct u132 *u132 = ring->u132; 1318 1331 down(&u132->scheduler_lock); 1319 1332 if (ring->in_use) { ··· 1371 1386 } 1372 1387 } 1373 1388 1374 - static void u132_hcd_endp_work_scheduler(void *data) 1389 + static void u132_hcd_endp_work_scheduler(struct work_struct *work) 1375 1390 { 1376 1391 struct u132_ring *ring; 1377 - struct u132_endp *endp = data; 1392 + struct u132_endp *endp = 1393 + container_of(work, 
struct u132_endp, scheduler.work); 1378 1394 struct u132 *u132 = endp->u132; 1379 1395 down(&u132->scheduler_lock); 1380 1396 ring = endp->ring; ··· 1933 1947 if (!endp) { 1934 1948 return -ENOMEM; 1935 1949 } 1936 - INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); 1950 + INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); 1937 1951 spin_lock_init(&endp->queue_lock.slock); 1938 1952 INIT_LIST_HEAD(&endp->urb_more); 1939 1953 ring = endp->ring = &u132->ring[0]; ··· 2022 2036 if (!endp) { 2023 2037 return -ENOMEM; 2024 2038 } 2025 - INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); 2039 + INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); 2026 2040 spin_lock_init(&endp->queue_lock.slock); 2027 2041 INIT_LIST_HEAD(&endp->urb_more); 2028 2042 endp->dequeueing = 0; ··· 2107 2121 if (!endp) { 2108 2122 return -ENOMEM; 2109 2123 } 2110 - INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); 2124 + INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); 2111 2125 spin_lock_init(&endp->queue_lock.slock); 2112 2126 INIT_LIST_HEAD(&endp->urb_more); 2113 2127 ring = endp->ring = &u132->ring[0]; ··· 3086 3100 ring->number = rings + 1; 3087 3101 ring->length = 0; 3088 3102 ring->curr_endp = NULL; 3089 - INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler, 3090 - (void *)ring); 3103 + INIT_DELAYED_WORK(&ring->scheduler, 3104 + u132_hcd_ring_work_scheduler); 3091 3105 } down(&u132->sw_lock); 3092 - INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132); 3106 + INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work); 3093 3107 while (ports-- > 0) { 3094 3108 struct u132_port *port = &u132->port[ports]; 3095 3109 port->u132 = u132;
+4 -3
drivers/usb/input/hid-core.c
··· 969 969 } 970 970 971 971 /* Workqueue routine to reset the device */ 972 - static void hid_reset(void *_hid) 972 + static void hid_reset(struct work_struct *work) 973 973 { 974 - struct hid_device *hid = (struct hid_device *) _hid; 974 + struct hid_device *hid = 975 + container_of(work, struct hid_device, reset_work); 975 976 int rc_lock, rc; 976 977 977 978 dev_dbg(&hid->intf->dev, "resetting device\n"); ··· 2016 2015 2017 2016 init_waitqueue_head(&hid->wait); 2018 2017 2019 - INIT_WORK(&hid->reset_work, hid_reset, hid); 2018 + INIT_WORK(&hid->reset_work, hid_reset); 2020 2019 setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid); 2021 2020 2022 2021 spin_lock_init(&hid->inlock);
+28 -58
drivers/usb/misc/ftdi-elan.c
··· 156 156 struct usb_device *udev; 157 157 struct usb_interface *interface; 158 158 struct usb_class_driver *class; 159 - struct work_struct status_work; 160 - struct work_struct command_work; 161 - struct work_struct respond_work; 159 + struct delayed_work status_work; 160 + struct delayed_work command_work; 161 + struct delayed_work respond_work; 162 162 struct u132_platform_data platform_data; 163 163 struct resource resources[0]; 164 164 struct platform_device platform_dev; ··· 210 210 211 211 static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) 212 212 { 213 - if (delta > 0) { 214 - if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) 215 - return; 216 - } else if (queue_work(status_queue, &ftdi->status_work)) 217 - return; 218 - kref_put(&ftdi->kref, ftdi_elan_delete); 219 - return; 213 + if (!queue_delayed_work(status_queue, &ftdi->status_work, delta)) 214 + kref_put(&ftdi->kref, ftdi_elan_delete); 220 215 } 221 216 222 217 static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta) 223 218 { 224 - if (delta > 0) { 225 - if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) 226 - kref_get(&ftdi->kref); 227 - } else if (queue_work(status_queue, &ftdi->status_work)) 228 - kref_get(&ftdi->kref); 229 - return; 219 + if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) 220 + kref_get(&ftdi->kref); 230 221 } 231 222 232 223 static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) ··· 228 237 229 238 static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) 230 239 { 231 - if (delta > 0) { 232 - if (queue_delayed_work(command_queue, &ftdi->command_work, 233 - delta)) 234 - return; 235 - } else if (queue_work(command_queue, &ftdi->command_work)) 236 - return; 237 - kref_put(&ftdi->kref, ftdi_elan_delete); 238 - return; 240 + if (!queue_delayed_work(command_queue, &ftdi->command_work, delta)) 241 + kref_put(&ftdi->kref, ftdi_elan_delete); 239 242 } 240 243 241 
244 static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta) 242 245 { 243 - if (delta > 0) { 244 - if (queue_delayed_work(command_queue, &ftdi->command_work, 245 - delta)) 246 - kref_get(&ftdi->kref); 247 - } else if (queue_work(command_queue, &ftdi->command_work)) 248 - kref_get(&ftdi->kref); 249 - return; 246 + if (queue_delayed_work(command_queue, &ftdi->command_work, delta)) 247 + kref_get(&ftdi->kref); 250 248 } 251 249 252 250 static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) ··· 247 267 static void ftdi_response_requeue_work(struct usb_ftdi *ftdi, 248 268 unsigned int delta) 249 269 { 250 - if (delta > 0) { 251 - if (queue_delayed_work(respond_queue, &ftdi->respond_work, 252 - delta)) 253 - return; 254 - } else if (queue_work(respond_queue, &ftdi->respond_work)) 255 - return; 256 - kref_put(&ftdi->kref, ftdi_elan_delete); 257 - return; 270 + if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta)) 271 + kref_put(&ftdi->kref, ftdi_elan_delete); 258 272 } 259 273 260 274 static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta) 261 275 { 262 - if (delta > 0) { 263 - if (queue_delayed_work(respond_queue, &ftdi->respond_work, 264 - delta)) 265 - kref_get(&ftdi->kref); 266 - } else if (queue_work(respond_queue, &ftdi->respond_work)) 267 - kref_get(&ftdi->kref); 268 - return; 276 + if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta)) 277 + kref_get(&ftdi->kref); 269 278 } 270 279 271 280 static void ftdi_response_cancel_work(struct usb_ftdi *ftdi) ··· 444 475 return; 445 476 } 446 477 447 - static void ftdi_elan_command_work(void *data) 478 + static void ftdi_elan_command_work(struct work_struct *work) 448 479 { 449 - struct usb_ftdi *ftdi = data; 480 + struct usb_ftdi *ftdi = 481 + container_of(work, struct usb_ftdi, command_work.work); 482 + 450 483 if (ftdi->disconnected > 0) { 451 484 ftdi_elan_put_kref(ftdi); 452 485 return; ··· 471 500 return; 472 501 } 473 502 474 - static void 
ftdi_elan_respond_work(void *data) 503 + static void ftdi_elan_respond_work(struct work_struct *work) 475 504 { 476 - struct usb_ftdi *ftdi = data; 505 + struct usb_ftdi *ftdi = 506 + container_of(work, struct usb_ftdi, respond_work.work); 477 507 if (ftdi->disconnected > 0) { 478 508 ftdi_elan_put_kref(ftdi); 479 509 return; ··· 506 534 * after the FTDI has been synchronized 507 535 * 508 536 */ 509 - static void ftdi_elan_status_work(void *data) 537 + static void ftdi_elan_status_work(struct work_struct *work) 510 538 { 511 - struct usb_ftdi *ftdi = data; 539 + struct usb_ftdi *ftdi = 540 + container_of(work, struct usb_ftdi, status_work.work); 512 541 int work_delay_in_msec = 0; 513 542 if (ftdi->disconnected > 0) { 514 543 ftdi_elan_put_kref(ftdi); ··· 2664 2691 ftdi->class = NULL; 2665 2692 dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a" 2666 2693 "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber); 2667 - INIT_WORK(&ftdi->status_work, ftdi_elan_status_work, 2668 - (void *)ftdi); 2669 - INIT_WORK(&ftdi->command_work, ftdi_elan_command_work, 2670 - (void *)ftdi); 2671 - INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work, 2672 - (void *)ftdi); 2694 + INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work); 2695 + INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work); 2696 + INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work); 2673 2697 ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000)); 2674 2698 return 0; 2675 2699 } else {
+12 -9
drivers/usb/misc/phidgetkit.c
··· 81 81 unsigned char *data; 82 82 dma_addr_t data_dma; 83 83 84 - struct work_struct do_notify; 85 - struct work_struct do_resubmit; 84 + struct delayed_work do_notify; 85 + struct delayed_work do_resubmit; 86 86 unsigned long input_events; 87 87 unsigned long sensor_events; 88 88 }; ··· 374 374 } 375 375 376 376 if (kit->input_events || kit->sensor_events) 377 - schedule_work(&kit->do_notify); 377 + schedule_delayed_work(&kit->do_notify, 0); 378 378 379 379 resubmit: 380 380 status = usb_submit_urb(urb, SLAB_ATOMIC); ··· 384 384 kit->udev->devpath, status); 385 385 } 386 386 387 - static void do_notify(void *data) 387 + static void do_notify(struct work_struct *work) 388 388 { 389 - struct interfacekit *kit = data; 389 + struct interfacekit *kit = 390 + container_of(work, struct interfacekit, do_notify.work); 390 391 int i; 391 392 char sysfs_file[8]; 392 393 ··· 406 405 } 407 406 } 408 407 409 - static void do_resubmit(void *data) 408 + static void do_resubmit(struct work_struct *work) 410 409 { 411 - set_outputs(data); 410 + struct interfacekit *kit = 411 + container_of(work, struct interfacekit, do_resubmit.work); 412 + set_outputs(kit); 412 413 } 413 414 414 415 #define show_set_output(value) \ ··· 578 575 579 576 kit->udev = usb_get_dev(dev); 580 577 kit->intf = intf; 581 - INIT_WORK(&kit->do_notify, do_notify, kit); 582 - INIT_WORK(&kit->do_resubmit, do_resubmit, kit); 578 + INIT_DELAYED_WORK(&kit->do_notify, do_notify); 579 + INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit); 583 580 usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data, 584 581 maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, 585 582 interfacekit_irq, kit, endpoint->bInterval);
+6 -5
drivers/usb/misc/phidgetmotorcontrol.c
··· 41 41 unsigned char *data; 42 42 dma_addr_t data_dma; 43 43 44 - struct work_struct do_notify; 44 + struct delayed_work do_notify; 45 45 unsigned long input_events; 46 46 unsigned long speed_events; 47 47 unsigned long exceed_events; ··· 148 148 set_bit(1, &mc->exceed_events); 149 149 150 150 if (mc->input_events || mc->exceed_events || mc->speed_events) 151 - schedule_work(&mc->do_notify); 151 + schedule_delayed_work(&mc->do_notify, 0); 152 152 153 153 resubmit: 154 154 status = usb_submit_urb(urb, SLAB_ATOMIC); ··· 159 159 mc->udev->devpath, status); 160 160 } 161 161 162 - static void do_notify(void *data) 162 + static void do_notify(struct work_struct *work) 163 163 { 164 - struct motorcontrol *mc = data; 164 + struct motorcontrol *mc = 165 + container_of(work, struct motorcontrol, do_notify.work); 165 166 int i; 166 167 char sysfs_file[8]; 167 168 ··· 349 348 mc->udev = usb_get_dev(dev); 350 349 mc->intf = intf; 351 350 mc->acceleration[0] = mc->acceleration[1] = 10; 352 - INIT_WORK(&mc->do_notify, do_notify, mc); 351 + INIT_DELAYED_WORK(&mc->do_notify, do_notify); 353 352 usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data, 354 353 maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, 355 354 motorcontrol_irq, mc, endpoint->bInterval);
+5 -4
drivers/usb/net/kaweth.c
··· 222 222 int suspend_lowmem_ctrl; 223 223 int linkstate; 224 224 int opened; 225 - struct work_struct lowmem_work; 225 + struct delayed_work lowmem_work; 226 226 227 227 struct usb_device *dev; 228 228 struct net_device *net; ··· 530 530 kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); 531 531 } 532 532 533 - static void kaweth_resubmit_tl(void *d) 533 + static void kaweth_resubmit_tl(struct work_struct *work) 534 534 { 535 - struct kaweth_device *kaweth = (struct kaweth_device *)d; 535 + struct kaweth_device *kaweth = 536 + container_of(work, struct kaweth_device, lowmem_work.work); 536 537 537 538 if (IS_BLOCKED(kaweth->status)) 538 539 return; ··· 1127 1126 1128 1127 /* kaweth is zeroed as part of alloc_netdev */ 1129 1128 1130 - INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth); 1129 + INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); 1131 1130 1132 1131 SET_MODULE_OWNER(netdev); 1133 1132
+3 -3
drivers/usb/net/pegasus.c
··· 1280 1280 static struct workqueue_struct *pegasus_workqueue = NULL; 1281 1281 #define CARRIER_CHECK_DELAY (2 * HZ) 1282 1282 1283 - static void check_carrier(void *data) 1283 + static void check_carrier(struct work_struct *work) 1284 1284 { 1285 - pegasus_t *pegasus = data; 1285 + pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work); 1286 1286 set_carrier(pegasus->net); 1287 1287 if (!(pegasus->flags & PEGASUS_UNPLUG)) { 1288 1288 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, ··· 1318 1318 1319 1319 tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus); 1320 1320 1321 - INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus); 1321 + INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier); 1322 1322 1323 1323 pegasus->intf = intf; 1324 1324 pegasus->usb = dev;
+1 -1
drivers/usb/net/pegasus.h
··· 95 95 int dev_index; 96 96 int intr_interval; 97 97 struct tasklet_struct rx_tl; 98 - struct work_struct carrier_check; 98 + struct delayed_work carrier_check; 99 99 struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb; 100 100 struct sk_buff *rx_pool[RX_SKBS]; 101 101 struct sk_buff *rx_skb;
+4 -3
drivers/usb/net/usbnet.c
··· 782 782 * especially now that control transfers can be queued. 783 783 */ 784 784 static void 785 - kevent (void *data) 785 + kevent (struct work_struct *work) 786 786 { 787 - struct usbnet *dev = data; 787 + struct usbnet *dev = 788 + container_of(work, struct usbnet, kevent); 788 789 int status; 789 790 790 791 /* usb_clear_halt() needs a thread context */ ··· 1147 1146 skb_queue_head_init (&dev->done); 1148 1147 dev->bh.func = usbnet_bh; 1149 1148 dev->bh.data = (unsigned long) dev; 1150 - INIT_WORK (&dev->kevent, kevent, dev); 1149 + INIT_WORK (&dev->kevent, kevent); 1151 1150 dev->delay.function = usbnet_bh; 1152 1151 dev->delay.data = (unsigned long) dev; 1153 1152 init_timer (&dev->delay);
+8 -5
drivers/usb/serial/aircable.c
··· 92 92 struct circ_buf *rx_buf; /* read buffer */ 93 93 int rx_flags; /* for throttilng */ 94 94 struct work_struct rx_work; /* work cue for the receiving line */ 95 + struct usb_serial_port *port; /* USB port with which associated */ 95 96 }; 96 97 97 98 /* Private methods */ ··· 252 251 schedule_work(&port->work); 253 252 } 254 253 255 - static void aircable_read(void *params) 254 + static void aircable_read(struct work_struct *work) 256 255 { 257 - struct usb_serial_port *port = params; 258 - struct aircable_private *priv = usb_get_serial_port_data(port); 256 + struct aircable_private *priv = 257 + container_of(work, struct aircable_private, rx_work); 258 + struct usb_serial_port *port = priv->port; 259 259 struct tty_struct *tty; 260 260 unsigned char *data; 261 261 int count; ··· 350 348 } 351 349 352 350 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); 353 - INIT_WORK(&priv->rx_work, aircable_read, port); 351 + priv->port = port; 352 + INIT_WORK(&priv->rx_work, aircable_read); 354 353 355 354 usb_set_serial_port_data(serial->port[0], priv); 356 355 ··· 518 515 package_length - shift); 519 516 } 520 517 } 521 - aircable_read(port); 518 + aircable_read(&priv->rx_work); 522 519 } 523 520 524 521 /* Schedule the next read _if_ we are still open */
+8 -6
drivers/usb/serial/digi_acceleport.c
··· 430 430 int dp_in_close; /* close in progress */ 431 431 wait_queue_head_t dp_close_wait; /* wait queue for close */ 432 432 struct work_struct dp_wakeup_work; 433 + struct usb_serial_port *dp_port; 433 434 }; 434 435 435 436 436 437 /* Local Function Declarations */ 437 438 438 439 static void digi_wakeup_write( struct usb_serial_port *port ); 439 - static void digi_wakeup_write_lock(void *); 440 + static void digi_wakeup_write_lock(struct work_struct *work); 440 441 static int digi_write_oob_command( struct usb_serial_port *port, 441 442 unsigned char *buf, int count, int interruptible ); 442 443 static int digi_write_inb_command( struct usb_serial_port *port, ··· 599 598 * on writes. 600 599 */ 601 600 602 - static void digi_wakeup_write_lock(void *arg) 601 + static void digi_wakeup_write_lock(struct work_struct *work) 603 602 { 604 - struct usb_serial_port *port = arg; 603 + struct digi_port *priv = 604 + container_of(work, struct digi_port, dp_wakeup_work); 605 + struct usb_serial_port *port = priv->dp_port; 605 606 unsigned long flags; 606 - struct digi_port *priv = usb_get_serial_port_data(port); 607 607 608 608 609 609 spin_lock_irqsave( &priv->dp_port_lock, flags ); ··· 1704 1702 init_waitqueue_head( &priv->dp_flush_wait ); 1705 1703 priv->dp_in_close = 0; 1706 1704 init_waitqueue_head( &priv->dp_close_wait ); 1707 - INIT_WORK(&priv->dp_wakeup_work, 1708 - digi_wakeup_write_lock, serial->port[i]); 1705 + INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock); 1706 + priv->dp_port = serial->port[i]; 1709 1707 1710 1708 /* initialize write wait queue for this port */ 1711 1709 init_waitqueue_head( &serial->port[i]->write_wait );
+11 -8
drivers/usb/serial/ftdi_sio.c
··· 559 559 char prev_status, diff_status; /* Used for TIOCMIWAIT */ 560 560 __u8 rx_flags; /* receive state flags (throttling) */ 561 561 spinlock_t rx_lock; /* spinlock for receive state */ 562 - struct work_struct rx_work; 562 + struct delayed_work rx_work; 563 + struct usb_serial_port *port; 563 564 int rx_processed; 564 565 unsigned long rx_bytes; 565 566 ··· 594 593 static int ftdi_chars_in_buffer (struct usb_serial_port *port); 595 594 static void ftdi_write_bulk_callback (struct urb *urb); 596 595 static void ftdi_read_bulk_callback (struct urb *urb); 597 - static void ftdi_process_read (void *param); 596 + static void ftdi_process_read (struct work_struct *work); 598 597 static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old); 599 598 static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file); 600 599 static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear); ··· 1202 1201 port->read_urb->transfer_buffer_length = BUFSZ; 1203 1202 } 1204 1203 1205 - INIT_WORK(&priv->rx_work, ftdi_process_read, port); 1204 + INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read); 1205 + priv->port = port; 1206 1206 1207 1207 /* Free port's existing write urb and transfer buffer. 
*/ 1208 1208 if (port->write_urb) { ··· 1643 1641 priv->rx_bytes += countread; 1644 1642 spin_unlock_irqrestore(&priv->rx_lock, flags); 1645 1643 1646 - ftdi_process_read(port); 1644 + ftdi_process_read(&priv->rx_work.work); 1647 1645 1648 1646 } /* ftdi_read_bulk_callback */ 1649 1647 1650 1648 1651 - static void ftdi_process_read (void *param) 1649 + static void ftdi_process_read (struct work_struct *work) 1652 1650 { /* ftdi_process_read */ 1653 - struct usb_serial_port *port = (struct usb_serial_port*)param; 1651 + struct ftdi_private *priv = 1652 + container_of(work, struct ftdi_private, rx_work.work); 1653 + struct usb_serial_port *port = priv->port; 1654 1654 struct urb *urb; 1655 1655 struct tty_struct *tty; 1656 - struct ftdi_private *priv; 1657 1656 char error_flag; 1658 1657 unsigned char *data; 1659 1658 ··· 2183 2180 spin_unlock_irqrestore(&priv->rx_lock, flags); 2184 2181 2185 2182 if (actually_throttled) 2186 - schedule_work(&priv->rx_work); 2183 + schedule_delayed_work(&priv->rx_work, 0); 2187 2184 } 2188 2185 2189 2186 static int __init ftdi_init (void)
+14 -8
drivers/usb/serial/keyspan_pda.c
··· 120 120 int tx_throttled; 121 121 struct work_struct wakeup_work; 122 122 struct work_struct unthrottle_work; 123 + struct usb_serial *serial; 124 + struct usb_serial_port *port; 123 125 }; 124 126 125 127 ··· 177 175 }; 178 176 #endif 179 177 180 - static void keyspan_pda_wakeup_write( struct usb_serial_port *port ) 178 + static void keyspan_pda_wakeup_write(struct work_struct *work) 181 179 { 182 - 180 + struct keyspan_pda_private *priv = 181 + container_of(work, struct keyspan_pda_private, wakeup_work); 182 + struct usb_serial_port *port = priv->port; 183 183 struct tty_struct *tty = port->tty; 184 184 185 185 /* wake up port processes */ ··· 191 187 tty_wakeup(tty); 192 188 } 193 189 194 - static void keyspan_pda_request_unthrottle( struct usb_serial *serial ) 190 + static void keyspan_pda_request_unthrottle(struct work_struct *work) 195 191 { 192 + struct keyspan_pda_private *priv = 193 + container_of(work, struct keyspan_pda_private, unthrottle_work); 194 + struct usb_serial *serial = priv->serial; 196 195 int result; 197 196 198 197 dbg(" request_unthrottle"); ··· 772 765 return (1); /* error */ 773 766 usb_set_serial_port_data(serial->port[0], priv); 774 767 init_waitqueue_head(&serial->port[0]->write_wait); 775 - INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write, 776 - (void *)(serial->port[0])); 777 - INIT_WORK(&priv->unthrottle_work, 778 - (void *)keyspan_pda_request_unthrottle, 779 - (void *)(serial)); 768 + INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write); 769 + INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle); 770 + priv->serial = serial; 771 + priv->port = serial->port[0]; 780 772 return (0); 781 773 } 782 774
+4 -3
drivers/usb/serial/usb-serial.c
··· 533 533 schedule_work(&port->work); 534 534 } 535 535 536 - static void usb_serial_port_work(void *private) 536 + static void usb_serial_port_work(struct work_struct *work) 537 537 { 538 - struct usb_serial_port *port = private; 538 + struct usb_serial_port *port = 539 + container_of(work, struct usb_serial_port, work); 539 540 struct tty_struct *tty; 540 541 541 542 dbg("%s - port %d", __FUNCTION__, port->number); ··· 800 799 port->serial = serial; 801 800 spin_lock_init(&port->lock); 802 801 mutex_init(&port->mutex); 803 - INIT_WORK(&port->work, usb_serial_port_work, port); 802 + INIT_WORK(&port->work, usb_serial_port_work); 804 803 serial->port[i] = port; 805 804 } 806 805
+9 -6
drivers/usb/serial/whiteheat.c
··· 227 227 struct list_head rx_urbs_submitted; 228 228 struct list_head rx_urb_q; 229 229 struct work_struct rx_work; 230 + struct usb_serial_port *port; 230 231 struct list_head tx_urbs_free; 231 232 struct list_head tx_urbs_submitted; 232 233 }; ··· 242 241 static int start_port_read(struct usb_serial_port *port); 243 242 static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head); 244 243 static struct list_head *list_first(struct list_head *head); 245 - static void rx_data_softint(void *private); 244 + static void rx_data_softint(struct work_struct *work); 246 245 247 246 static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize); 248 247 static int firm_open(struct usb_serial_port *port); ··· 425 424 spin_lock_init(&info->lock); 426 425 info->flags = 0; 427 426 info->mcr = 0; 428 - INIT_WORK(&info->rx_work, rx_data_softint, port); 427 + INIT_WORK(&info->rx_work, rx_data_softint); 428 + info->port = port; 429 429 430 430 INIT_LIST_HEAD(&info->rx_urbs_free); 431 431 INIT_LIST_HEAD(&info->rx_urbs_submitted); ··· 951 949 spin_unlock_irqrestore(&info->lock, flags); 952 950 953 951 if (actually_throttled) 954 - rx_data_softint(port); 952 + rx_data_softint(&info->rx_work); 955 953 956 954 return; 957 955 } ··· 1402 1400 } 1403 1401 1404 1402 1405 - static void rx_data_softint(void *private) 1403 + static void rx_data_softint(struct work_struct *work) 1406 1404 { 1407 - struct usb_serial_port *port = (struct usb_serial_port *)private; 1408 - struct whiteheat_private *info = usb_get_serial_port_data(port); 1405 + struct whiteheat_private *info = 1406 + container_of(work, struct whiteheat_private, rx_work); 1407 + struct usb_serial_port *port = info->port; 1409 1408 struct tty_struct *tty = port->tty; 1410 1409 struct whiteheat_urb_wrap *wrap; 1411 1410 struct urb *urb;
+3 -3
drivers/video/console/fbcon.c
··· 383 383 softback_top = 0; 384 384 } 385 385 386 - static void fb_flashcursor(void *private) 386 + static void fb_flashcursor(struct work_struct *work) 387 387 { 388 - struct fb_info *info = private; 388 + struct fb_info *info = container_of(work, struct fb_info, queue); 389 389 struct fbcon_ops *ops = info->fbcon_par; 390 390 struct display *p; 391 391 struct vc_data *vc = NULL; ··· 442 442 if ((!info->queue.func || info->queue.func == fb_flashcursor) && 443 443 !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) { 444 444 if (!info->queue.func) 445 - INIT_WORK(&info->queue, fb_flashcursor, info); 445 + INIT_WORK(&info->queue, fb_flashcursor); 446 446 447 447 init_timer(&ops->cursor_timer); 448 448 ops->cursor_timer.function = cursor_timer_handler;
+8 -8
fs/9p/mux.c
··· 110 110 }; 111 111 112 112 static int v9fs_poll_proc(void *); 113 - static void v9fs_read_work(void *); 114 - static void v9fs_write_work(void *); 113 + static void v9fs_read_work(struct work_struct *work); 114 + static void v9fs_write_work(struct work_struct *work); 115 115 static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, 116 116 poll_table * p); 117 117 static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); ··· 297 297 m->rbuf = NULL; 298 298 m->wpos = m->wsize = 0; 299 299 m->wbuf = NULL; 300 - INIT_WORK(&m->rq, v9fs_read_work, m); 301 - INIT_WORK(&m->wq, v9fs_write_work, m); 300 + INIT_WORK(&m->rq, v9fs_read_work); 301 + INIT_WORK(&m->wq, v9fs_write_work); 302 302 m->wsched = 0; 303 303 memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); 304 304 m->poll_task = NULL; ··· 458 458 /** 459 459 * v9fs_write_work - called when a transport can send some data 460 460 */ 461 - static void v9fs_write_work(void *a) 461 + static void v9fs_write_work(struct work_struct *work) 462 462 { 463 463 int n, err; 464 464 struct v9fs_mux_data *m; 465 465 struct v9fs_req *req; 466 466 467 - m = a; 467 + m = container_of(work, struct v9fs_mux_data, wq); 468 468 469 469 if (m->err < 0) { 470 470 clear_bit(Wworksched, &m->wsched); ··· 564 564 /** 565 565 * v9fs_read_work - called when there is some data to be read from a transport 566 566 */ 567 - static void v9fs_read_work(void *a) 567 + static void v9fs_read_work(struct work_struct *work) 568 568 { 569 569 int n, err; 570 570 struct v9fs_mux_data *m; ··· 572 572 struct v9fs_fcall *rcall; 573 573 char *rbuf; 574 574 575 - m = a; 575 + m = container_of(work, struct v9fs_mux_data, rq); 576 576 577 577 if (m->err < 0) 578 578 return;
+4 -4
fs/gfs2/glock.c
··· 35 35 36 36 struct greedy { 37 37 struct gfs2_holder gr_gh; 38 - struct work_struct gr_work; 38 + struct delayed_work gr_work; 39 39 }; 40 40 41 41 struct gfs2_gl_hash_bucket { ··· 1368 1368 glops->go_xmote_th(gl, state, flags); 1369 1369 } 1370 1370 1371 - static void greedy_work(void *data) 1371 + static void greedy_work(struct work_struct *work) 1372 1372 { 1373 - struct greedy *gr = data; 1373 + struct greedy *gr = container_of(work, struct greedy, gr_work.work); 1374 1374 struct gfs2_holder *gh = &gr->gr_gh; 1375 1375 struct gfs2_glock *gl = gh->gh_gl; 1376 1376 const struct gfs2_glock_operations *glops = gl->gl_ops; ··· 1422 1422 1423 1423 gfs2_holder_init(gl, 0, 0, gh); 1424 1424 set_bit(HIF_GREEDY, &gh->gh_iflags); 1425 - INIT_WORK(&gr->gr_work, greedy_work, gr); 1425 + INIT_DELAYED_WORK(&gr->gr_work, greedy_work); 1426 1426 1427 1427 set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); 1428 1428 schedule_delayed_work(&gr->gr_work, time);
+4 -4
fs/ncpfs/inode.c
··· 577 577 server->rcv.ptr = (unsigned char*)&server->rcv.buf; 578 578 server->rcv.len = 10; 579 579 server->rcv.state = 0; 580 - INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server); 581 - INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server); 580 + INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc); 581 + INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc); 582 582 sock->sk->sk_write_space = ncp_tcp_write_space; 583 583 } else { 584 - INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server); 585 - INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server); 584 + INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc); 585 + INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc); 586 586 server->timeout_tm.data = (unsigned long)server; 587 587 server->timeout_tm.function = ncpdgram_timeout_call; 588 588 }
+12 -8
fs/ncpfs/sock.c
··· 350 350 } 351 351 } 352 352 353 - void ncpdgram_rcv_proc(void *s) 353 + void ncpdgram_rcv_proc(struct work_struct *work) 354 354 { 355 - struct ncp_server *server = s; 355 + struct ncp_server *server = 356 + container_of(work, struct ncp_server, rcv.tq); 356 357 struct socket* sock; 357 358 358 359 sock = server->ncp_sock; ··· 469 468 } 470 469 } 471 470 472 - void ncpdgram_timeout_proc(void *s) 471 + void ncpdgram_timeout_proc(struct work_struct *work) 473 472 { 474 - struct ncp_server *server = s; 473 + struct ncp_server *server = 474 + container_of(work, struct ncp_server, timeout_tq); 475 475 mutex_lock(&server->rcv.creq_mutex); 476 476 __ncpdgram_timeout_proc(server); 477 477 mutex_unlock(&server->rcv.creq_mutex); ··· 654 652 } 655 653 } 656 654 657 - void ncp_tcp_rcv_proc(void *s) 655 + void ncp_tcp_rcv_proc(struct work_struct *work) 658 656 { 659 - struct ncp_server *server = s; 657 + struct ncp_server *server = 658 + container_of(work, struct ncp_server, rcv.tq); 660 659 661 660 mutex_lock(&server->rcv.creq_mutex); 662 661 __ncptcp_rcv_proc(server); 663 662 mutex_unlock(&server->rcv.creq_mutex); 664 663 } 665 664 666 - void ncp_tcp_tx_proc(void *s) 665 + void ncp_tcp_tx_proc(struct work_struct *work) 667 666 { 668 - struct ncp_server *server = s; 667 + struct ncp_server *server = 668 + container_of(work, struct ncp_server, tx.tq); 669 669 670 670 mutex_lock(&server->rcv.creq_mutex); 671 671 __ncptcp_try_send(server);
+3 -4
fs/nfsd/nfs4state.c
··· 1829 1829 } 1830 1830 1831 1831 static struct workqueue_struct *laundry_wq; 1832 - static struct work_struct laundromat_work; 1833 - static void laundromat_main(void *); 1834 - static DECLARE_WORK(laundromat_work, laundromat_main, NULL); 1832 + static void laundromat_main(struct work_struct *); 1833 + static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); 1835 1834 1836 1835 __be32 1837 1836 nfsd4_renew(clientid_t *clid) ··· 1939 1940 } 1940 1941 1941 1942 void 1942 - laundromat_main(void *not_used) 1943 + laundromat_main(struct work_struct *not_used) 1943 1944 { 1944 1945 time_t t; 1945 1946
+6 -3
fs/ocfs2/alloc.c
··· 1205 1205 return status; 1206 1206 } 1207 1207 1208 - static void ocfs2_truncate_log_worker(void *data) 1208 + static void ocfs2_truncate_log_worker(struct work_struct *work) 1209 1209 { 1210 1210 int status; 1211 - struct ocfs2_super *osb = data; 1211 + struct ocfs2_super *osb = 1212 + container_of(work, struct ocfs2_super, 1213 + osb_truncate_log_wq.work); 1212 1214 1213 1215 mlog_entry_void(); 1214 1216 ··· 1443 1441 /* ocfs2_truncate_log_shutdown keys on the existence of 1444 1442 * osb->osb_tl_inode so we don't set any of the osb variables 1445 1443 * until we're sure all is well. */ 1446 - INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb); 1444 + INIT_DELAYED_WORK(&osb->osb_truncate_log_wq, 1445 + ocfs2_truncate_log_worker); 1447 1446 osb->osb_tl_bh = tl_bh; 1448 1447 osb->osb_tl_inode = tl_inode; 1449 1448
+6 -4
fs/ocfs2/cluster/heartbeat.c
··· 141 141 * recognizes a node going up and down in one iteration */ 142 142 u64 hr_generation; 143 143 144 - struct work_struct hr_write_timeout_work; 144 + struct delayed_work hr_write_timeout_work; 145 145 unsigned long hr_last_timeout_start; 146 146 147 147 /* Used during o2hb_check_slot to hold a copy of the block ··· 156 156 int wc_error; 157 157 }; 158 158 159 - static void o2hb_write_timeout(void *arg) 159 + static void o2hb_write_timeout(struct work_struct *work) 160 160 { 161 - struct o2hb_region *reg = arg; 161 + struct o2hb_region *reg = 162 + container_of(work, struct o2hb_region, 163 + hr_write_timeout_work.work); 162 164 163 165 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " 164 166 "milliseconds\n", reg->hr_dev_name, ··· 1406 1404 goto out; 1407 1405 } 1408 1406 1409 - INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg); 1407 + INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout); 1410 1408 1411 1409 /* 1412 1410 * A node is considered live after it has beat LIVE_THRESHOLD
+2 -2
fs/ocfs2/cluster/quorum.c
··· 88 88 o2quo_fence_self(); 89 89 } 90 90 91 - static void o2quo_make_decision(void *arg) 91 + static void o2quo_make_decision(struct work_struct *work) 92 92 { 93 93 int quorum; 94 94 int lowest_hb, lowest_reachable = 0, fence = 0; ··· 306 306 struct o2quo_state *qs = &o2quo_state; 307 307 308 308 spin_lock_init(&qs->qs_lock); 309 - INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL); 309 + INIT_WORK(&qs->qs_work, o2quo_make_decision); 310 310 } 311 311 312 312 void o2quo_exit(void)
+44 -34
fs/ocfs2/cluster/tcp.c
··· 140 140 [O2NET_ERR_DIED] = -EHOSTDOWN,}; 141 141 142 142 /* can't quite avoid *all* internal declarations :/ */ 143 - static void o2net_sc_connect_completed(void *arg); 144 - static void o2net_rx_until_empty(void *arg); 145 - static void o2net_shutdown_sc(void *arg); 143 + static void o2net_sc_connect_completed(struct work_struct *work); 144 + static void o2net_rx_until_empty(struct work_struct *work); 145 + static void o2net_shutdown_sc(struct work_struct *work); 146 146 static void o2net_listen_data_ready(struct sock *sk, int bytes); 147 - static void o2net_sc_send_keep_req(void *arg); 147 + static void o2net_sc_send_keep_req(struct work_struct *work); 148 148 static void o2net_idle_timer(unsigned long data); 149 149 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); 150 150 ··· 308 308 o2nm_node_get(node); 309 309 sc->sc_node = node; 310 310 311 - INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc); 312 - INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc); 313 - INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc); 314 - INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc); 311 + INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); 312 + INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); 313 + INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); 314 + INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req); 315 315 316 316 init_timer(&sc->sc_idle_timeout); 317 317 sc->sc_idle_timeout.function = o2net_idle_timer; ··· 342 342 sc_put(sc); 343 343 } 344 344 static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, 345 - struct work_struct *work, 345 + struct delayed_work *work, 346 346 int delay) 347 347 { 348 348 sc_get(sc); ··· 350 350 sc_put(sc); 351 351 } 352 352 static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, 353 - struct work_struct *work) 353 + struct delayed_work *work) 354 354 { 355 355 if (cancel_delayed_work(work)) 356 356 sc_put(sc); ··· 564 564 * 
ourselves as state_change couldn't get the nn_lock and call set_nn_state 565 565 * itself. 566 566 */ 567 - static void o2net_shutdown_sc(void *arg) 567 + static void o2net_shutdown_sc(struct work_struct *work) 568 568 { 569 - struct o2net_sock_container *sc = arg; 569 + struct o2net_sock_container *sc = 570 + container_of(work, struct o2net_sock_container, 571 + sc_shutdown_work); 570 572 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); 571 573 572 574 sclog(sc, "shutting down\n"); ··· 1203 1201 /* this work func is triggerd by data ready. it reads until it can read no 1204 1202 * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing 1205 1203 * our work the work struct will be marked and we'll be called again. */ 1206 - static void o2net_rx_until_empty(void *arg) 1204 + static void o2net_rx_until_empty(struct work_struct *work) 1207 1205 { 1208 - struct o2net_sock_container *sc = arg; 1206 + struct o2net_sock_container *sc = 1207 + container_of(work, struct o2net_sock_container, sc_rx_work); 1209 1208 int ret; 1210 1209 1211 1210 do { ··· 1252 1249 1253 1250 /* called when a connect completes and after a sock is accepted. the 1254 1251 * rx path will see the response and mark the sc valid */ 1255 - static void o2net_sc_connect_completed(void *arg) 1252 + static void o2net_sc_connect_completed(struct work_struct *work) 1256 1253 { 1257 - struct o2net_sock_container *sc = arg; 1254 + struct o2net_sock_container *sc = 1255 + container_of(work, struct o2net_sock_container, 1256 + sc_connect_work); 1258 1257 1259 1258 mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", 1260 1259 (unsigned long long)O2NET_PROTOCOL_VERSION, ··· 1267 1262 } 1268 1263 1269 1264 /* this is called as a work_struct func. 
*/ 1270 - static void o2net_sc_send_keep_req(void *arg) 1265 + static void o2net_sc_send_keep_req(struct work_struct *work) 1271 1266 { 1272 - struct o2net_sock_container *sc = arg; 1267 + struct o2net_sock_container *sc = 1268 + container_of(work, struct o2net_sock_container, 1269 + sc_keepalive_work.work); 1273 1270 1274 1271 o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); 1275 1272 sc_put(sc); ··· 1321 1314 * having a connect attempt fail, etc. This centralizes the logic which decides 1322 1315 * if a connect attempt should be made or if we should give up and all future 1323 1316 * transmit attempts should fail */ 1324 - static void o2net_start_connect(void *arg) 1317 + static void o2net_start_connect(struct work_struct *work) 1325 1318 { 1326 - struct o2net_node *nn = arg; 1319 + struct o2net_node *nn = 1320 + container_of(work, struct o2net_node, nn_connect_work.work); 1327 1321 struct o2net_sock_container *sc = NULL; 1328 1322 struct o2nm_node *node = NULL, *mynode = NULL; 1329 1323 struct socket *sock = NULL; 1330 1324 struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; 1331 - int ret = 0; 1325 + int ret = 0, stop; 1332 1326 1333 1327 /* if we're greater we initiate tx, otherwise we accept */ 1334 1328 if (o2nm_this_node() <= o2net_num_from_nn(nn)) ··· 1350 1342 1351 1343 spin_lock(&nn->nn_lock); 1352 1344 /* see if we already have one pending or have given up */ 1353 - if (nn->nn_sc || nn->nn_persistent_error) 1354 - arg = NULL; 1345 + stop = (nn->nn_sc || nn->nn_persistent_error); 1355 1346 spin_unlock(&nn->nn_lock); 1356 - if (arg == NULL) /* *shrug*, needed some indicator */ 1347 + if (stop) 1357 1348 goto out; 1358 1349 1359 1350 nn->nn_last_connect_attempt = jiffies; ··· 1428 1421 return; 1429 1422 } 1430 1423 1431 - static void o2net_connect_expired(void *arg) 1424 + static void o2net_connect_expired(struct work_struct *work) 1432 1425 { 1433 - struct o2net_node *nn = arg; 1426 + struct o2net_node *nn = 1427 + container_of(work, struct 
o2net_node, nn_connect_expired.work); 1434 1428 1435 1429 spin_lock(&nn->nn_lock); 1436 1430 if (!nn->nn_sc_valid) { ··· 1444 1436 spin_unlock(&nn->nn_lock); 1445 1437 } 1446 1438 1447 - static void o2net_still_up(void *arg) 1439 + static void o2net_still_up(struct work_struct *work) 1448 1440 { 1449 - struct o2net_node *nn = arg; 1441 + struct o2net_node *nn = 1442 + container_of(work, struct o2net_node, nn_still_up.work); 1450 1443 1451 1444 o2quo_hb_still_up(o2net_num_from_nn(nn)); 1452 1445 } ··· 1653 1644 return ret; 1654 1645 } 1655 1646 1656 - static void o2net_accept_many(void *arg) 1647 + static void o2net_accept_many(struct work_struct *work) 1657 1648 { 1658 - struct socket *sock = arg; 1649 + struct socket *sock = o2net_listen_sock; 1659 1650 while (o2net_accept_one(sock) == 0) 1660 1651 cond_resched(); 1661 1652 } ··· 1709 1700 write_unlock_bh(&sock->sk->sk_callback_lock); 1710 1701 1711 1702 o2net_listen_sock = sock; 1712 - INIT_WORK(&o2net_listen_work, o2net_accept_many, sock); 1703 + INIT_WORK(&o2net_listen_work, o2net_accept_many); 1713 1704 1714 1705 sock->sk->sk_reuse = 1; 1715 1706 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); ··· 1828 1819 struct o2net_node *nn = o2net_nn_from_num(i); 1829 1820 1830 1821 spin_lock_init(&nn->nn_lock); 1831 - INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn); 1832 - INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn); 1833 - INIT_WORK(&nn->nn_still_up, o2net_still_up, nn); 1822 + INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect); 1823 + INIT_DELAYED_WORK(&nn->nn_connect_expired, 1824 + o2net_connect_expired); 1825 + INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up); 1834 1826 /* until we see hb from a node we'll return einval */ 1835 1827 nn->nn_persistent_error = -ENOTCONN; 1836 1828 init_waitqueue_head(&nn->nn_sc_wq);
+4 -4
fs/ocfs2/cluster/tcp_internal.h
··· 86 86 * connect attempt fails and so can be self-arming. shutdown is 87 87 * careful to first mark the nn such that no connects will be attempted 88 88 * before canceling delayed connect work and flushing the queue. */ 89 - struct work_struct nn_connect_work; 89 + struct delayed_work nn_connect_work; 90 90 unsigned long nn_last_connect_attempt; 91 91 92 92 /* this is queued as nodes come up and is canceled when a connection is 93 93 * established. this expiring gives up on the node and errors out 94 94 * transmits */ 95 - struct work_struct nn_connect_expired; 95 + struct delayed_work nn_connect_expired; 96 96 97 97 /* after we give up on a socket we wait a while before deciding 98 98 * that it is still heartbeating and that we should do some 99 99 * quorum work */ 100 - struct work_struct nn_still_up; 100 + struct delayed_work nn_still_up; 101 101 }; 102 102 103 103 struct o2net_sock_container { ··· 129 129 struct work_struct sc_shutdown_work; 130 130 131 131 struct timer_list sc_idle_timeout; 132 - struct work_struct sc_keepalive_work; 132 + struct delayed_work sc_keepalive_work; 133 133 134 134 unsigned sc_handshake_ok:1; 135 135
+1 -1
fs/ocfs2/dlm/dlmcommon.h
··· 153 153 * called functions that cannot be directly called from the 154 154 * net message handlers for some reason, usually because 155 155 * they need to send net messages of their own. */ 156 - void dlm_dispatch_work(void *data); 156 + void dlm_dispatch_work(struct work_struct *work); 157 157 158 158 struct dlm_lock_resource; 159 159 struct dlm_work_item;
+1 -1
fs/ocfs2/dlm/dlmdomain.c
··· 1296 1296 1297 1297 spin_lock_init(&dlm->work_lock); 1298 1298 INIT_LIST_HEAD(&dlm->work_list); 1299 - INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm); 1299 + INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work); 1300 1300 1301 1301 kref_init(&dlm->dlm_refs); 1302 1302 dlm->dlm_state = DLM_CTXT_NEW;
+3 -2
fs/ocfs2/dlm/dlmrecovery.c
··· 153 153 } 154 154 155 155 /* Worker function used during recovery. */ 156 - void dlm_dispatch_work(void *data) 156 + void dlm_dispatch_work(struct work_struct *work) 157 157 { 158 - struct dlm_ctxt *dlm = (struct dlm_ctxt *)data; 158 + struct dlm_ctxt *dlm = 159 + container_of(work, struct dlm_ctxt, dispatched_work); 159 160 LIST_HEAD(tmp_list); 160 161 struct list_head *iter, *iter2; 161 162 struct dlm_work_item *item;
+5 -5
fs/ocfs2/dlm/userdlm.c
··· 171 171 BUG(); 172 172 } 173 173 174 - static void user_dlm_unblock_lock(void *opaque); 174 + static void user_dlm_unblock_lock(struct work_struct *work); 175 175 176 176 static void __user_dlm_queue_lockres(struct user_lock_res *lockres) 177 177 { 178 178 if (!(lockres->l_flags & USER_LOCK_QUEUED)) { 179 179 user_dlm_grab_inode_ref(lockres); 180 180 181 - INIT_WORK(&lockres->l_work, user_dlm_unblock_lock, 182 - lockres); 181 + INIT_WORK(&lockres->l_work, user_dlm_unblock_lock); 183 182 184 183 queue_work(user_dlm_worker, &lockres->l_work); 185 184 lockres->l_flags |= USER_LOCK_QUEUED; ··· 278 279 iput(inode); 279 280 } 280 281 281 - static void user_dlm_unblock_lock(void *opaque) 282 + static void user_dlm_unblock_lock(struct work_struct *work) 282 283 { 283 284 int new_level, status; 284 - struct user_lock_res *lockres = (struct user_lock_res *) opaque; 285 + struct user_lock_res *lockres = 286 + container_of(work, struct user_lock_res, l_work); 285 287 struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); 286 288 287 289 mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
+4 -3
fs/ocfs2/journal.c
··· 911 911 * NOTE: This function can and will sleep on recovery of other nodes 912 912 * during cluster locking, just like any other ocfs2 process. 913 913 */ 914 - void ocfs2_complete_recovery(void *data) 914 + void ocfs2_complete_recovery(struct work_struct *work) 915 915 { 916 916 int ret; 917 - struct ocfs2_super *osb = data; 918 - struct ocfs2_journal *journal = osb->journal; 917 + struct ocfs2_journal *journal = 918 + container_of(work, struct ocfs2_journal, j_recovery_work); 919 + struct ocfs2_super *osb = journal->j_osb; 919 920 struct ocfs2_dinode *la_dinode, *tl_dinode; 920 921 struct ocfs2_la_recovery_item *item; 921 922 struct list_head *p, *n;
+1 -1
fs/ocfs2/journal.h
··· 172 172 } 173 173 174 174 /* Exported only for the journal struct init code in super.c. Do not call. */ 175 - void ocfs2_complete_recovery(void *data); 175 + void ocfs2_complete_recovery(struct work_struct *work); 176 176 177 177 /* 178 178 * Journal Control:
+1 -1
fs/ocfs2/ocfs2.h
··· 283 283 /* Truncate log info */ 284 284 struct inode *osb_tl_inode; 285 285 struct buffer_head *osb_tl_bh; 286 - struct work_struct osb_truncate_log_wq; 286 + struct delayed_work osb_truncate_log_wq; 287 287 288 288 struct ocfs2_node_map osb_recovering_orphan_dirs; 289 289 unsigned int *osb_orphan_wipes;
+1 -1
fs/ocfs2/super.c
··· 1365 1365 spin_lock_init(&journal->j_lock); 1366 1366 journal->j_trans_id = (unsigned long) 1; 1367 1367 INIT_LIST_HEAD(&journal->j_la_cleanups); 1368 - INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb); 1368 + INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); 1369 1369 journal->j_state = OCFS2_JOURNAL_FREE; 1370 1370 1371 1371 /* get some pseudo constants for clustersize bits */
+7 -5
fs/reiserfs/journal.c
··· 104 104 struct reiserfs_journal *journal); 105 105 static int dirty_one_transaction(struct super_block *s, 106 106 struct reiserfs_journal_list *jl); 107 - static void flush_async_commits(void *p); 107 + static void flush_async_commits(struct work_struct *work); 108 108 static void queue_log_writer(struct super_block *s); 109 109 110 110 /* values for join in do_journal_begin_r */ ··· 2836 2836 if (reiserfs_mounted_fs_count <= 1) 2837 2837 commit_wq = create_workqueue("reiserfs"); 2838 2838 2839 - INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb); 2839 + INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); 2840 + journal->j_work_sb = p_s_sb; 2840 2841 return 0; 2841 2842 free_and_return: 2842 2843 free_journal_ram(p_s_sb); ··· 3448 3447 /* 3449 3448 ** writeback the pending async commits to disk 3450 3449 */ 3451 - static void flush_async_commits(void *p) 3450 + static void flush_async_commits(struct work_struct *work) 3452 3451 { 3453 - struct super_block *p_s_sb = p; 3454 - struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); 3452 + struct reiserfs_journal *journal = 3453 + container_of(work, struct reiserfs_journal, j_work.work); 3454 + struct super_block *p_s_sb = journal->j_work_sb; 3455 3455 struct reiserfs_journal_list *jl; 3456 3456 struct list_head *entry; 3457 3457
+12 -9
fs/xfs/linux-2.6/xfs_aops.c
··· 149 149 */ 150 150 STATIC void 151 151 xfs_end_bio_delalloc( 152 - void *data) 152 + struct work_struct *work) 153 153 { 154 - xfs_ioend_t *ioend = data; 154 + xfs_ioend_t *ioend = 155 + container_of(work, xfs_ioend_t, io_work); 155 156 156 157 xfs_destroy_ioend(ioend); 157 158 } ··· 162 161 */ 163 162 STATIC void 164 163 xfs_end_bio_written( 165 - void *data) 164 + struct work_struct *work) 166 165 { 167 - xfs_ioend_t *ioend = data; 166 + xfs_ioend_t *ioend = 167 + container_of(work, xfs_ioend_t, io_work); 168 168 169 169 xfs_destroy_ioend(ioend); 170 170 } ··· 178 176 */ 179 177 STATIC void 180 178 xfs_end_bio_unwritten( 181 - void *data) 179 + struct work_struct *work) 182 180 { 183 - xfs_ioend_t *ioend = data; 181 + xfs_ioend_t *ioend = 182 + container_of(work, xfs_ioend_t, io_work); 184 183 bhv_vnode_t *vp = ioend->io_vnode; 185 184 xfs_off_t offset = ioend->io_offset; 186 185 size_t size = ioend->io_size; ··· 223 220 ioend->io_size = 0; 224 221 225 222 if (type == IOMAP_UNWRITTEN) 226 - INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend); 223 + INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten); 227 224 else if (type == IOMAP_DELAY) 228 - INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend); 225 + INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc); 229 226 else 230 - INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend); 227 + INIT_WORK(&ioend->io_work, xfs_end_bio_written); 231 228 232 229 return ioend; 233 230 }
+5 -4
fs/xfs/linux-2.6/xfs_buf.c
··· 994 994 995 995 STATIC void 996 996 xfs_buf_iodone_work( 997 - void *v) 997 + struct work_struct *work) 998 998 { 999 - xfs_buf_t *bp = (xfs_buf_t *)v; 999 + xfs_buf_t *bp = 1000 + container_of(work, xfs_buf_t, b_iodone_work); 1000 1001 1001 1002 if (bp->b_iodone) 1002 1003 (*(bp->b_iodone))(bp); ··· 1018 1017 1019 1018 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { 1020 1019 if (schedule) { 1021 - INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp); 1020 + INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); 1022 1021 queue_work(xfslogd_workqueue, &bp->b_iodone_work); 1023 1022 } else { 1024 - xfs_buf_iodone_work(bp); 1023 + xfs_buf_iodone_work(&bp->b_iodone_work); 1025 1024 } 1026 1025 } else { 1027 1026 up(&bp->b_iodonesema);
+2 -2
include/linux/connector.h
··· 133 133 struct cn_callback_entry { 134 134 struct list_head callback_entry; 135 135 struct cn_callback *cb; 136 - struct work_struct work; 136 + struct delayed_work work; 137 137 struct cn_queue_dev *pdev; 138 138 139 139 struct cn_callback_id id; ··· 170 170 171 171 int cn_cb_equal(struct cb_id *, struct cb_id *); 172 172 173 - void cn_queue_wrapper(void *data); 173 + void cn_queue_wrapper(struct work_struct *work); 174 174 175 175 extern int cn_already_initialized; 176 176
+1 -1
include/linux/i2o.h
··· 461 461 int (*reply) (struct i2o_controller *, u32, struct i2o_message *); 462 462 463 463 /* Event handler */ 464 - void (*event) (struct i2o_event *); 464 + work_func_t event; 465 465 466 466 struct workqueue_struct *event_queue; /* Event queue */ 467 467
+1 -1
include/linux/mmc/host.h
··· 110 110 struct mmc_card *card_busy; /* the MMC card claiming host */ 111 111 struct mmc_card *card_selected; /* the selected MMC card */ 112 112 113 - struct work_struct detect; 113 + struct delayed_work detect; 114 114 115 115 unsigned long private[0] ____cacheline_aligned; 116 116 };
+4 -4
include/linux/ncp_fs_sb.h
··· 127 127 } unexpected_packet; 128 128 }; 129 129 130 - extern void ncp_tcp_rcv_proc(void *server); 131 - extern void ncp_tcp_tx_proc(void *server); 132 - extern void ncpdgram_rcv_proc(void *server); 133 - extern void ncpdgram_timeout_proc(void *server); 130 + extern void ncp_tcp_rcv_proc(struct work_struct *work); 131 + extern void ncp_tcp_tx_proc(struct work_struct *work); 132 + extern void ncpdgram_rcv_proc(struct work_struct *work); 133 + extern void ncpdgram_timeout_proc(struct work_struct *work); 134 134 extern void ncpdgram_timeout_call(unsigned long server); 135 135 extern void ncp_tcp_data_ready(struct sock* sk, int len); 136 136 extern void ncp_tcp_write_space(struct sock* sk);
+2 -1
include/linux/reiserfs_fs_sb.h
··· 249 249 int j_errno; 250 250 251 251 /* when flushing ordered buffers, throttle new ordered writers */ 252 - struct work_struct j_work; 252 + struct delayed_work j_work; 253 + struct super_block *j_work_sb; 253 254 atomic_t j_async_throttle; 254 255 }; 255 256
+1 -1
include/linux/relay.h
··· 38 38 size_t subbufs_consumed; /* count of sub-buffers consumed */ 39 39 struct rchan *chan; /* associated channel */ 40 40 wait_queue_head_t read_wait; /* reader wait queue */ 41 - struct work_struct wake_readers; /* reader wake-up work struct */ 41 + struct delayed_work wake_readers; /* reader wake-up work struct */ 42 42 struct dentry *dentry; /* channel file dentry */ 43 43 struct kref kref; /* channel buffer refcount */ 44 44 struct page **page_array; /* array of current buffer pages */
+1 -1
include/linux/usb.h
··· 382 382 383 383 int pm_usage_cnt; /* usage counter for autosuspend */ 384 384 #ifdef CONFIG_PM 385 - struct work_struct autosuspend; /* for delayed autosuspends */ 385 + struct delayed_work autosuspend; /* for delayed autosuspends */ 386 386 struct mutex pm_mutex; /* protects PM operations */ 387 387 388 388 unsigned auto_pm:1; /* autosuspend/resume in progress */
+2 -2
include/net/ieee80211softmac.h
··· 108 108 /* Scan retries remaining */ 109 109 int scan_retry; 110 110 111 - struct work_struct work; 112 - struct work_struct timeout; 111 + struct delayed_work work; 112 + struct delayed_work timeout; 113 113 }; 114 114 115 115 struct ieee80211softmac_bss_info {
+1 -1
include/net/sctp/structs.h
··· 1030 1030 void sctp_inq_free(struct sctp_inq *); 1031 1031 void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet); 1032 1032 struct sctp_chunk *sctp_inq_pop(struct sctp_inq *); 1033 - void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *); 1033 + void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t); 1034 1034 1035 1035 /* This is the structure we use to hold outbound chunks. You push 1036 1036 * chunks in and they automatically pop out the other end as bundled
+19 -4
include/scsi/libsas.h
··· 200 200 void *lldd_dev; 201 201 }; 202 202 203 + struct sas_discovery_event { 204 + struct work_struct work; 205 + struct asd_sas_port *port; 206 + }; 207 + 203 208 struct sas_discovery { 204 209 spinlock_t disc_event_lock; 205 - struct work_struct disc_work[DISC_NUM_EVENTS]; 210 + struct sas_discovery_event disc_work[DISC_NUM_EVENTS]; 206 211 unsigned long pending; 207 212 u8 fanout_sas_addr[8]; 208 213 u8 eeds_a[8]; ··· 253 248 void *lldd_port; /* not touched by the sas class code */ 254 249 }; 255 250 251 + struct asd_sas_event { 252 + struct work_struct work; 253 + struct asd_sas_phy *phy; 254 + }; 255 + 256 256 /* The phy pretty much is controlled by the LLDD. 257 257 * The class only reads those fields. 258 258 */ 259 259 struct asd_sas_phy { 260 260 /* private: */ 261 261 /* protected by ha->event_lock */ 262 - struct work_struct port_events[PORT_NUM_EVENTS]; 263 - struct work_struct phy_events[PHY_NUM_EVENTS]; 262 + struct asd_sas_event port_events[PORT_NUM_EVENTS]; 263 + struct asd_sas_event phy_events[PHY_NUM_EVENTS]; 264 264 265 265 unsigned long port_events_pending; 266 266 unsigned long phy_events_pending; ··· 317 307 int queue_thread_kill; 318 308 }; 319 309 310 + struct sas_ha_event { 311 + struct work_struct work; 312 + struct sas_ha_struct *ha; 313 + }; 314 + 320 315 struct sas_ha_struct { 321 316 /* private: */ 322 317 spinlock_t event_lock; 323 - struct work_struct ha_events[HA_NUM_EVENTS]; 318 + struct sas_ha_event ha_events[HA_NUM_EVENTS]; 324 319 unsigned long pending; 325 320 326 321 struct scsi_core core;
+2 -2
include/scsi/scsi_transport_fc.h
··· 206 206 u8 flags; 207 207 struct list_head peers; 208 208 struct device dev; 209 - struct work_struct dev_loss_work; 209 + struct delayed_work dev_loss_work; 210 210 struct work_struct scan_work; 211 - struct work_struct fail_io_work; 211 + struct delayed_work fail_io_work; 212 212 struct work_struct stgt_delete_work; 213 213 struct work_struct rport_delete_work; 214 214 } __attribute__((aligned(sizeof(unsigned long))));
+1 -1
include/scsi/scsi_transport_iscsi.h
··· 176 176 177 177 /* recovery fields */ 178 178 int recovery_tmo; 179 - struct work_struct recovery_work; 179 + struct delayed_work recovery_work; 180 180 181 181 int target_id; 182 182
+1 -1
include/sound/ac97_codec.h
··· 511 511 #ifdef CONFIG_SND_AC97_POWER_SAVE 512 512 unsigned int power_up; /* power states */ 513 513 struct workqueue_struct *power_workq; 514 - struct work_struct power_work; 514 + struct delayed_work power_work; 515 515 #endif 516 516 struct device dev; 517 517 };
+1 -1
include/sound/ak4114.h
··· 182 182 unsigned char rcs0; 183 183 unsigned char rcs1; 184 184 struct workqueue_struct *workqueue; 185 - struct work_struct work; 185 + struct delayed_work work; 186 186 void *change_callback_private; 187 187 void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1); 188 188 };
+6 -4
kernel/relay.c
··· 308 308 * reason waking is deferred is that calling directly from write 309 309 * causes problems if you're writing from say the scheduler. 310 310 */ 311 - static void wakeup_readers(void *private) 311 + static void wakeup_readers(struct work_struct *work) 312 312 { 313 - struct rchan_buf *buf = private; 313 + struct rchan_buf *buf = 314 + container_of(work, struct rchan_buf, wake_readers.work); 314 315 wake_up_interruptible(&buf->read_wait); 315 316 } 316 317 ··· 329 328 if (init) { 330 329 init_waitqueue_head(&buf->read_wait); 331 330 kref_init(&buf->kref); 332 - INIT_WORK(&buf->wake_readers, NULL, NULL); 331 + INIT_DELAYED_WORK(&buf->wake_readers, NULL); 333 332 } else { 334 333 cancel_delayed_work(&buf->wake_readers); 335 334 flush_scheduled_work(); ··· 550 549 buf->padding[old_subbuf]; 551 550 smp_mb(); 552 551 if (waitqueue_active(&buf->read_wait)) { 553 - PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf); 552 + PREPARE_DELAYED_WORK(&buf->wake_readers, 553 + wakeup_readers); 554 554 schedule_delayed_work(&buf->wake_readers, 1); 555 555 } 556 556 }
+2 -2
mm/swap.c
··· 216 216 } 217 217 218 218 #ifdef CONFIG_NUMA 219 - static void lru_add_drain_per_cpu(void *dummy) 219 + static void lru_add_drain_per_cpu(struct work_struct *dummy) 220 220 { 221 221 lru_add_drain(); 222 222 } ··· 226 226 */ 227 227 int lru_add_drain_all(void) 228 228 { 229 - return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL); 229 + return schedule_on_each_cpu(lru_add_drain_per_cpu); 230 230 } 231 231 232 232 #else
+5 -4
net/atm/lec.c
··· 1458 1458 1459 1459 #define LEC_ARP_REFRESH_INTERVAL (3*HZ) 1460 1460 1461 - static void lec_arp_check_expire(void *data); 1461 + static void lec_arp_check_expire(struct work_struct *work); 1462 1462 static void lec_arp_expire_arp(unsigned long data); 1463 1463 1464 1464 /* ··· 1481 1481 INIT_HLIST_HEAD(&priv->lec_no_forward); 1482 1482 INIT_HLIST_HEAD(&priv->mcast_fwds); 1483 1483 spin_lock_init(&priv->lec_arp_lock); 1484 - INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv); 1484 + INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire); 1485 1485 schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); 1486 1486 } 1487 1487 ··· 1879 1879 * to ESI_FORWARD_DIRECT. This causes the flush period to end 1880 1880 * regardless of the progress of the flush protocol. 1881 1881 */ 1882 - static void lec_arp_check_expire(void *data) 1882 + static void lec_arp_check_expire(struct work_struct *work) 1883 1883 { 1884 1884 unsigned long flags; 1885 - struct lec_priv *priv = data; 1885 + struct lec_priv *priv = 1886 + container_of(work, struct lec_priv, lec_arp_work.work); 1886 1887 struct hlist_node *node, *next; 1887 1888 struct lec_arp_table *entry; 1888 1889 unsigned long now;
+1 -1
net/atm/lec.h
··· 92 92 spinlock_t lec_arp_lock; 93 93 struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ 94 94 struct atm_vcc *lecd; 95 - struct work_struct lec_arp_work; /* C10 */ 95 + struct delayed_work lec_arp_work; /* C10 */ 96 96 unsigned int maximum_unknown_frame_count; 97 97 /* 98 98 * Within the period of time defined by this variable, the client will send
+6 -6
net/bluetooth/hci_sysfs.c
··· 237 237 kfree(data); 238 238 } 239 239 240 - static void add_conn(void *data) 240 + static void add_conn(struct work_struct *work) 241 241 { 242 - struct hci_conn *conn = data; 242 + struct hci_conn *conn = container_of(work, struct hci_conn, work); 243 243 int i; 244 244 245 245 if (device_register(&conn->dev) < 0) { ··· 272 272 273 273 dev_set_drvdata(&conn->dev, conn); 274 274 275 - INIT_WORK(&conn->work, add_conn, (void *) conn); 275 + INIT_WORK(&conn->work, add_conn); 276 276 277 277 schedule_work(&conn->work); 278 278 } 279 279 280 - static void del_conn(void *data) 280 + static void del_conn(struct work_struct *work) 281 281 { 282 - struct hci_conn *conn = data; 282 + struct hci_conn *conn = container_of(work, struct hci_conn, work); 283 283 device_del(&conn->dev); 284 284 } 285 285 ··· 287 287 { 288 288 BT_DBG("conn %p", conn); 289 289 290 - INIT_WORK(&conn->work, del_conn, (void *) conn); 290 + INIT_WORK(&conn->work, del_conn); 291 291 292 292 schedule_work(&conn->work); 293 293 }
+7 -3
net/bridge/br_if.c
··· 77 77 * Called from work queue to allow for calling functions that 78 78 * might sleep (such as speed check), and to debounce. 79 79 */ 80 - static void port_carrier_check(void *arg) 80 + static void port_carrier_check(struct work_struct *work) 81 81 { 82 - struct net_device *dev = arg; 83 82 struct net_bridge_port *p; 83 + struct net_device *dev; 84 84 struct net_bridge *br; 85 + 86 + dev = container_of(work, struct net_bridge_port, 87 + carrier_check.work)->dev; 88 + work_release(work); 85 89 86 90 rtnl_lock(); 87 91 p = dev->br_port; ··· 280 276 p->port_no = index; 281 277 br_init_port(p); 282 278 p->state = BR_STATE_DISABLED; 283 - INIT_WORK(&p->carrier_check, port_carrier_check, dev); 279 + INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check); 284 280 br_stp_port_timer_init(p); 285 281 286 282 kobject_init(&p->kobj);
+1 -1
net/bridge/br_private.h
··· 82 82 struct timer_list hold_timer; 83 83 struct timer_list message_age_timer; 84 84 struct kobject kobj; 85 - struct work_struct carrier_check; 85 + struct delayed_work carrier_check; 86 86 struct rcu_head rcu; 87 87 }; 88 88
+2 -2
net/core/netpoll.c
··· 56 56 static void zap_completion_queue(void); 57 57 static void arp_reply(struct sk_buff *skb); 58 58 59 - static void queue_process(void *p) 59 + static void queue_process(struct work_struct *work) 60 60 { 61 61 unsigned long flags; 62 62 struct sk_buff *skb; ··· 77 77 } 78 78 } 79 79 80 - static DECLARE_WORK(send_queue, queue_process, NULL); 80 + static DECLARE_WORK(send_queue, queue_process); 81 81 82 82 void netpoll_queue(struct sk_buff *skb) 83 83 {
+1 -2
net/dccp/minisocks.c
··· 31 31 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, 32 32 (unsigned long)&dccp_death_row), 33 33 .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work, 34 - inet_twdr_twkill_work, 35 - &dccp_death_row), 34 + inet_twdr_twkill_work), 36 35 /* Short-time timewait calendar */ 37 36 38 37 .twcal_hand = -1,
+11 -7
net/ieee80211/softmac/ieee80211softmac_assoc.c
··· 58 58 } 59 59 60 60 void 61 - ieee80211softmac_assoc_timeout(void *d) 61 + ieee80211softmac_assoc_timeout(struct work_struct *work) 62 62 { 63 - struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; 63 + struct ieee80211softmac_device *mac = 64 + container_of(work, struct ieee80211softmac_device, 65 + associnfo.timeout.work); 64 66 struct ieee80211softmac_network *n; 65 67 66 68 mutex_lock(&mac->associnfo.mutex); ··· 188 186 189 187 /* This function is called to handle userspace requests (asynchronously) */ 190 188 void 191 - ieee80211softmac_assoc_work(void *d) 189 + ieee80211softmac_assoc_work(struct work_struct *work) 192 190 { 193 - struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; 191 + struct ieee80211softmac_device *mac = 192 + container_of(work, struct ieee80211softmac_device, 193 + associnfo.work.work); 194 194 struct ieee80211softmac_network *found = NULL; 195 195 struct ieee80211_network *net = NULL, *best = NULL; 196 196 int bssvalid; ··· 416 412 network->authenticated = 0; 417 413 /* we don't want to do this more than once ... */ 418 414 network->auth_desynced_once = 1; 419 - schedule_work(&mac->associnfo.work); 415 + schedule_delayed_work(&mac->associnfo.work, 0); 420 416 break; 421 417 } 422 418 default: ··· 450 446 ieee80211softmac_disassoc(mac); 451 447 452 448 /* try to reassociate */ 453 - schedule_work(&mac->associnfo.work); 449 + schedule_delayed_work(&mac->associnfo.work, 0); 454 450 455 451 return 0; 456 452 } ··· 470 466 dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); 471 467 return 0; 472 468 } 473 - schedule_work(&mac->associnfo.work); 469 + schedule_delayed_work(&mac->associnfo.work, 0); 474 470 475 471 return 0; 476 472 }
+13 -10
net/ieee80211/softmac/ieee80211softmac_auth.c
··· 26 26 
 27 27 #include "ieee80211softmac_priv.h" 
 28 28 
 29 - static void ieee80211softmac_auth_queue(void *data); 
 29 + static void ieee80211softmac_auth_queue(struct work_struct *work); 
 30 30 
 31 31 /* Queues an auth request to the desired AP */ 
 32 32 int 
 ··· 54 54 auth->mac = mac; 
 55 55 auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; 
 56 56 auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; 
 57 - INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth); 
 57 + INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue); 
 58 58 
 59 59 /* Lock (for list) */ 
 60 60 spin_lock_irqsave(&mac->lock, flags); 
 61 61 
 62 62 /* add to list */ 
 63 63 list_add_tail(&auth->list, &mac->auth_queue); 
 64 - schedule_work(&auth->work); 
 64 + schedule_delayed_work(&auth->work, 0); 
 65 65 spin_unlock_irqrestore(&mac->lock, flags); 
 66 66 
 67 67 return 0; 
 ··· 70 70 
 71 71 /* Sends an auth request to the desired AP and handles timeouts */ 
 72 72 static void 
 73 - ieee80211softmac_auth_queue(void *data) 
 73 + ieee80211softmac_auth_queue(struct work_struct *work) 
 74 74 { 
 75 75 struct ieee80211softmac_device *mac; 
 76 76 struct ieee80211softmac_auth_queue_item *auth; 
 77 77 struct ieee80211softmac_network *net; 
 78 78 unsigned long flags; 
 79 79 
 80 - auth = (struct ieee80211softmac_auth_queue_item *)data; 
 80 + auth = container_of(work, struct ieee80211softmac_auth_queue_item, 
 81 + work.work); 
 81 82 net = auth->net; 
 82 83 mac = auth->mac; 
 83 84 
 ··· 119 118 
 120 119 /* Sends a response to an auth challenge (for shared key auth). */ 
 121 120 static void 
 122 - ieee80211softmac_auth_challenge_response(void *_aq) 
 121 + ieee80211softmac_auth_challenge_response(struct work_struct *work) 
 123 122 { 
 124 - struct ieee80211softmac_auth_queue_item *aq = _aq; 
 123 + struct ieee80211softmac_auth_queue_item *aq = 
 124 + container_of(work, struct ieee80211softmac_auth_queue_item, 
 125 + work.work); 
 125 126 
 126 127 /* Send our response */ 
 127 128 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); 
 ··· 231 228 * we have obviously already sent the initial auth 
 232 229 * request. */ 
 233 230 cancel_delayed_work(&aq->work); 
 234 - INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq); 
 235 - schedule_work(&aq->work); 
 231 + INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response); 
 232 + schedule_delayed_work(&aq->work, 0); 
 236 233 spin_unlock_irqrestore(&mac->lock, flags); 
 237 234 return 0; 
 238 235 case IEEE80211SOFTMAC_AUTH_SHARED_PASS: 
 ··· 395 392 ieee80211softmac_deauth_from_net(mac, net); 
 396 393 
 397 394 /* let's try to re-associate */ 
 398 - schedule_work(&mac->associnfo.work); 
 395 + schedule_delayed_work(&mac->associnfo.work, 0); 
 399 396 return 0; 
 400 397 }
*/ 121 120 static void 122 - ieee80211softmac_auth_challenge_response(void *_aq) 121 + ieee80211softmac_auth_challenge_response(struct work_struct *work) 123 122 { 124 - struct ieee80211softmac_auth_queue_item *aq = _aq; 123 + struct ieee80211softmac_auth_queue_item *aq = 124 + container_of(work, struct ieee80211softmac_auth_queue_item, 125 + work.work); 125 126 126 127 /* Send our response */ 127 128 ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); ··· 231 228 * we have obviously already sent the initial auth 232 229 * request. */ 233 230 cancel_delayed_work(&aq->work); 234 - INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq); 235 - schedule_work(&aq->work); 231 + INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response); 232 + schedule_delayed_work(&aq->work, 0); 236 233 spin_unlock_irqrestore(&mac->lock, flags); 237 234 return 0; 238 235 case IEEE80211SOFTMAC_AUTH_SHARED_PASS: ··· 395 392 ieee80211softmac_deauth_from_net(mac, net); 396 393 397 394 /* let's try to re-associate */ 398 - schedule_work(&mac->associnfo.work); 395 + schedule_delayed_work(&mac->associnfo.work, 0); 399 396 return 0; 400 397 }
+7 -5
net/ieee80211/softmac/ieee80211softmac_event.c
··· 73 73 74 74 75 75 static void 76 - ieee80211softmac_notify_callback(void *d) 76 + ieee80211softmac_notify_callback(struct work_struct *work) 77 77 { 78 - struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d; 79 - kfree(d); 78 + struct ieee80211softmac_event *pevent = 79 + container_of(work, struct ieee80211softmac_event, work.work); 80 + struct ieee80211softmac_event event = *pevent; 81 + kfree(pevent); 80 82 81 83 event.fun(event.mac->dev, event.event_type, event.context); 82 84 } ··· 101 99 return -ENOMEM; 102 100 103 101 eventptr->event_type = event; 104 - INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr); 102 + INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback); 105 103 eventptr->fun = fun; 106 104 eventptr->context = context; 107 105 eventptr->mac = mac; ··· 172 170 /* User may have subscribed to ANY event, so 173 171 * we tell them which event triggered it. */ 174 172 eventptr->event_type = event; 175 - schedule_work(&eventptr->work); 173 + schedule_delayed_work(&eventptr->work, 0); 176 174 } 177 175 } 178 176 }
+2 -2
net/ieee80211/softmac/ieee80211softmac_module.c
··· 58 58 INIT_LIST_HEAD(&softmac->events); 59 59 60 60 mutex_init(&softmac->associnfo.mutex); 61 - INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac); 62 - INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac); 61 + INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work); 62 + INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout); 63 63 softmac->start_scan = ieee80211softmac_start_scan_implementation; 64 64 softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; 65 65 softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
+7 -6
net/ieee80211/softmac/ieee80211softmac_priv.h
··· 78 78 /* private definitions and prototypes */ 
 79 79 
 80 80 /*** prototypes from _scan.c */ 
 81 - void ieee80211softmac_scan(void *sm); 
 81 + void ieee80211softmac_scan(struct work_struct *work); 
 82 82 /* for internal use if scanning is needed */ 
 83 83 int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac); 
 84 84 void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac); 
 ··· 149 149 int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth); 
 150 150 
 151 151 /*** prototypes from _assoc.c */ 
 152 - void ieee80211softmac_assoc_work(void *d); 
 152 + void ieee80211softmac_assoc_work(struct work_struct *work); 
 153 153 int ieee80211softmac_handle_assoc_response(struct net_device * dev, 
 154 154 struct ieee80211_assoc_response * resp, 
 155 155 struct ieee80211_network * network); 
 ··· 157 157 struct ieee80211_disassoc * disassoc); 
 158 158 int ieee80211softmac_handle_reassoc_req(struct net_device * dev, 
 159 159 struct ieee80211_reassoc_request * reassoc); 
 160 - void ieee80211softmac_assoc_timeout(void *d); 
 160 + void ieee80211softmac_assoc_timeout(struct work_struct *work); 
 161 161 void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); 
 162 162 void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); 
 163 163 
 ··· 207 207 struct ieee80211softmac_device *mac; /* SoftMAC device */ 
 208 208 u8 retry; /* Retry limit */ 
 209 209 u8 state; /* Auth State */ 
 210 - struct work_struct work; /* Work queue */ 
 210 + struct delayed_work work; /* Work queue */ 
 211 211 }; 
 212 212 
 213 213 /* scanning information */ 
 ··· 219 219 stop:1; 
 220 220 u8 skip_flags; 
 221 221 struct completion finished; 
 222 - struct work_struct softmac_scan; 
 222 + struct delayed_work softmac_scan; 
 223 + struct ieee80211softmac_device *mac; 
 223 224 }; 
 224 225 
 225 226 /* private event struct */ 
 ··· 228 227 struct list_head list; 
 229 228 int event_type; 
 230 229 void *event_context; 
 231 - struct work_struct work; 
 230 + struct delayed_work work; 
 232 231 notify_function_ptr fun; 
 233 232 void *context; 
 234 233 struct ieee80211softmac_device *mac;
+8 -5
net/ieee80211/softmac/ieee80211softmac_scan.c
··· 91 91 92 92 93 93 /* internal scanning implementation follows */ 94 - void ieee80211softmac_scan(void *d) 94 + void ieee80211softmac_scan(struct work_struct *work) 95 95 { 96 96 int invalid_channel; 97 97 u8 current_channel_idx; 98 - struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d; 99 - struct ieee80211softmac_scaninfo *si = sm->scaninfo; 98 + struct ieee80211softmac_scaninfo *si = 99 + container_of(work, struct ieee80211softmac_scaninfo, 100 + softmac_scan.work); 101 + struct ieee80211softmac_device *sm = si->mac; 100 102 unsigned long flags; 101 103 102 104 while (!(si->stop) && (si->current_channel_idx < si->number_channels)) { ··· 148 146 struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC); 149 147 if (unlikely(!info)) 150 148 return NULL; 151 - INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac); 149 + INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan); 150 + info->mac = mac; 152 151 init_completion(&info->finished); 153 152 return info; 154 153 } ··· 192 189 sm->scaninfo->started = 1; 193 190 sm->scaninfo->stop = 0; 194 191 INIT_COMPLETION(sm->scaninfo->finished); 195 - schedule_work(&sm->scaninfo->softmac_scan); 192 + schedule_delayed_work(&sm->scaninfo->softmac_scan, 0); 196 193 spin_unlock_irqrestore(&sm->lock, flags); 197 194 return 0; 198 195 }
+3 -3
net/ieee80211/softmac/ieee80211softmac_wx.c
··· 122 122 123 123 sm->associnfo.associating = 1; 124 124 /* queue lower level code to do work (if necessary) */ 125 - schedule_work(&sm->associnfo.work); 125 + schedule_delayed_work(&sm->associnfo.work, 0); 126 126 out: 127 127 mutex_unlock(&sm->associnfo.mutex); 128 128 ··· 356 356 /* force reassociation */ 357 357 mac->associnfo.bssvalid = 0; 358 358 if (mac->associnfo.associated) 359 - schedule_work(&mac->associnfo.work); 359 + schedule_delayed_work(&mac->associnfo.work, 0); 360 360 } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { 361 361 /* the bssid we have is no longer fixed */ 362 362 mac->associnfo.bssfixed = 0; ··· 373 373 /* tell the other code that this bssid should be used no matter what */ 374 374 mac->associnfo.bssfixed = 1; 375 375 /* queue associate if new bssid or (old one again and not associated) */ 376 - schedule_work(&mac->associnfo.work); 376 + schedule_delayed_work(&mac->associnfo.work, 0); 377 377 } 378 378 379 379 out:
+3 -3
net/ipv4/ipvs/ip_vs_ctl.c
··· 221 221 * Timer for checking the defense 222 222 */ 223 223 #define DEFENSE_TIMER_PERIOD 1*HZ 224 - static void defense_work_handler(void *data); 225 - static DECLARE_WORK(defense_work, defense_work_handler, NULL); 224 + static void defense_work_handler(struct work_struct *work); 225 + static DECLARE_DELAYED_WORK(defense_work, defense_work_handler); 226 226 227 - static void defense_work_handler(void *data) 227 + static void defense_work_handler(struct work_struct *work) 228 228 { 229 229 update_defense_level(); 230 230 if (atomic_read(&ip_vs_dropentry))
+6 -5
net/irda/ircomm/ircomm_tty.c
··· 61 61 static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); 62 62 static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); 63 63 static void ircomm_tty_hangup(struct tty_struct *tty); 64 - static void ircomm_tty_do_softint(void *private_); 64 + static void ircomm_tty_do_softint(struct work_struct *work); 65 65 static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); 66 66 static void ircomm_tty_stop(struct tty_struct *tty); 67 67 ··· 389 389 self->flow = FLOW_STOP; 390 390 391 391 self->line = line; 392 - INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self); 392 + INIT_WORK(&self->tqueue, ircomm_tty_do_softint); 393 393 self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; 394 394 self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; 395 395 self->close_delay = 5*HZ/10; ··· 594 594 } 595 595 596 596 /* 597 - * Function ircomm_tty_do_softint (private_) 597 + * Function ircomm_tty_do_softint (work) 598 598 * 599 599 * We use this routine to give the write wakeup to the user at at a 600 600 * safe time (as fast as possible after write have completed). This 601 601 * can be compared to the Tx interrupt. 602 602 */ 603 - static void ircomm_tty_do_softint(void *private_) 603 + static void ircomm_tty_do_softint(struct work_struct *work) 604 604 { 605 - struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_; 605 + struct ircomm_tty_cb *self = 606 + container_of(work, struct ircomm_tty_cb, tqueue); 606 607 struct tty_struct *tty; 607 608 unsigned long flags; 608 609 struct sk_buff *skb, *ctrl_skb;
+6 -5
net/sctp/associola.c
··· 61 61 #include <net/sctp/sm.h> 62 62 63 63 /* Forward declarations for internal functions. */ 64 - static void sctp_assoc_bh_rcv(struct sctp_association *asoc); 64 + static void sctp_assoc_bh_rcv(struct work_struct *work); 65 65 66 66 67 67 /* 1st Level Abstractions. */ ··· 269 269 270 270 /* Create an input queue. */ 271 271 sctp_inq_init(&asoc->base.inqueue); 272 - sctp_inq_set_th_handler(&asoc->base.inqueue, 273 - (void (*)(void *))sctp_assoc_bh_rcv, 274 - asoc); 272 + sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); 275 273 276 274 /* Create an output queue. */ 277 275 sctp_outq_init(asoc, &asoc->outqueue); ··· 942 944 } 943 945 944 946 /* Do delayed input processing. This is scheduled by sctp_rcv(). */ 945 - static void sctp_assoc_bh_rcv(struct sctp_association *asoc) 947 + static void sctp_assoc_bh_rcv(struct work_struct *work) 946 948 { 949 + struct sctp_association *asoc = 950 + container_of(work, struct sctp_association, 951 + base.inqueue.immediate); 947 952 struct sctp_endpoint *ep; 948 953 struct sctp_chunk *chunk; 949 954 struct sock *sk;
+6 -4
net/sctp/endpointola.c
··· 61 61 #include <net/sctp/sm.h> 62 62 63 63 /* Forward declarations for internal helpers. */ 64 - static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); 64 + static void sctp_endpoint_bh_rcv(struct work_struct *work); 65 65 66 66 /* 67 67 * Initialize the base fields of the endpoint structure. ··· 85 85 sctp_inq_init(&ep->base.inqueue); 86 86 87 87 /* Set its top-half handler */ 88 - sctp_inq_set_th_handler(&ep->base.inqueue, 89 - (void (*)(void *))sctp_endpoint_bh_rcv, ep); 88 + sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); 90 89 91 90 /* Initialize the bind addr area */ 92 91 sctp_bind_addr_init(&ep->base.bind_addr, 0); ··· 310 311 /* Do delayed input processing. This is scheduled by sctp_rcv(). 311 312 * This may be called on BH or task time. 312 313 */ 313 - static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep) 314 + static void sctp_endpoint_bh_rcv(struct work_struct *work) 314 315 { 316 + struct sctp_endpoint *ep = 317 + container_of(work, struct sctp_endpoint, 318 + base.inqueue.immediate); 315 319 struct sctp_association *asoc; 316 320 struct sock *sk; 317 321 struct sctp_transport *transport;
+4 -5
net/sctp/inqueue.c
··· 54 54 queue->in_progress = NULL; 55 55 56 56 /* Create a task for delivering data. */ 57 - INIT_WORK(&queue->immediate, NULL, NULL); 57 + INIT_WORK(&queue->immediate, NULL); 58 58 59 59 queue->malloced = 0; 60 60 } ··· 97 97 * on the BH related data structures. 98 98 */ 99 99 list_add_tail(&chunk->list, &q->in_chunk_list); 100 - q->immediate.func(q->immediate.data); 100 + q->immediate.func(&q->immediate); 101 101 } 102 102 103 103 /* Extract a chunk from an SCTP inqueue. ··· 205 205 * The intent is that this routine will pull stuff out of the 206 206 * inqueue and process it. 207 207 */ 208 - void sctp_inq_set_th_handler(struct sctp_inq *q, 209 - void (*callback)(void *), void *arg) 208 + void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback) 210 209 { 211 - INIT_WORK(&q->immediate, callback, arg); 210 + INIT_WORK(&q->immediate, callback); 212 211 } 213 212
+4 -4
net/xfrm/xfrm_policy.c
··· 358 358 xfrm_pol_put(policy); 359 359 } 360 360 361 - static void xfrm_policy_gc_task(void *data) 361 + static void xfrm_policy_gc_task(struct work_struct *work) 362 362 { 363 363 struct xfrm_policy *policy; 364 364 struct hlist_node *entry, *tmp; ··· 546 546 547 547 static DEFINE_MUTEX(hash_resize_mutex); 548 548 549 - static void xfrm_hash_resize(void *__unused) 549 + static void xfrm_hash_resize(struct work_struct *__unused) 550 550 { 551 551 int dir, total; 552 552 ··· 563 563 mutex_unlock(&hash_resize_mutex); 564 564 } 565 565 566 - static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); 566 + static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); 567 567 568 568 /* Generate new index... KAME seems to generate them ordered by cost 569 569 * of an absolute inpredictability of ordering of rules. This will not pass. */ ··· 2080 2080 panic("XFRM: failed to allocate bydst hash\n"); 2081 2081 } 2082 2082 2083 - INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL); 2083 + INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task); 2084 2084 register_netdevice_notifier(&xfrm_dev_notifier); 2085 2085 } 2086 2086
+4 -4
net/xfrm/xfrm_state.c
··· 115 115 116 116 static DEFINE_MUTEX(hash_resize_mutex); 117 117 118 - static void xfrm_hash_resize(void *__unused) 118 + static void xfrm_hash_resize(struct work_struct *__unused) 119 119 { 120 120 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; 121 121 unsigned long nsize, osize; ··· 168 168 mutex_unlock(&hash_resize_mutex); 169 169 } 170 170 171 - static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); 171 + static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); 172 172 173 173 DECLARE_WAIT_QUEUE_HEAD(km_waitq); 174 174 EXPORT_SYMBOL(km_waitq); ··· 207 207 kfree(x); 208 208 } 209 209 210 - static void xfrm_state_gc_task(void *data) 210 + static void xfrm_state_gc_task(struct work_struct *data) 211 211 { 212 212 struct xfrm_state *x; 213 213 struct hlist_node *entry, *tmp; ··· 1568 1568 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes."); 1569 1569 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1); 1570 1570 1571 - INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL); 1571 + INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task); 1572 1572 } 1573 1573
+1 -1
sound/aoa/aoa-gpio.h
··· 59 59 }; 60 60 61 61 struct gpio_notification { 62 + struct delayed_work work; 62 63 notify_func_t notify; 63 64 void *data; 64 65 void *gpio_private; 65 - struct work_struct work; 66 66 struct mutex mutex; 67 67 }; 68 68
+7 -9
sound/aoa/core/snd-aoa-gpio-feature.c
··· 195 195 ftr_gpio_set_lineout(rt, (s>>2)&1); 196 196 } 197 197 198 - static void ftr_handle_notify(void *data) 198 + static void ftr_handle_notify(struct work_struct *work) 199 199 { 200 - struct gpio_notification *notif = data; 200 + struct gpio_notification *notif = 201 + container_of(work, struct gpio_notification, work.work); 201 202 202 203 mutex_lock(&notif->mutex); 203 204 if (notif->notify) ··· 254 253 255 254 ftr_gpio_all_amps_off(rt); 256 255 rt->implementation_private = 0; 257 - INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify, 258 - &rt->headphone_notify); 259 - INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify, 260 - &rt->line_in_notify); 261 - INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify, 262 - &rt->line_out_notify); 256 + INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify); 257 + INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify); 258 + INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify); 263 259 mutex_init(&rt->headphone_notify.mutex); 264 260 mutex_init(&rt->line_in_notify.mutex); 265 261 mutex_init(&rt->line_out_notify.mutex); ··· 285 287 { 286 288 struct gpio_notification *notif = data; 287 289 288 - schedule_work(&notif->work); 290 + schedule_delayed_work(&notif->work, 0); 289 291 290 292 return IRQ_HANDLED; 291 293 }
+7 -9
sound/aoa/core/snd-aoa-gpio-pmf.c
··· 69 69 pmf_gpio_set_lineout(rt, (s>>2)&1); 70 70 } 71 71 72 - static void pmf_handle_notify(void *data) 72 + static void pmf_handle_notify(struct work_struct *work) 73 73 { 74 - struct gpio_notification *notif = data; 74 + struct gpio_notification *notif = 75 + container_of(work, struct gpio_notification, work.work); 75 76 76 77 mutex_lock(&notif->mutex); 77 78 if (notif->notify) ··· 84 83 { 85 84 pmf_gpio_all_amps_off(rt); 86 85 rt->implementation_private = 0; 87 - INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify, 88 - &rt->headphone_notify); 89 - INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify, 90 - &rt->line_in_notify); 91 - INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify, 92 - &rt->line_out_notify); 86 + INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify); 87 + INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify); 88 + INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify); 93 89 mutex_init(&rt->headphone_notify.mutex); 94 90 mutex_init(&rt->line_in_notify.mutex); 95 91 mutex_init(&rt->line_out_notify.mutex); ··· 127 129 { 128 130 struct gpio_notification *notif = data; 129 131 130 - schedule_work(&notif->work); 132 + schedule_delayed_work(&notif->work, 0); 131 133 } 132 134 133 135 static int pmf_set_notify(struct gpio_runtime *rt,
+4 -4
sound/i2c/other/ak4114.c
··· 35 35 36 36 #define AK4114_ADDR 0x00 /* fixed address */ 37 37 38 - static void ak4114_stats(void *); 38 + static void ak4114_stats(struct work_struct *work); 39 39 40 40 static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val) 41 41 { ··· 158 158 reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN); 159 159 /* bring up statistics / event queing */ 160 160 chip->init = 0; 161 - INIT_WORK(&chip->work, ak4114_stats, chip); 161 + INIT_DELAYED_WORK(&chip->work, ak4114_stats); 162 162 queue_delayed_work(chip->workqueue, &chip->work, HZ / 10); 163 163 } 164 164 ··· 561 561 return res; 562 562 } 563 563 564 - static void ak4114_stats(void *data) 564 + static void ak4114_stats(struct work_struct *work) 565 565 { 566 - struct ak4114 *chip = (struct ak4114 *)data; 566 + struct ak4114 *chip = container_of(work, struct ak4114, work.work); 567 567 568 568 if (chip->init) 569 569 return;
+4 -3
sound/pci/ac97/ac97_codec.c
··· 1927 1927 static struct snd_ac97_build_ops null_build_ops; 1928 1928 1929 1929 #ifdef CONFIG_SND_AC97_POWER_SAVE 1930 - static void do_update_power(void *data) 1930 + static void do_update_power(struct work_struct *work) 1931 1931 { 1932 - update_power_regs(data); 1932 + update_power_regs( 1933 + container_of(work, struct snd_ac97, power_work.work)); 1933 1934 } 1934 1935 #endif 1935 1936 ··· 1990 1989 mutex_init(&ac97->page_mutex); 1991 1990 #ifdef CONFIG_SND_AC97_POWER_SAVE 1992 1991 ac97->power_workq = create_workqueue("ac97"); 1993 - INIT_WORK(&ac97->power_work, do_update_power, ac97); 1992 + INIT_DELAYED_WORK(&ac97->power_work, do_update_power); 1994 1993 #endif 1995 1994 1996 1995 #ifdef CONFIG_PCI
+6 -4
sound/pci/hda/hda_codec.c
··· 272 272 /* 273 273 * process queueud unsolicited events 274 274 */ 275 - static void process_unsol_events(void *data) 275 + static void process_unsol_events(struct work_struct *work) 276 276 { 277 - struct hda_bus *bus = data; 278 - struct hda_bus_unsolicited *unsol = bus->unsol; 277 + struct hda_bus_unsolicited *unsol = 278 + container_of(work, struct hda_bus_unsolicited, work); 279 + struct hda_bus *bus = unsol->bus; 279 280 struct hda_codec *codec; 280 281 unsigned int rp, caddr, res; 281 282 ··· 315 314 kfree(unsol); 316 315 return -ENOMEM; 317 316 } 318 - INIT_WORK(&unsol->work, process_unsol_events, bus); 317 + INIT_WORK(&unsol->work, process_unsol_events); 318 + unsol->bus = bus; 319 319 bus->unsol = unsol; 320 320 return 0; 321 321 }
+1
sound/pci/hda/hda_local.h
··· 206 206 /* workqueue */ 207 207 struct workqueue_struct *workq; 208 208 struct work_struct work; 209 + struct hda_bus *bus; 209 210 }; 210 211 211 212 /*
+5 -3
sound/ppc/tumbler.c
··· 942 942 } 943 943 944 944 static struct work_struct device_change; 945 + static struct snd_pmac *device_change_chip; 945 946 946 - static void device_change_handler(void *self) 947 + static void device_change_handler(struct work_struct *work) 947 948 { 948 - struct snd_pmac *chip = self; 949 + struct snd_pmac *chip = device_change_chip; 949 950 struct pmac_tumbler *mix; 950 951 int headphone, lineout; 951 952 ··· 1418 1417 chip->resume = tumbler_resume; 1419 1418 #endif 1420 1419 1421 - INIT_WORK(&device_change, device_change_handler, (void *)chip); 1420 + INIT_WORK(&device_change, device_change_handler); 1421 + device_change_chip = chip; 1422 1422 1423 1423 #ifdef PMAC_SUPPORT_AUTOMUTE 1424 1424 if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0)