Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

treewide: setup_timer() -> timer_setup() (2 field)

This converts all remaining setup_timer() calls that use a nested field
to reach a struct timer_list. Coccinelle does not have an easy way to
match multiple fields, so a new script is needed to change the matches of
"&_E->_timer" into "&_E->_field1._timer" in all the rules.

spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup-2fields.cocci

@fix_address_of depends@
expression e;
@@

setup_timer(
-&(e)
+&e
, ...)

// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _field1;
identifier _timer;
type _cast_data;
@@

(
-setup_timer(&_E->_field1._timer, NULL, _E);
+timer_setup(&_E->_field1._timer, NULL, 0);
|
-setup_timer(&_E->_field1._timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_field1._timer, NULL, 0);
|
-setup_timer(&_E._field1._timer, NULL, &_E);
+timer_setup(&_E._field1._timer, NULL, 0);
|
-setup_timer(&_E._field1._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._field1._timer, NULL, 0);
)

@change_timer_function_usage@
expression _E;
identifier _field1;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@

(
-setup_timer(&_E->_field1._timer, _callback, _E);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E->_field1._timer, &_callback, _E);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E->_field1._timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E->_field1._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E->_field1._timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E->_field1._timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E->_field1._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E->_field1._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._field1._timer, _callback, 0);
|
_E->_field1._timer@_stl.function = _callback;
|
_E->_field1._timer@_stl.function = &_callback;
|
_E->_field1._timer@_stl.function = (_cast_func)_callback;
|
_E->_field1._timer@_stl.function = (_cast_func)&_callback;
|
_E._field1._timer@_stl.function = _callback;
|
_E._field1._timer@_stl.function = &_callback;
|
_E._field1._timer@_stl.function = (_cast_func)_callback;
|
_E._field1._timer@_stl.function = (_cast_func)&_callback;
)

// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._field1;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@

void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _field1._timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _field1._timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _field1._timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _field1._timer);
... when != _origarg
)
}

// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._field1;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@

void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _field1._timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}

// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@

void _callback(struct timer_list *t)
{ ... }

// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._field1;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@

void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _field1._timer);
...
}

// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._field1;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@

void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _field1._timer);
}

// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._field1;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@

(
-timer_setup(&_E->_field1._timer, _callback, 0);
+setup_timer(&_E->_field1._timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._field1._timer, _callback, 0);
+setup_timer(&_E._field1._timer, _callback, (_cast_data)&_E);
)

// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._field1;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@

(
_E->_field1._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_field1._timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_field1._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_field1._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._field1._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._field1._timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._field1._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._field1._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)

// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._field1;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@

_callback(
(
-(_cast_data)_E
+&_E->_field1._timer
|
-(_cast_data)&_E
+&_E._field1._timer
|
-_E
+&_E->_field1._timer
)
)

// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _field1;
identifier _timer;
identifier _callback;
@@

(
-setup_timer(&_E->_field1._timer, _callback, 0);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E->_field1._timer, _callback, 0L);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E->_field1._timer, _callback, 0UL);
+timer_setup(&_E->_field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, _callback, 0);
+timer_setup(&_E._field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, _callback, 0L);
+timer_setup(&_E._field1._timer, _callback, 0);
|
-setup_timer(&_E._field1._timer, _callback, 0UL);
+timer_setup(&_E._field1._timer, _callback, 0);
|
-setup_timer(&_field1._timer, _callback, 0);
+timer_setup(&_field1._timer, _callback, 0);
|
-setup_timer(&_field1._timer, _callback, 0L);
+timer_setup(&_field1._timer, _callback, 0);
|
-setup_timer(&_field1._timer, _callback, 0UL);
+timer_setup(&_field1._timer, _callback, 0);
|
-setup_timer(_field1._timer, _callback, 0);
+timer_setup(_field1._timer, _callback, 0);
|
-setup_timer(_field1._timer, _callback, 0L);
+timer_setup(_field1._timer, _callback, 0);
|
-setup_timer(_field1._timer, _callback, 0UL);
+timer_setup(_field1._timer, _callback, 0);
)

@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@

void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}

Signed-off-by: Kees Cook <keescook@chromium.org>

+65 -65
+3 -4
arch/powerpc/kvm/booke.c
··· 599 599 spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags); 600 600 } 601 601 602 - void kvmppc_watchdog_func(unsigned long data) 602 + void kvmppc_watchdog_func(struct timer_list *t) 603 603 { 604 - struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; 604 + struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer); 605 605 u32 tsr, new_tsr; 606 606 int final; 607 607 ··· 1412 1412 { 1413 1413 /* setup watchdog timer once */ 1414 1414 spin_lock_init(&vcpu->arch.wdt_lock); 1415 - setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 1416 - (unsigned long)vcpu); 1415 + timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0); 1417 1416 1418 1417 /* 1419 1418 * Clear DBSR.MRR to avoid guest debug interrupt as
+3 -4
drivers/block/rsxx/cregs.c
··· 203 203 return 0; 204 204 } 205 205 206 - static void creg_cmd_timed_out(unsigned long data) 206 + static void creg_cmd_timed_out(struct timer_list *t) 207 207 { 208 - struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data; 208 + struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer); 209 209 struct creg_cmd *cmd; 210 210 211 211 spin_lock(&card->creg_ctrl.lock); ··· 745 745 mutex_init(&card->creg_ctrl.reset_lock); 746 746 INIT_LIST_HEAD(&card->creg_ctrl.queue); 747 747 spin_lock_init(&card->creg_ctrl.lock); 748 - setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 749 - (unsigned long) card); 748 + timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0); 750 749 751 750 return 0; 752 751 }
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
··· 268 268 * 269 269 * Checks for fence activity. 270 270 */ 271 - static void amdgpu_fence_fallback(unsigned long arg) 271 + static void amdgpu_fence_fallback(struct timer_list *t) 272 272 { 273 - struct amdgpu_ring *ring = (void *)arg; 273 + struct amdgpu_ring *ring = from_timer(ring, t, 274 + fence_drv.fallback_timer); 274 275 275 276 amdgpu_fence_process(ring); 276 277 } ··· 423 422 atomic_set(&ring->fence_drv.last_seq, 0); 424 423 ring->fence_drv.initialized = false; 425 424 426 - setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 427 - (unsigned long)ring); 425 + timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0); 428 426 429 427 ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1; 430 428 spin_lock_init(&ring->fence_drv.lock);
+3 -3
drivers/infiniband/hw/mthca/mthca_catas.c
··· 130 130 spin_unlock_irqrestore(&catas_lock, flags); 131 131 } 132 132 133 - static void poll_catas(unsigned long dev_ptr) 133 + static void poll_catas(struct timer_list *t) 134 134 { 135 - struct mthca_dev *dev = (struct mthca_dev *) dev_ptr; 135 + struct mthca_dev *dev = from_timer(dev, t, catas_err.timer); 136 136 int i; 137 137 138 138 for (i = 0; i < dev->catas_err.size; ++i) ··· 149 149 { 150 150 phys_addr_t addr; 151 151 152 - setup_timer(&dev->catas_err.timer, poll_catas, (unsigned long)dev); 152 + timer_setup(&dev->catas_err.timer, poll_catas, 0); 153 153 dev->catas_err.map = NULL; 154 154 155 155 addr = pci_resource_start(dev->pdev, 0) +
+3 -2
drivers/isdn/hardware/mISDN/hfcpci.c
··· 301 301 * Timer function called when kernel timer expires 302 302 */ 303 303 static void 304 - hfcpci_Timer(struct hfc_pci *hc) 304 + hfcpci_Timer(struct timer_list *t) 305 305 { 306 + struct hfc_pci *hc = from_timer(hc, t, hw.timer); 306 307 hc->hw.timer.expires = jiffies + 75; 307 308 /* WD RESET */ 308 309 /* ··· 2043 2042 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 2044 2043 /* At this point the needed PCI config is done */ 2045 2044 /* fifos are still not enabled */ 2046 - setup_timer(&hc->hw.timer, (void *)hfcpci_Timer, (long)hc); 2045 + timer_setup(&hc->hw.timer, hfcpci_Timer, 0); 2047 2046 /* default PCM master */ 2048 2047 test_and_set_bit(HFC_CFG_MASTER, &hc->cfg); 2049 2048 return 0;
+3 -4
drivers/media/platform/fsl-viu.c
··· 339 339 } 340 340 } 341 341 342 - static void viu_vid_timeout(unsigned long data) 342 + static void viu_vid_timeout(struct timer_list *t) 343 343 { 344 - struct viu_dev *dev = (struct viu_dev *)data; 344 + struct viu_dev *dev = from_timer(dev, t, vidq.timeout); 345 345 struct viu_buf *buf; 346 346 struct viu_dmaqueue *vidq = &dev->vidq; 347 347 ··· 1466 1466 viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad, 1467 1467 "saa7113", VIU_VIDEO_DECODER_ADDR, NULL); 1468 1468 1469 - setup_timer(&viu_dev->vidq.timeout, viu_vid_timeout, 1470 - (unsigned long)viu_dev); 1469 + timer_setup(&viu_dev->vidq.timeout, viu_vid_timeout, 0); 1471 1470 viu_dev->std = V4L2_STD_NTSC_M; 1472 1471 viu_dev->first = 1; 1473 1472
+2 -2
drivers/net/ethernet/cisco/enic/enic_clsf.c
··· 123 123 } 124 124 125 125 #ifdef CONFIG_RFS_ACCEL 126 - void enic_flow_may_expire(unsigned long data) 126 + void enic_flow_may_expire(struct timer_list *t) 127 127 { 128 - struct enic *enic = (struct enic *)data; 128 + struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire); 129 129 bool res; 130 130 int j; 131 131
+2 -3
drivers/net/ethernet/cisco/enic/enic_clsf.h
··· 16 16 #ifdef CONFIG_RFS_ACCEL 17 17 int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 18 18 u16 rxq_index, u32 flow_id); 19 - void enic_flow_may_expire(unsigned long data); 19 + void enic_flow_may_expire(struct timer_list *t); 20 20 21 21 static inline void enic_rfs_timer_start(struct enic *enic) 22 22 { 23 - setup_timer(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, 24 - (unsigned long)enic); 23 + timer_setup(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, 0); 25 24 mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); 26 25 } 27 26
+10 -8
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
··· 164 164 * without doing anything, driver should continue the 5 seconds timer 165 165 * to wake up uCode for temperature check until temperature drop below CT 166 166 */ 167 - static void iwl_tt_check_exit_ct_kill(unsigned long data) 167 + static void iwl_tt_check_exit_ct_kill(struct timer_list *t) 168 168 { 169 - struct iwl_priv *priv = (struct iwl_priv *)data; 169 + struct iwl_priv *priv = from_timer(priv, t, 170 + thermal_throttle.ct_kill_exit_tm); 170 171 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 171 172 unsigned long flags; 172 173 ··· 215 214 } 216 215 } 217 216 218 - static void iwl_tt_ready_for_ct_kill(unsigned long data) 217 + static void iwl_tt_ready_for_ct_kill(struct timer_list *t) 219 218 { 220 - struct iwl_priv *priv = (struct iwl_priv *)data; 219 + struct iwl_priv *priv = from_timer(priv, t, 220 + thermal_throttle.ct_kill_waiting_tm); 221 221 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 222 222 223 223 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) ··· 614 612 memset(tt, 0, sizeof(struct iwl_tt_mgmt)); 615 613 616 614 tt->state = IWL_TI_0; 617 - setup_timer(&priv->thermal_throttle.ct_kill_exit_tm, 618 - iwl_tt_check_exit_ct_kill, (unsigned long)priv); 619 - setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm, 620 - iwl_tt_ready_for_ct_kill, (unsigned long)priv); 615 + timer_setup(&priv->thermal_throttle.ct_kill_exit_tm, 616 + iwl_tt_check_exit_ct_kill, 0); 617 + timer_setup(&priv->thermal_throttle.ct_kill_waiting_tm, 618 + iwl_tt_ready_for_ct_kill, 0); 621 619 /* setup deferred ct kill work */ 622 620 INIT_WORK(&priv->tt_work, iwl_bg_tt_work); 623 621 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
+3 -4
drivers/nfc/nfcmrvl/fw_dnld.c
··· 130 130 nfc_fw_download_done(priv->ndev->nfc_dev, priv->fw_dnld.name, error); 131 131 } 132 132 133 - static void fw_dnld_timeout(unsigned long arg) 133 + static void fw_dnld_timeout(struct timer_list *t) 134 134 { 135 - struct nfcmrvl_private *priv = (struct nfcmrvl_private *) arg; 135 + struct nfcmrvl_private *priv = from_timer(priv, t, fw_dnld.timer); 136 136 137 137 nfc_err(priv->dev, "FW loading timeout"); 138 138 priv->fw_dnld.state = STATE_RESET; ··· 538 538 } 539 539 540 540 /* Configure a timer for timeout */ 541 - setup_timer(&priv->fw_dnld.timer, fw_dnld_timeout, 542 - (unsigned long) priv); 541 + timer_setup(&priv->fw_dnld.timer, fw_dnld_timeout, 0); 543 542 mod_timer(&priv->fw_dnld.timer, 544 543 jiffies + msecs_to_jiffies(FW_DNLD_TIMEOUT)); 545 544
+8 -8
drivers/nfc/st-nci/se.c
··· 677 677 } 678 678 EXPORT_SYMBOL(st_nci_se_io); 679 679 680 - static void st_nci_se_wt_timeout(unsigned long data) 680 + static void st_nci_se_wt_timeout(struct timer_list *t) 681 681 { 682 682 /* 683 683 * No answer from the secure element ··· 690 690 */ 691 691 /* hardware reset managed through VCC_UICC_OUT power supply */ 692 692 u8 param = 0x01; 693 - struct st_nci_info *info = (struct st_nci_info *) data; 693 + struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer); 694 694 695 695 pr_debug("\n"); 696 696 ··· 708 708 info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME); 709 709 } 710 710 711 - static void st_nci_se_activation_timeout(unsigned long data) 711 + static void st_nci_se_activation_timeout(struct timer_list *t) 712 712 { 713 - struct st_nci_info *info = (struct st_nci_info *) data; 713 + struct st_nci_info *info = from_timer(info, t, 714 + se_info.se_active_timer); 714 715 715 716 pr_debug("\n"); 716 717 ··· 726 725 727 726 init_completion(&info->se_info.req_completion); 728 727 /* initialize timers */ 729 - setup_timer(&info->se_info.bwi_timer, st_nci_se_wt_timeout, 730 - (unsigned long)info); 728 + timer_setup(&info->se_info.bwi_timer, st_nci_se_wt_timeout, 0); 731 729 info->se_info.bwi_active = false; 732 730 733 - setup_timer(&info->se_info.se_active_timer, 734 - st_nci_se_activation_timeout, (unsigned long)info); 731 + timer_setup(&info->se_info.se_active_timer, 732 + st_nci_se_activation_timeout, 0); 735 733 info->se_info.se_active = false; 736 734 737 735 info->se_info.xch_error = false;
+9 -8
drivers/nfc/st21nfca/se.c
··· 252 252 } 253 253 EXPORT_SYMBOL(st21nfca_hci_se_io); 254 254 255 - static void st21nfca_se_wt_timeout(unsigned long data) 255 + static void st21nfca_se_wt_timeout(struct timer_list *t) 256 256 { 257 257 /* 258 258 * No answer from the secure element ··· 265 265 */ 266 266 /* hardware reset managed through VCC_UICC_OUT power supply */ 267 267 u8 param = 0x01; 268 - struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data; 268 + struct st21nfca_hci_info *info = from_timer(info, t, 269 + se_info.bwi_timer); 269 270 270 271 pr_debug("\n"); 271 272 ··· 284 283 info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME); 285 284 } 286 285 287 - static void st21nfca_se_activation_timeout(unsigned long data) 286 + static void st21nfca_se_activation_timeout(struct timer_list *t) 288 287 { 289 - struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data; 288 + struct st21nfca_hci_info *info = from_timer(info, t, 289 + se_info.se_active_timer); 290 290 291 291 pr_debug("\n"); 292 292 ··· 394 392 395 393 init_completion(&info->se_info.req_completion); 396 394 /* initialize timers */ 397 - setup_timer(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 398 - (unsigned long)info); 395 + timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0); 399 396 info->se_info.bwi_active = false; 400 397 401 - setup_timer(&info->se_info.se_active_timer, 402 - st21nfca_se_activation_timeout, (unsigned long)info); 398 + timer_setup(&info->se_info.se_active_timer, 399 + st21nfca_se_activation_timeout, 0); 403 400 info->se_info.se_active = false; 404 401 405 402 info->se_info.count_pipes = 0;
+3 -3
drivers/scsi/sym53c8xx_2/sym_glue.c
··· 565 565 /* 566 566 * Linux entry point of the timer handler 567 567 */ 568 - static void sym53c8xx_timer(unsigned long npref) 568 + static void sym53c8xx_timer(struct timer_list *t) 569 569 { 570 - struct sym_hcb *np = (struct sym_hcb *)npref; 570 + struct sym_hcb *np = from_timer(np, t, s.timer); 571 571 unsigned long flags; 572 572 573 573 spin_lock_irqsave(np->s.host->host_lock, flags); ··· 1351 1351 /* 1352 1352 * Start the timer daemon 1353 1353 */ 1354 - setup_timer(&np->s.timer, sym53c8xx_timer, (unsigned long)np); 1354 + timer_setup(&np->s.timer, sym53c8xx_timer, 0); 1355 1355 np->s.lasttime=0; 1356 1356 sym_timer (np); 1357 1357
+6 -4
net/ipv6/ip6_fib.c
··· 70 70 * result of redirects, path MTU changes, etc. 71 71 */ 72 72 73 - static void fib6_gc_timer_cb(unsigned long arg); 73 + static void fib6_gc_timer_cb(struct timer_list *t); 74 74 75 75 #define FOR_WALKERS(net, w) \ 76 76 list_for_each_entry(w, &(net)->ipv6.fib6_walkers, lh) ··· 2026 2026 spin_unlock_bh(&net->ipv6.fib6_gc_lock); 2027 2027 } 2028 2028 2029 - static void fib6_gc_timer_cb(unsigned long arg) 2029 + static void fib6_gc_timer_cb(struct timer_list *t) 2030 2030 { 2031 - fib6_run_gc(0, (struct net *)arg, true); 2031 + struct net *arg = from_timer(arg, t, ipv6.ip6_fib_timer); 2032 + 2033 + fib6_run_gc(0, arg, true); 2032 2034 } 2033 2035 2034 2036 static int __net_init fib6_net_init(struct net *net) ··· 2045 2043 spin_lock_init(&net->ipv6.fib6_gc_lock); 2046 2044 rwlock_init(&net->ipv6.fib6_walker_lock); 2047 2045 INIT_LIST_HEAD(&net->ipv6.fib6_walkers); 2048 - setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net); 2046 + timer_setup(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, 0); 2049 2047 2050 2048 net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL); 2051 2049 if (!net->ipv6.rt6_stats)
+3 -4
net/ncsi/ncsi-manage.c
··· 184 184 nd->handler(nd); 185 185 } 186 186 187 - static void ncsi_channel_monitor(unsigned long data) 187 + static void ncsi_channel_monitor(struct timer_list *t) 188 188 { 189 - struct ncsi_channel *nc = (struct ncsi_channel *)data; 189 + struct ncsi_channel *nc = from_timer(nc, t, monitor.timer); 190 190 struct ncsi_package *np = nc->package; 191 191 struct ncsi_dev_priv *ndp = np->ndp; 192 192 struct ncsi_channel_mode *ncm; ··· 313 313 nc->package = np; 314 314 nc->state = NCSI_CHANNEL_INACTIVE; 315 315 nc->monitor.enabled = false; 316 - setup_timer(&nc->monitor.timer, 317 - ncsi_channel_monitor, (unsigned long)nc); 316 + timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0); 318 317 spin_lock_init(&nc->lock); 319 318 INIT_LIST_HEAD(&nc->link); 320 319 for (index = 0; index < NCSI_CAP_MAX; index++)